remove builtin providers
All providers moved to new repos. Added a README, which also serves to preserve the directory in git in case we want to add select providers back into core (e.g. null, template, test).
This commit is contained in:
parent
a30007b41d
commit
1ab40eae35
|
@ -0,0 +1 @@
|
|||
providers moved to github.com/terraform-providers
|
|
@ -1,93 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// InstanceNetWork is the network type of an ECS instance.
type InstanceNetWork string

// Supported instance network types.
const (
	ClassicNet = InstanceNetWork("classic")
	VpcNet     = InstanceNetWork("vpc")
)
|
||||
|
||||
// defaultTimeout is the wait timeout (in seconds) for common products, ECS e.g.
const defaultTimeout = 120

// defaultLongTimeout is the wait timeout (in seconds) for long-running-progress products, RDS e.g.
const defaultLongTimeout = 1000
|
||||
|
||||
// getRegion returns the region configured on the provider's AliyunClient.
// The meta argument must be the *AliyunClient stored by the provider.
func getRegion(d *schema.ResourceData, meta interface{}) common.Region {
	return meta.(*AliyunClient).Region
}
|
||||
|
||||
func notFoundError(err error) bool {
|
||||
if e, ok := err.(*common.Error); ok &&
|
||||
(e.StatusCode == 404 || e.ErrorResponse.Message == "Not found" || e.Code == InstanceNotfound) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Protocol represents a network protocol.
type Protocol string

// Constants of protocol definition.
const (
	Http  = Protocol("http")
	Https = Protocol("https")
	Tcp   = Protocol("tcp")
	Udp   = Protocol("udp")
)

// ValidProtocols is the list of protocols accepted by isProtocolValid.
var ValidProtocols = []Protocol{Http, Https, Tcp, Udp}

// isProtocolValid reports whether value names one of the protocols in
// ValidProtocols. Simple array membership check; supports string type only.
func isProtocolValid(value string) bool {
	for _, v := range ValidProtocols {
		if string(v) == value {
			// Return as soon as a match is found instead of scanning
			// the rest of the list (the original kept iterating).
			return true
		}
	}
	return false
}
|
||||
|
||||
// DefaultBusinessInfo tags API calls as originating from Terraform.
var DefaultBusinessInfo = ecs.BusinessInfo{
	Pack: "terraform",
}
|
||||
|
||||
// DEFAULT_REGION is the default region for all resources.
const DEFAULT_REGION = "cn-beijing"

// DEFAULT_DB_SECURITY_IP is the default security IP for a DB instance.
const DEFAULT_DB_SECURITY_IP = "127.0.0.1"

// DEFAULT_INSTANCE_COUNT — only a single instance is created per operation.
const DEFAULT_INSTANCE_COUNT = 1

// MULTI_IZ_SYMBOL is the marker used for multi-availability-zone (multi-IZ) deployments.
const MULTI_IZ_SYMBOL = "MAZ"

// DB_DEFAULT_CONNECT_PORT is the default connect port of a DB instance.
const DB_DEFAULT_CONNECT_PORT = "3306"

// Separators used when building and parsing composite string values.
const COMMA_SEPARATED = ","

const COLON_SEPARATED = ":"

// LOCAL_HOST_IP is the loopback address.
const LOCAL_HOST_IP = "127.0.0.1"
|
||||
|
||||
// expandStringList takes the result of flatmap.Expand for an array of
// strings and returns the equivalent []string.
func expandStringList(configured []interface{}) []string {
	result := make([]string, len(configured))
	for i, item := range configured {
		result[i] = item.(string)
	}
	return result
}
|
|
@ -1,138 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/denverdino/aliyungo/rds"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
)
|
||||
|
||||
// Config holds the aliyun credentials and region used to build clients.
type Config struct {
	AccessKey string        // Aliyun access key ID
	SecretKey string        // Aliyun access key secret
	Region    common.Region // target region for all API calls
}
|
||||
|
||||
// AliyunClient of aliyun
|
||||
type AliyunClient struct {
|
||||
Region common.Region
|
||||
ecsconn *ecs.Client
|
||||
essconn *ess.Client
|
||||
rdsconn *rds.Client
|
||||
// use new version
|
||||
ecsNewconn *ecs.Client
|
||||
vpcconn *ecs.Client
|
||||
slbconn *slb.Client
|
||||
}
|
||||
|
||||
// Client validates the configuration and constructs every service client
// needed by the provider, returning them bundled in an AliyunClient.
// Any connection that fails to build aborts the whole construction.
func (c *Config) Client() (*AliyunClient, error) {
	err := c.loadAndValidate()
	if err != nil {
		return nil, err
	}

	ecsconn, err := c.ecsConn()
	if err != nil {
		return nil, err
	}

	// Second ECS client pinned to a newer API version; callers pick
	// whichever version they need. EcsApiVersion20160314 is declared
	// elsewhere in this package.
	ecsNewconn, err := c.ecsConn()
	if err != nil {
		return nil, err
	}
	ecsNewconn.SetVersion(EcsApiVersion20160314)

	rdsconn, err := c.rdsConn()
	if err != nil {
		return nil, err
	}

	slbconn, err := c.slbConn()
	if err != nil {
		return nil, err
	}

	// The VPC connection is an ECS client created via NewVPCClient.
	vpcconn, err := c.vpcConn()
	if err != nil {
		return nil, err
	}

	essconn, err := c.essConn()
	if err != nil {
		return nil, err
	}

	return &AliyunClient{
		Region:     c.Region,
		ecsconn:    ecsconn,
		ecsNewconn: ecsNewconn,
		vpcconn:    vpcconn,
		slbconn:    slbconn,
		rdsconn:    rdsconn,
		essconn:    essconn,
	}, nil
}
|
||||
|
||||
// BusinessInfoKey is passed to each client's SetBusinessInfo to tag API
// traffic as coming from Terraform.
const BusinessInfoKey = "Terraform"
|
||||
|
||||
func (c *Config) loadAndValidate() error {
|
||||
err := c.validateRegion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) validateRegion() error {
|
||||
|
||||
for _, valid := range common.ValidRegions {
|
||||
if c.Region == valid {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Not a valid region: %s", c.Region)
|
||||
}
|
||||
|
||||
// ecsConn builds an ECS client tagged with the Terraform business info.
// It issues a DescribeRegions call and fails if that call errors —
// presumably an early credential/connectivity check; confirm before
// relying on it.
func (c *Config) ecsConn() (*ecs.Client, error) {
	client := ecs.NewECSClient(c.AccessKey, c.SecretKey, c.Region)
	client.SetBusinessInfo(BusinessInfoKey)

	_, err := client.DescribeRegions()

	if err != nil {
		return nil, err
	}

	return client, nil
}
|
||||
|
||||
// rdsConn builds an RDS client tagged with the Terraform business info.
func (c *Config) rdsConn() (*rds.Client, error) {
	client := rds.NewRDSClient(c.AccessKey, c.SecretKey, c.Region)
	client.SetBusinessInfo(BusinessInfoKey)
	return client, nil
}
|
||||
|
||||
// slbConn builds an SLB client tagged with the Terraform business info.
func (c *Config) slbConn() (*slb.Client, error) {
	client := slb.NewSLBClient(c.AccessKey, c.SecretKey, c.Region)
	client.SetBusinessInfo(BusinessInfoKey)
	return client, nil
}
|
||||
|
||||
func (c *Config) vpcConn() (*ecs.Client, error) {
|
||||
client := ecs.NewVPCClient(c.AccessKey, c.SecretKey, c.Region)
|
||||
client.SetBusinessInfo(BusinessInfoKey)
|
||||
return client, nil
|
||||
|
||||
}
|
||||
// essConn builds an ESS (auto-scaling) client tagged with the Terraform
// business info.
func (c *Config) essConn() (*ess.Client, error) {
	client := ess.NewESSClient(c.AccessKey, c.SecretKey, c.Region)
	client.SetBusinessInfo(BusinessInfoKey)
	return client, nil
}
|
|
@ -1,18 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
)
|
||||
|
||||
// Generates a hash for the set hash function used by the ID
|
||||
func dataResourceIdHash(ids []string) string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
for _, id := range ids {
|
||||
buf.WriteString(fmt.Sprintf("%s-", id))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", hashcode.String(buf.String()))
|
||||
}
|
|
@ -1,337 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// dataSourceAlicloudImages defines the alicloud_images data source: three
// optional filter arguments (name_regex, most_recent, owners) and a
// computed "images" list describing each matching image.
func dataSourceAlicloudImages() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAlicloudImagesRead,

		Schema: map[string]*schema.Schema{
			// Regular expression applied to image names client-side.
			"name_regex": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateNameRegex,
			},
			// When true and multiple images match, only the newest is returned.
			"most_recent": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},
			// Image owner alias filter, passed through to the API.
			"owners": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateImageOwners,
			},
			// Computed values.
			"images": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"image_id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"architecture": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"creation_time": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"description": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"image_owner_alias": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"os_type": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"os_name": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"name": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"platform": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"status": {
							Type:     schema.TypeString,
							Computed: true,
						},
						// "state" mirrors "status" (see imagesDescriptionAttributes,
						// which sets both from image.Status).
						"state": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"size": {
							Type:     schema.TypeInt,
							Computed: true,
						},
						// Complex computed values
						"disk_device_mappings": {
							Type:     schema.TypeList,
							Computed: true,
							//Set: imageDiskDeviceMappingHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"device": {
										Type:     schema.TypeString,
										Computed: true,
									},
									"size": {
										Type:     schema.TypeString,
										Computed: true,
									},
									"snapshot_id": {
										Type:     schema.TypeString,
										Computed: true,
									},
								},
							},
						},
						"product_code": {
							Type:     schema.TypeString,
							Computed: true,
						},
						// NOTE(review): is_self_shared is TypeString while the
						// other is_* attributes are TypeBool — confirm whether
						// this asymmetry is intentional.
						"is_self_shared": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"is_subscribed": {
							Type:     schema.TypeBool,
							Computed: true,
						},
						"is_copied": {
							Type:     schema.TypeBool,
							Computed: true,
						},
						"is_support_io_optimized": {
							Type:     schema.TypeBool,
							Computed: true,
						},
						"image_version": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"progress": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"usage": {
							Type:     schema.TypeString,
							Computed: true,
						},

						"tags": tagsSchema(),
					},
				},
			},
		},
	}
}
|
||||
|
||||
// dataSourceAlicloudImagesDescriptionRead performs the Alicloud Image lookup.
|
||||
func dataSourceAlicloudImagesRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
nameRegex, nameRegexOk := d.GetOk("name_regex")
|
||||
owners, ownersOk := d.GetOk("owners")
|
||||
mostRecent, mostRecentOk := d.GetOk("most_recent")
|
||||
|
||||
if nameRegexOk == false && ownersOk == false && mostRecentOk == false {
|
||||
return fmt.Errorf("One of name_regex, owners or most_recent must be assigned")
|
||||
}
|
||||
|
||||
params := &ecs.DescribeImagesArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
}
|
||||
|
||||
if ownersOk {
|
||||
params.ImageOwnerAlias = ecs.ImageOwnerAlias(owners.(string))
|
||||
}
|
||||
|
||||
var allImages []ecs.ImageType
|
||||
|
||||
for {
|
||||
images, paginationResult, err := conn.DescribeImages(params)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
allImages = append(allImages, images...)
|
||||
|
||||
pagination := paginationResult.NextPage()
|
||||
if pagination == nil {
|
||||
break
|
||||
}
|
||||
|
||||
params.Pagination = *pagination
|
||||
}
|
||||
|
||||
var filteredImages []ecs.ImageType
|
||||
if nameRegexOk {
|
||||
r := regexp.MustCompile(nameRegex.(string))
|
||||
for _, image := range allImages {
|
||||
// Check for a very rare case where the response would include no
|
||||
// image name. No name means nothing to attempt a match against,
|
||||
// therefore we are skipping such image.
|
||||
if image.ImageName == "" {
|
||||
log.Printf("[WARN] Unable to find Image name to match against "+
|
||||
"for image ID %q, nothing to do.",
|
||||
image.ImageId)
|
||||
continue
|
||||
}
|
||||
if r.MatchString(image.ImageName) {
|
||||
filteredImages = append(filteredImages, image)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filteredImages = allImages[:]
|
||||
}
|
||||
|
||||
var images []ecs.ImageType
|
||||
if len(filteredImages) < 1 {
|
||||
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_image - multiple results found and `most_recent` is set to: %t", mostRecent.(bool))
|
||||
if len(filteredImages) > 1 && mostRecent.(bool) {
|
||||
// Query returned single result.
|
||||
images = append(images, mostRecentImage(filteredImages))
|
||||
} else {
|
||||
images = filteredImages
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_image - Images found: %#v", images)
|
||||
return imagesDescriptionAttributes(d, images, meta)
|
||||
}
|
||||
|
||||
// populate the numerous fields that the image description returns.
|
||||
func imagesDescriptionAttributes(d *schema.ResourceData, images []ecs.ImageType, meta interface{}) error {
|
||||
var ids []string
|
||||
var s []map[string]interface{}
|
||||
for _, image := range images {
|
||||
mapping := map[string]interface{}{
|
||||
"id": image.ImageId,
|
||||
"architecture": image.Architecture,
|
||||
"creation_time": image.CreationTime.String(),
|
||||
"description": image.Description,
|
||||
"image_id": image.ImageId,
|
||||
"image_owner_alias": image.ImageOwnerAlias,
|
||||
"os_name": image.OSName,
|
||||
"os_type": image.OSType,
|
||||
"name": image.ImageName,
|
||||
"platform": image.Platform,
|
||||
"status": image.Status,
|
||||
"state": image.Status,
|
||||
"size": image.Size,
|
||||
"is_self_shared": image.IsSelfShared,
|
||||
"is_subscribed": image.IsSubscribed,
|
||||
"is_copied": image.IsCopied,
|
||||
"is_support_io_optimized": image.IsSupportIoOptimized,
|
||||
"image_version": image.ImageVersion,
|
||||
"progress": image.Progress,
|
||||
"usage": image.Usage,
|
||||
"product_code": image.ProductCode,
|
||||
|
||||
// Complex types get their own functions
|
||||
"disk_device_mappings": imageDiskDeviceMappings(image.DiskDeviceMappings.DiskDeviceMapping),
|
||||
"tags": imageTagsMappings(d, image.ImageId, meta),
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_image - adding image mapping: %v", mapping)
|
||||
ids = append(ids, image.ImageId)
|
||||
s = append(s, mapping)
|
||||
}
|
||||
|
||||
d.SetId(dataResourceIdHash(ids))
|
||||
if err := d.Set("images", s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//Find most recent image
|
||||
type imageSort []ecs.ImageType
|
||||
|
||||
func (a imageSort) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
func (a imageSort) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
func (a imageSort) Less(i, j int) bool {
|
||||
itime, _ := time.Parse(time.RFC3339, a[i].CreationTime.String())
|
||||
jtime, _ := time.Parse(time.RFC3339, a[j].CreationTime.String())
|
||||
return itime.Unix() < jtime.Unix()
|
||||
}
|
||||
|
||||
// Returns the most recent Image out of a slice of images.
|
||||
func mostRecentImage(images []ecs.ImageType) ecs.ImageType {
|
||||
sortedImages := images
|
||||
sort.Sort(imageSort(sortedImages))
|
||||
return sortedImages[len(sortedImages)-1]
|
||||
}
|
||||
|
||||
// Returns a set of disk device mappings.
|
||||
func imageDiskDeviceMappings(m []ecs.DiskDeviceMapping) []map[string]interface{} {
|
||||
var s []map[string]interface{}
|
||||
|
||||
for _, v := range m {
|
||||
mapping := map[string]interface{}{
|
||||
"device": v.Device,
|
||||
"size": v.Size,
|
||||
"snapshot_id": v.SnapshotId,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_image - adding disk device mapping: %v", mapping)
|
||||
s = append(s, mapping)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
//Returns a mapping of image tags
|
||||
func imageTagsMappings(d *schema.ResourceData, imageId string, meta interface{}) map[string]string {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ResourceType: ecs.TagResourceImage,
|
||||
ResourceId: imageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] DescribeTags for image got error: %#v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] DescribeTags for image : %v", tags)
|
||||
return tagsToMap(tags)
|
||||
}
|
|
@ -1,155 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
// TestAccAlicloudImagesDataSource_images is an acceptance test that looks
// up two centos_6 system images and checks the attributes of both entries.
func TestAccAlicloudImagesDataSource_images(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckAlicloudImagesDataSourceImagesConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAlicloudDataSourceID("data.alicloud_images.multi_image"),

					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.#", "2"),

					// First image: 64-bit centos_6.
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.architecture", "x86_64"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.disk_device_mappings.#", "0"),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.creation_time", regexp.MustCompile("^20[0-9]{2}-")),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.image_id", regexp.MustCompile("^centos_6\\w{1,5}[64]{1}.")),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.image_owner_alias", "system"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.os_type", "linux"),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.name", regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[64]{1}.")),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.progress", "100%"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.state", "Available"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.status", "Available"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.usage", "instance"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.tags.%", "0"),

					// Second image: 32-bit centos_6.
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.architecture", "i386"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.disk_device_mappings.#", "0"),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.creation_time", regexp.MustCompile("^20[0-9]{2}-")),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.image_id", regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[32]{1}.")),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.image_owner_alias", "system"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.os_type", "linux"),
					resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.name", regexp.MustCompile("^centos_6\\w{1,5}[32]{1}.")),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.progress", "100%"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.state", "Available"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.status", "Available"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.usage", "instance"),
					resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.tags.%", "0"),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccAlicloudImagesDataSource_owners verifies a lookup filtered by the
// "owners" parameter resolves to a data source with an ID.
func TestAccAlicloudImagesDataSource_owners(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckAlicloudImagesDataSourceOwnersConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAlicloudDataSourceID("data.alicloud_images.owners_filtered_image"),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccAlicloudImagesDataSource_ownersEmpty verifies that an empty
// "owners" value combined with most_recent still resolves.
func TestAccAlicloudImagesDataSource_ownersEmpty(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAlicloudDataSourceID("data.alicloud_images.empty_owners_filtered_image"),
					resource.TestCheckResourceAttr("data.alicloud_images.empty_owners_filtered_image", "most_recent", "true"),
				),
			},
		},
	})
}
|
||||
|
||||
func TestAccAlicloudImagesDataSource_nameRegexFilter(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudImagesDataSourceNameRegexConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"),
|
||||
resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^centos_")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudImagesDataSource_imageNotInFirstPage(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"),
|
||||
resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^ubuntu_14")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Instance store test - using centos images
const testAccCheckAlicloudImagesDataSourceImagesConfig = `
data "alicloud_images" "multi_image" {
	owners = "system"
	name_regex = "^centos_6"
}
`

// Testing owner parameter
const testAccCheckAlicloudImagesDataSourceOwnersConfig = `
data "alicloud_images" "owners_filtered_image" {
	most_recent = true
	owners = "system"
}
`

// Testing an explicitly empty owners value.
const testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig = `
data "alicloud_images" "empty_owners_filtered_image" {
	most_recent = true
	owners = ""
}
`

// Testing name_regex parameter
const testAccCheckAlicloudImagesDataSourceNameRegexConfig = `
data "alicloud_images" "name_regex_filtered_image" {
	most_recent = true
	owners = "system"
	name_regex = "^centos_6\\w{1,5}[64]{1}.*"
}
`

// Testing image not in first page response
const testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig = `
data "alicloud_images" "name_regex_filtered_image" {
	most_recent = true
	owners = "system"
	name_regex = "^ubuntu_14.*_64"
}
`
|
|
@ -1,127 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
)
|
||||
|
||||
// dataSourceAlicloudInstanceTypes defines the alicloud_instance_types data
// source: optional filters on family, CPU core count and memory size, with
// a computed "instance_types" list.
func dataSourceAlicloudInstanceTypes() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAlicloudInstanceTypesRead,

		Schema: map[string]*schema.Schema{
			// Server-side filter passed to DescribeInstanceTypes.
			"instance_type_family": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// Client-side filters applied after the API call.
			"cpu_core_count": {
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"memory_size": {
				Type:     schema.TypeFloat,
				Optional: true,
				ForceNew: true,
			},
			// Computed values.
			"instance_types": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"cpu_core_count": {
							Type:     schema.TypeInt,
							Computed: true,
						},
						"memory_size": {
							Type:     schema.TypeFloat,
							Computed: true,
						},
						"family": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
		},
	}
}
|
||||
|
||||
// dataSourceAlicloudInstanceTypesRead queries DescribeInstanceTypesNew
// (family filter applied server-side), then filters the response by CPU
// core count and memory size client-side.
func dataSourceAlicloudInstanceTypesRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AliyunClient).ecsconn

	// Zero values mean "no filter"; the failed-assertion case also yields
	// the zero value via the discarded ok.
	cpu, _ := d.Get("cpu_core_count").(int)
	mem, _ := d.Get("memory_size").(float64)

	args, err := buildAliyunAlicloudInstanceTypesArgs(d, meta)

	if err != nil {
		return err
	}

	resp, err := conn.DescribeInstanceTypesNew(args)
	if err != nil {
		return err
	}

	var instanceTypes []ecs.InstanceTypeItemType
	for _, types := range resp {
		if cpu > 0 && types.CpuCoreCount != cpu {
			continue
		}

		if mem > 0 && types.MemorySize != mem {
			continue
		}
		instanceTypes = append(instanceTypes, types)
	}

	if len(instanceTypes) < 1 {
		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
	}

	log.Printf("[DEBUG] alicloud_instance_type - Types found: %#v", instanceTypes)
	return instanceTypesDescriptionAttributes(d, instanceTypes)
}
|
||||
|
||||
// instanceTypesDescriptionAttributes writes the filtered instance types to
// the "instance_types" attribute and derives the data source ID from the
// instance type IDs.
func instanceTypesDescriptionAttributes(d *schema.ResourceData, types []ecs.InstanceTypeItemType) error {
	var ids []string
	var s []map[string]interface{}
	for _, t := range types {
		mapping := map[string]interface{}{
			"id":             t.InstanceTypeId,
			"cpu_core_count": t.CpuCoreCount,
			"memory_size":    t.MemorySize,
			"family":         t.InstanceTypeFamily,
		}

		log.Printf("[DEBUG] alicloud_instance_type - adding type mapping: %v", mapping)
		ids = append(ids, t.InstanceTypeId)
		s = append(s, mapping)
	}

	d.SetId(dataResourceIdHash(ids))
	if err := d.Set("instance_types", s); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func buildAliyunAlicloudInstanceTypesArgs(d *schema.ResourceData, meta interface{}) (*ecs.DescribeInstanceTypesArgs, error) {
|
||||
args := &ecs.DescribeInstanceTypesArgs{}
|
||||
|
||||
if v := d.Get("instance_type_family").(string); v != "" {
|
||||
args.InstanceTypeFamily = v
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudInstanceTypesDataSource_basic(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.id", "ecs.s3.large"),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.#", "1"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Lookup by CPU core count and memory size only.
const testAccCheckAlicloudInstanceTypesDataSourceBasicConfig = `
data "alicloud_instance_types" "4c8g" {
	cpu_core_count = 4
	memory_size = 8
}
`

// Same lookup narrowed by an explicit instance type family.
const testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate = `
data "alicloud_instance_types" "4c8g" {
	instance_type_family= "ecs.s3"
	cpu_core_count = 4
	memory_size = 8
}
`
|
|
@ -1,114 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
)
|
||||
|
||||
// dataSourceAlicloudRegions defines the alicloud_regions data source:
// optional "name" and "current" filters with a computed "regions" list.
func dataSourceAlicloudRegions() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAlicloudRegionsRead,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// When true, restrict the result to the provider's current region.
			"current": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			//Computed value
			"regions": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"region_id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"local_name": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
		},
	}
}
|
||||
|
||||
// dataSourceAlicloudRegionsRead lists all regions via DescribeRegions and
// filters them: with "current" set it keeps only the provider's region
// (erroring if a supplied name disagrees); with "name" set it keeps the
// matching region; otherwise it keeps everything.
func dataSourceAlicloudRegionsRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AliyunClient).ecsconn
	currentRegion := getRegion(d, meta)

	resp, err := conn.DescribeRegions()
	if err != nil {
		return err
	}
	if resp == nil || len(resp) == 0 {
		return fmt.Errorf("no matching regions found")
	}
	name, nameOk := d.GetOk("name")
	current := d.Get("current").(bool)
	var filterRegions []ecs.RegionType
	for _, region := range resp {
		if current {
			// A name that contradicts the current region is a user error.
			if nameOk && common.Region(name.(string)) != currentRegion {
				return fmt.Errorf("name doesn't match current region: %#v, please input again.", currentRegion)
			}
			if region.RegionId == currentRegion {
				filterRegions = append(filterRegions, region)
				break
			}
			continue
		}
		if nameOk {
			if common.Region(name.(string)) == region.RegionId {
				filterRegions = append(filterRegions, region)
				break
			}
			continue
		}
		// No filters: include every region.
		filterRegions = append(filterRegions, region)
	}
	if len(filterRegions) < 1 {
		return fmt.Errorf("Your query region returned no results. Please change your search criteria and try again.")
	}

	return regionsDescriptionAttributes(d, filterRegions)
}
|
||||
|
||||
func regionsDescriptionAttributes(d *schema.ResourceData, regions []ecs.RegionType) error {
|
||||
var ids []string
|
||||
var s []map[string]interface{}
|
||||
for _, region := range regions {
|
||||
mapping := map[string]interface{}{
|
||||
"id": region.RegionId,
|
||||
"region_id": region.RegionId,
|
||||
"local_name": region.LocalName,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_regions - adding region mapping: %v", mapping)
|
||||
ids = append(ids, string(region.RegionId))
|
||||
s = append(s, mapping)
|
||||
}
|
||||
|
||||
d.SetId(dataResourceIdHash(ids))
|
||||
if err := d.Set("regions", s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccAlicloudRegionsDataSource_regions(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudRegionsDataSourceRegionsConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_regions.region"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "name", "cn-beijing"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "current", "true"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.#", "1"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.id", "cn-beijing"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.region_id", "cn-beijing"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.local_name", "华北 2"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudRegionsDataSource_name(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudRegionsDataSourceNameConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_regions.name_filtered_region"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.name_filtered_region", "name", "cn-hangzhou")),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudRegionsDataSource_current(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudRegionsDataSourceCurrentConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_regions.current_filtered_region"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.current_filtered_region", "current", "true"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudRegionsDataSource_empty(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudRegionsDataSourceEmptyConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_regions.empty_params_region"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.id", "cn-shenzhen"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.region_id", "cn-shenzhen"),
|
||||
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.local_name", "华南 1"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Config exercising the name and current filters together.
const testAccCheckAlicloudRegionsDataSourceRegionsConfig = `
data "alicloud_regions" "region" {
	name = "cn-beijing"
	current = true
}
`

// Testing name parameter
const testAccCheckAlicloudRegionsDataSourceNameConfig = `
data "alicloud_regions" "name_filtered_region" {
	name = "cn-hangzhou"
}
`

// Testing current parameter
const testAccCheckAlicloudRegionsDataSourceCurrentConfig = `
data "alicloud_regions" "current_filtered_region" {
	current = true
}
`

// Config with no filters: should return every region.
const testAccCheckAlicloudRegionsDataSourceEmptyConfig = `
data "alicloud_regions" "empty_params_region" {
}
`
|
|
@ -1,137 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// dataSourceAlicloudZones defines the alicloud_zones data source: it lists
// the availability zones of the provider's region, optionally filtered by
// supported instance type, resource-creation type and disk category.
func dataSourceAlicloudZones() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAlicloudZonesRead,

		Schema: map[string]*schema.Schema{
			// Filters: a zone is kept only when it supports the given value.
			"available_instance_type": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"available_resource_creation": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"available_disk_category": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// Computed values.
			"zones": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"local_name": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"available_instance_types": {
							Type:     schema.TypeList,
							Computed: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"available_resource_creation": {
							Type:     schema.TypeList,
							Computed: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"available_disk_categories": {
							Type:     schema.TypeList,
							Computed: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
					},
				},
			},
		},
	}
}
|
||||
|
||||
func dataSourceAlicloudZonesRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
insType, _ := d.Get("available_instance_type").(string)
|
||||
resType, _ := d.Get("available_resource_creation").(string)
|
||||
diskType, _ := d.Get("available_disk_category").(string)
|
||||
|
||||
resp, err := conn.DescribeZones(getRegion(d, meta))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var zoneTypes []ecs.ZoneType
|
||||
for _, types := range resp {
|
||||
if insType != "" && !constraints(types.AvailableInstanceTypes.InstanceTypes, insType) {
|
||||
continue
|
||||
}
|
||||
|
||||
if resType != "" && !constraints(types.AvailableResourceCreation.ResourceTypes, resType) {
|
||||
continue
|
||||
}
|
||||
|
||||
if diskType != "" && !constraints(types.AvailableDiskCategories.DiskCategories, diskType) {
|
||||
continue
|
||||
}
|
||||
zoneTypes = append(zoneTypes, types)
|
||||
}
|
||||
|
||||
if len(zoneTypes) < 1 {
|
||||
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_zones - Zones found: %#v", zoneTypes)
|
||||
return zonesDescriptionAttributes(d, zoneTypes)
|
||||
}
|
||||
|
||||
// constraints reports whether the string v occurs in arr, which is expected
// to be a slice (or array) whose elements are strings, inspected via
// reflection. It is used to match zone capability lists against a requested
// filter value.
func constraints(arr interface{}, v string) bool {
	values := reflect.ValueOf(arr)
	// Fix: the original bound values.Len() to a local named "len", shadowing
	// the builtin, and scanned the whole slice; return at the first hit.
	for i := 0; i < values.Len(); i++ {
		if values.Index(i).String() == v {
			return true
		}
	}
	return false
}
|
||||
|
||||
func zonesDescriptionAttributes(d *schema.ResourceData, types []ecs.ZoneType) error {
|
||||
var ids []string
|
||||
var s []map[string]interface{}
|
||||
for _, t := range types {
|
||||
mapping := map[string]interface{}{
|
||||
"id": t.ZoneId,
|
||||
"local_name": t.LocalName,
|
||||
"available_instance_types": t.AvailableInstanceTypes.InstanceTypes,
|
||||
"available_resource_creation": t.AvailableResourceCreation.ResourceTypes,
|
||||
"available_disk_categories": t.AvailableDiskCategories.DiskCategories,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] alicloud_zones - adding zone mapping: %v", mapping)
|
||||
ids = append(ids, t.ZoneId)
|
||||
s = append(s, mapping)
|
||||
}
|
||||
|
||||
d.SetId(dataResourceIdHash(ids))
|
||||
if err := d.Set("zones", s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,132 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudZonesDataSource_basic(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudZonesDataSourceBasicConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudZonesDataSource_filter(t *testing.T) {
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudZonesDataSourceFilter,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
|
||||
testCheckZoneLength("data.alicloud_zones.foo"),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccCheckAlicloudZonesDataSourceFilterIoOptimized,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
|
||||
testCheckZoneLength("data.alicloud_zones.foo"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudZonesDataSource_unitRegion(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCheckAlicloudZonesDataSource_unitRegion,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// the zone length changed occasionally
|
||||
// check by range to avoid test case failure
|
||||
func testCheckZoneLength(name string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
ms := s.RootModule()
|
||||
rs, ok := ms.Resources[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", name)
|
||||
}
|
||||
|
||||
is := rs.Primary
|
||||
if is == nil {
|
||||
return fmt.Errorf("No primary instance: %s", name)
|
||||
}
|
||||
|
||||
i, err := strconv.Atoi(is.Attributes["zones.#"])
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert zone length err: %#v", err)
|
||||
}
|
||||
|
||||
if i <= 0 {
|
||||
return fmt.Errorf("zone length expected greater than 0 got err: %d", i)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Config with no filters: should return every zone of the region.
const testAccCheckAlicloudZonesDataSourceBasicConfig = `
data "alicloud_zones" "foo" {
}
`

// Config combining all three capability filters.
const testAccCheckAlicloudZonesDataSourceFilter = `
data "alicloud_zones" "foo" {
	available_instance_type= "ecs.c2.xlarge"
	available_resource_creation= "VSwitch"
	available_disk_category= "cloud_efficiency"
}
`

// Variant filter: IoOptimized resource creation with the "cloud" disk category.
const testAccCheckAlicloudZonesDataSourceFilterIoOptimized = `
data "alicloud_zones" "foo" {
	available_instance_type= "ecs.c2.xlarge"
	available_resource_creation= "IoOptimized"
	available_disk_category= "cloud"
}
`

// Config routing the query through an aliased provider in another region.
const testAccCheckAlicloudZonesDataSource_unitRegion = `
provider "alicloud" {
	alias = "northeast"
	region = "ap-northeast-1"
}

data "alicloud_zones" "foo" {
	provider = "alicloud.northeast"
	available_resource_creation= "VSwitch"
}
`
|
|
@ -1,52 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import "github.com/denverdino/aliyungo/common"
|
||||
|
||||
// Error codes returned by the Alicloud APIs, grouped by product. These are
// matched against common.Error codes/messages to classify failures (see
// notFoundError and GetNotFoundErrorFromString).
const (
	// common
	Notfound = "Not found"
	// ecs
	InstanceNotfound = "Instance.Notfound"
	// disk
	DiskIncorrectStatus       = "IncorrectDiskStatus"
	DiskCreatingSnapshot      = "DiskCreatingSnapshot"
	InstanceLockedForSecurity = "InstanceLockedForSecurity"
	SystemDiskNotFound        = "SystemDiskNotFound"
	// eip
	EipIncorrectStatus      = "IncorrectEipStatus"
	InstanceIncorrectStatus = "IncorrectInstanceStatus"
	HaVipIncorrectStatus    = "IncorrectHaVipStatus"
	// slb
	LoadBalancerNotFound = "InvalidLoadBalancerId.NotFound"

	// security_group
	InvalidInstanceIdAlreadyExists = "InvalidInstanceId.AlreadyExists"
	InvalidSecurityGroupIdNotFound = "InvalidSecurityGroupId.NotFound"
	SgDependencyViolation          = "DependencyViolation"

	// Nat gateway
	NatGatewayInvalidRegionId            = "Invalid.RegionId"
	DependencyViolationBandwidthPackages = "DependencyViolation.BandwidthPackages"
	NotFindSnatEntryBySnatId             = "NotFindSnatEntryBySnatId"
	NotFindForwardEntryByForwardId       = "NotFindForwardEntryByForwardId"

	// vswitch
	VswitcInvalidRegionId = "InvalidRegionId.NotFound"

	// ess
	InvalidScalingGroupIdNotFound               = "InvalidScalingGroupId.NotFound"
	IncorrectScalingConfigurationLifecycleState = "IncorrectScalingConfigurationLifecycleState"

	// unknown error
	UnknownError = "UnknownError"
)
|
||||
|
||||
// GetNotFoundErrorFromString wraps a plain message in a *common.Error with
// code Instance.Notfound and a sentinel status of -1, so that notFoundError
// recognizes it as a not-found condition.
func GetNotFoundErrorFromString(str string) error {
	return &common.Error{
		ErrorResponse: common.ErrorResponse{
			Code:    InstanceNotfound,
			Message: str,
		},
		StatusCode: -1,
	}
}
|
|
@ -1,37 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
// GroupRuleDirection is the traffic direction of a security group rule.
type GroupRuleDirection string

const (
	GroupRuleIngress = GroupRuleDirection("ingress")
	GroupRuleEgress  = GroupRuleDirection("egress")
)

// GroupRuleIpProtocol is the network protocol a security group rule applies to.
type GroupRuleIpProtocol string

const (
	GroupRuleTcp  = GroupRuleIpProtocol("tcp")
	GroupRuleUdp  = GroupRuleIpProtocol("udp")
	GroupRuleIcmp = GroupRuleIpProtocol("icmp")
	GroupRuleGre  = GroupRuleIpProtocol("gre")
	GroupRuleAll  = GroupRuleIpProtocol("all")
)

// GroupRuleNicType distinguishes internet-facing from intranet rules.
type GroupRuleNicType string

const (
	GroupRuleInternet = GroupRuleNicType("internet")
	GroupRuleIntranet = GroupRuleNicType("intranet")
)

// GroupRulePolicy is the accept/drop action of a security group rule.
type GroupRulePolicy string

const (
	GroupRulePolicyAccept = GroupRulePolicy("accept")
	GroupRulePolicyDrop   = GroupRulePolicy("drop")
)

// ECS API versions used by this provider.
const (
	EcsApiVersion20160314 = "2016-03-14"
	EcsApiVersion20140526 = "2014-05-26"
)
|
|
@ -1,164 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
)
|
||||
|
||||
// Listener is the provider-side model of one SLB listener. It embeds the
// HTTP listener parameters and adds the fields that differ between the
// http/https and tcp/udp listener APIs.
type Listener struct {
	slb.HTTPListenerType

	InstancePort     int
	LoadBalancerPort int
	Protocol         string
	//tcp & udp
	PersistenceTimeout int

	//https
	SSLCertificateId string

	//tcp
	HealthCheckType slb.HealthCheckType

	//api interface: http & https is HealthCheckTimeout, tcp & udp is HealthCheckConnectTimeout
	HealthCheckConnectTimeout int
}
|
||||
|
||||
// ListenerErr tags a listener validation error with the attribute category
// that produced it.
type ListenerErr struct {
	ErrType string
	Err     error
}

// Error implements the error interface by prefixing the wrapped error with
// its category string.
func (e *ListenerErr) Error() string {
	return e.ErrType + " " + e.Err.Error()
}

// Categories used as ListenerErr.ErrType.
const (
	HealthCheckErrType   = "healthCheckErrType"
	StickySessionErrType = "stickySessionErrType"
	CookieTimeOutErrType = "cookieTimeoutErrType"
	CookieErrType        = "cookieErrType"
)
|
||||
|
||||
// Takes the result of flatmap.Expand for an array of listeners and
// returns SLB API compatible Listener objects. A listener carrying an
// ssl_certificate_id must use the "https" or "ssl" protocol; any other
// combination aborts the expansion with an error.
// (NOTE(review): the original comments referenced ELB/aws-sdk-go — this code
// was adapted from the AWS provider but targets Alicloud SLB.)
func expandListeners(configured []interface{}) ([]*Listener, error) {
	listeners := make([]*Listener, 0, len(configured))

	// Loop over our configured listeners and create
	// an array of SLB SDK compatible objects
	for _, lRaw := range configured {
		data := lRaw.(map[string]interface{})

		// Required attributes.
		ip := data["instance_port"].(int)
		lp := data["lb_port"].(int)
		l := &Listener{
			InstancePort:     ip,
			LoadBalancerPort: lp,
			Protocol:         data["lb_protocol"].(string),
		}

		l.Bandwidth = data["bandwidth"].(int)

		// Optional attributes: each is copied only when present in the map.
		if v, ok := data["scheduler"]; ok {
			l.Scheduler = slb.SchedulerType(v.(string))
		}

		if v, ok := data["ssl_certificate_id"]; ok {
			l.SSLCertificateId = v.(string)
		}

		if v, ok := data["sticky_session"]; ok {
			l.StickySession = slb.FlagType(v.(string))
		}

		if v, ok := data["sticky_session_type"]; ok {
			l.StickySessionType = slb.StickySessionType(v.(string))
		}

		if v, ok := data["cookie_timeout"]; ok {
			l.CookieTimeout = v.(int)
		}

		if v, ok := data["cookie"]; ok {
			l.Cookie = v.(string)
		}

		if v, ok := data["persistence_timeout"]; ok {
			l.PersistenceTimeout = v.(int)
		}

		if v, ok := data["health_check"]; ok {
			l.HealthCheck = slb.FlagType(v.(string))
		}

		if v, ok := data["health_check_type"]; ok {
			l.HealthCheckType = slb.HealthCheckType(v.(string))
		}

		if v, ok := data["health_check_domain"]; ok {
			l.HealthCheckDomain = v.(string)
		}

		if v, ok := data["health_check_uri"]; ok {
			l.HealthCheckURI = v.(string)
		}

		if v, ok := data["health_check_connect_port"]; ok {
			l.HealthCheckConnectPort = v.(int)
		}

		if v, ok := data["healthy_threshold"]; ok {
			l.HealthyThreshold = v.(int)
		}

		if v, ok := data["unhealthy_threshold"]; ok {
			l.UnhealthyThreshold = v.(int)
		}

		if v, ok := data["health_check_timeout"]; ok {
			l.HealthCheckTimeout = v.(int)
		}

		if v, ok := data["health_check_interval"]; ok {
			l.HealthCheckInterval = v.(int)
		}

		if v, ok := data["health_check_http_code"]; ok {
			l.HealthCheckHttpCode = slb.HealthCheckHttpCodeType(v.(string))
		}

		// ssl_certificate_id is only valid for TLS-terminating protocols.
		var valid bool
		if l.SSLCertificateId != "" {
			// validate the protocol is correct
			for _, p := range []string{"https", "ssl"} {
				if strings.ToLower(l.Protocol) == p {
					valid = true
				}
			}
		} else {
			valid = true
		}

		if valid {
			listeners = append(listeners, l)
		} else {
			return nil, fmt.Errorf("[ERR] SLB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'")
		}
	}

	return listeners, nil
}
|
||||
|
||||
func expandBackendServers(list []interface{}) []slb.BackendServerType {
|
||||
result := make([]slb.BackendServerType, 0, len(list))
|
||||
for _, i := range list {
|
||||
if i.(string) != "" {
|
||||
result = append(result, slb.BackendServerType{ServerId: i.(string), Weight: 100})
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
)
|
||||
|
||||
// Tag is a single key/value label attached to an ECS resource.
type Tag struct {
	Key   string
	Value string
}

// AddTagsArgs are the request parameters for the raw "AddTags" ECS API call.
type AddTagsArgs struct {
	ResourceId   string
	ResourceType ecs.TagResourceType //image, instance, snapshot or disk
	RegionId     common.Region
	Tag          []Tag
}

// RemoveTagsArgs are the request parameters for the raw "RemoveTags" ECS API call.
type RemoveTagsArgs struct {
	ResourceId   string
	ResourceType ecs.TagResourceType //image, instance, snapshot or disk
	RegionId     common.Region
	Tag          []Tag
}
|
||||
|
||||
func AddTags(client *ecs.Client, args *AddTagsArgs) error {
|
||||
response := ecs.AddTagsResponse{}
|
||||
err := client.Invoke("AddTags", args, &response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func RemoveTags(client *ecs.Client, args *RemoveTagsArgs) error {
|
||||
response := ecs.RemoveTagsResponse{}
|
||||
err := client.Invoke("RemoveTags", args, &response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -1,112 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/hashicorp/terraform/helper/mutexkv"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Provider returns a schema.Provider for alicloud, wiring up the provider
// configuration attributes, all data sources and all resources.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"access_key": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", nil),
				Description: descriptions["access_key"],
			},
			"secret_key": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", nil),
				Description: descriptions["secret_key"],
			},
			"region": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", DEFAULT_REGION),
				Description: descriptions["region"],
			},
		},
		DataSourcesMap: map[string]*schema.Resource{

			"alicloud_images":         dataSourceAlicloudImages(),
			"alicloud_regions":        dataSourceAlicloudRegions(),
			"alicloud_zones":          dataSourceAlicloudZones(),
			"alicloud_instance_types": dataSourceAlicloudInstanceTypes(),
		},
		ResourcesMap: map[string]*schema.Resource{
			"alicloud_instance":                  resourceAliyunInstance(),
			"alicloud_disk":                      resourceAliyunDisk(),
			"alicloud_disk_attachment":           resourceAliyunDiskAttachment(),
			"alicloud_security_group":            resourceAliyunSecurityGroup(),
			"alicloud_security_group_rule":       resourceAliyunSecurityGroupRule(),
			"alicloud_db_instance":               resourceAlicloudDBInstance(),
			"alicloud_ess_scaling_group":         resourceAlicloudEssScalingGroup(),
			"alicloud_ess_scaling_configuration": resourceAlicloudEssScalingConfiguration(),
			"alicloud_ess_scaling_rule":          resourceAlicloudEssScalingRule(),
			"alicloud_ess_schedule":              resourceAlicloudEssSchedule(),
			"alicloud_vpc":                       resourceAliyunVpc(),
			"alicloud_nat_gateway":               resourceAliyunNatGateway(),
			//both subnet and vswith exists,cause compatible old version, and compatible aws habit.
			"alicloud_subnet":          resourceAliyunSubnet(),
			"alicloud_vswitch":         resourceAliyunSubnet(),
			"alicloud_route_entry":     resourceAliyunRouteEntry(),
			"alicloud_snat_entry":      resourceAliyunSnatEntry(),
			"alicloud_forward_entry":   resourceAliyunForwardEntry(),
			"alicloud_eip":             resourceAliyunEip(),
			"alicloud_eip_association": resourceAliyunEipAssociation(),
			"alicloud_slb":             resourceAliyunSlb(),
			"alicloud_slb_attachment":  resourceAliyunSlbAttachment(),
		},

		ConfigureFunc: providerConfigure,
	}
}
|
||||
|
||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||
accesskey, ok := d.GetOk("access_key")
|
||||
if !ok {
|
||||
accesskey = os.Getenv("ALICLOUD_ACCESS_KEY")
|
||||
}
|
||||
secretkey, ok := d.GetOk("secret_key")
|
||||
if !ok {
|
||||
secretkey = os.Getenv("ALICLOUD_SECRET_KEY")
|
||||
}
|
||||
region, ok := d.GetOk("region")
|
||||
if !ok {
|
||||
region = os.Getenv("ALICLOUD_REGION")
|
||||
if region == "" {
|
||||
region = DEFAULT_REGION
|
||||
}
|
||||
}
|
||||
|
||||
config := Config{
|
||||
AccessKey: accesskey.(string),
|
||||
SecretKey: secretkey.(string),
|
||||
Region: common.Region(region.(string)),
|
||||
}
|
||||
|
||||
client, err := config.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// This is a global MutexKV for use within this plugin.
var alicloudMutexKV = mutexkv.NewMutexKV()

// descriptions holds the human-readable help text for the provider's
// configuration attributes; populated once in init.
var descriptions map[string]string

func init() {
	descriptions = map[string]string{
		"access_key": "Access key of alicloud",
		"secret_key": "Secret key of alicloud",
		"region":     "Region of alicloud",
	}
}
|
|
@ -1,59 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// Shared acceptance-test fixtures: a single provider instance registered
// under the "alicloud" name.
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"alicloud": testAccProvider,
	}
}
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestProvider_impl is a compile-time assertion that Provider satisfies the
// terraform.ResourceProvider interface.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
|
||||
|
||||
// testAccPreCheck aborts an acceptance test when the required credentials
// are missing, and defaults the test region to cn-beijing when unset.
func testAccPreCheck(t *testing.T) {
	for _, key := range []string{"ALICLOUD_ACCESS_KEY", "ALICLOUD_SECRET_KEY"} {
		if os.Getenv(key) == "" {
			t.Fatal(key + " must be set for acceptance tests")
		}
	}
	if os.Getenv("ALICLOUD_REGION") == "" {
		log.Println("[INFO] Test: Using cn-beijing as test region")
		os.Setenv("ALICLOUD_REGION", "cn-beijing")
	}
}
|
||||
|
||||
func testAccCheckAlicloudDataSourceID(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Can't find data source: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("data source ID not set")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -1,550 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/rds"
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAlicloudDBInstance defines the alicloud_db_instance resource
// schema and wires its CRUD handlers for Alicloud RDS instances.
func resourceAlicloudDBInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAlicloudDBInstanceCreate,
		Read:   resourceAlicloudDBInstanceRead,
		Update: resourceAlicloudDBInstanceUpdate,
		Delete: resourceAlicloudDBInstanceDelete,

		Schema: map[string]*schema.Schema{
			"engine": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue([]string{"MySQL", "SQLServer", "PostgreSQL", "PPAS"}),
				ForceNew:     true,
				Required:     true,
			},
			"engine_version": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue([]string{"5.5", "5.6", "5.7", "2008r2", "2012", "9.4", "9.3"}),
				ForceNew:     true,
				Required:     true,
			},
			"db_instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// Storage size in GB.
			"db_instance_storage": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"instance_charge_type": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue([]string{string(rds.Postpaid), string(rds.Prepaid)}),
				Optional:     true,
				ForceNew:     true,
				Default:      rds.Postpaid,
			},
			// Subscription period in months; only meaningful for Prepaid.
			"period": &schema.Schema{
				Type:         schema.TypeInt,
				ValidateFunc: validateAllowedIntValue([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 24, 36}),
				Optional:     true,
				ForceNew:     true,
				Default:      1,
			},

			"zone_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"multi_az": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"db_instance_net_type": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue([]string{string(common.Internet), string(common.Intranet)}),
				Optional:     true,
			},
			"allocate_public_connection": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"instance_network_type": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue([]string{string(common.VPC), string(common.Classic)}),
				Optional:     true,
				Computed:     true,
			},
			"vswitch_id": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Optional: true,
			},

			"master_user_name": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Optional: true,
			},
			"master_user_password": &schema.Schema{
				Type:      schema.TypeString,
				ForceNew:  true,
				Optional:  true,
				Sensitive: true,
			},

			"preferred_backup_period": &schema.Schema{
				Type: schema.TypeList,
				Elem: &schema.Schema{Type: schema.TypeString},
				// terraform does not support ValidateFunc of TypeList attr
				// ValidateFunc: validateAllowedStringValue([]string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}),
				Optional: true,
			},
			"preferred_backup_time": &schema.Schema{
				Type:         schema.TypeString,
				ValidateFunc: validateAllowedStringValue(rds.BACKUP_TIME),
				Optional:     true,
			},
			"backup_retention_period": &schema.Schema{
				Type:         schema.TypeInt,
				ValidateFunc: validateIntegerInRange(7, 730),
				Optional:     true,
			},

			"security_ips": &schema.Schema{
				Type:     schema.TypeList,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Computed: true,
				Optional: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Connection endpoints reported back by the API.
			"connections": &schema.Schema{
				Type: schema.TypeList,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"connection_string": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"ip_type": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"ip_address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Computed: true,
			},

			// Databases to create on the instance; hashed as a set by
			// resourceAlicloudDatabaseHash.
			"db_mappings": &schema.Schema{
				Type: schema.TypeSet,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"db_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"character_set_name": &schema.Schema{
							Type:         schema.TypeString,
							ValidateFunc: validateAllowedStringValue(rds.CHARACTER_SET_NAME),
							Required:     true,
						},
						"db_description": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Optional: true,
				Set:      resourceAlicloudDatabaseHash,
			},
		},
	}
}
|
||||
|
||||
func resourceAlicloudDatabaseHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["db_name"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["character_set_name"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["db_description"].(string)))
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
// resourceAlicloudDBInstanceCreate places an RDS purchase order, waits until
// the new instance is Running, then applies the post-create settings
// (security IPs, master account, public connection) before delegating the
// remaining attributes to Update.
func resourceAlicloudDBInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	conn := client.rdsconn

	args, err := buildDBCreateOrderArgs(d, meta)
	if err != nil {
		return err
	}

	resp, err := conn.CreateOrder(args)

	if err != nil {
		return fmt.Errorf("Error creating Alicloud db instance: %#v", err)
	}

	instanceId := resp.DBInstanceId
	if instanceId == "" {
		return fmt.Errorf("Error get Alicloud db instance id")
	}

	// Persist the ID first so a failure below still leaves a trackable resource.
	d.SetId(instanceId)
	// These attributes are request-only (the API does not echo them back on
	// read), so copy them from config into state explicitly.
	d.Set("instance_charge_type", d.Get("instance_charge_type"))
	d.Set("period", d.Get("period"))
	d.Set("period_type", d.Get("period_type"))

	// wait instance status change from Creating to running
	if err := conn.WaitForInstance(d.Id(), rds.Running, defaultLongTimeout); err != nil {
		return fmt.Errorf("WaitForInstance %s got error: %#v", rds.Running, err)
	}

	// Apply the whitelist immediately; an empty list locks access to localhost.
	if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil {
		return err
	}

	// Master account is only created when both name and password are supplied.
	masterUserName := d.Get("master_user_name").(string)
	masterUserPwd := d.Get("master_user_password").(string)
	if masterUserName != "" && masterUserPwd != "" {
		if err := client.CreateAccountByInfo(d.Id(), masterUserName, masterUserPwd); err != nil {
			return fmt.Errorf("Create db account %s error: %v", masterUserName, err)
		}
	}

	if d.Get("allocate_public_connection").(bool) {
		if err := client.AllocateDBPublicConnection(d.Id(), DB_DEFAULT_CONNECT_PORT); err != nil {
			return fmt.Errorf("Allocate public connection error: %v", err)
		}
	}

	// Update handles db_mappings, backup policy, etc., and ends with a Read.
	return resourceAlicloudDBInstanceUpdate(d, meta)
}
|
||||
|
||||
func modifySecurityIps(id string, ips interface{}, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
ipList := expandStringList(ips.([]interface{}))
|
||||
|
||||
ipstr := strings.Join(ipList[:], COMMA_SEPARATED)
|
||||
// default disable connect from outside
|
||||
if ipstr == "" {
|
||||
ipstr = LOCAL_HOST_IP
|
||||
}
|
||||
|
||||
if err := client.ModifyDBSecurityIps(id, ipstr); err != nil {
|
||||
return fmt.Errorf("Error modify security ips %s: %#v", ipstr, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// resourceAlicloudDBInstanceUpdate applies changed attributes one group at a
// time under partial state mode, so a failure mid-way only re-runs the groups
// that did not complete: db_mappings, backup policy, security_ips, then
// class/storage. It finishes with a full Read to refresh state.
func resourceAlicloudDBInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	conn := client.rdsconn
	d.Partial(true)

	if d.HasChange("db_mappings") {
		o, n := d.GetChange("db_mappings")
		os := o.(*schema.Set)
		ns := n.(*schema.Set)

		var allDbs []string
		remove := os.Difference(ns).List()
		add := ns.Difference(os).List()

		// A modified set element appears as one removal plus one addition, so
		// this also rejects in-place attribute edits, as the message says.
		if len(remove) > 0 && len(add) > 0 {
			return fmt.Errorf("Failure modify database, we neither support create and delete database simultaneous nor modify database attributes.")
		}

		if len(remove) > 0 {
			for _, db := range remove {
				dbm, _ := db.(map[string]interface{})
				if err := conn.DeleteDatabase(d.Id(), dbm["db_name"].(string)); err != nil {
					return fmt.Errorf("Failure delete database %s: %#v", dbm["db_name"].(string), err)
				}
			}
		}

		if len(add) > 0 {
			for _, db := range add {
				dbm, _ := db.(map[string]interface{})
				dbName := dbm["db_name"].(string)
				allDbs = append(allDbs, dbName)

				if err := client.CreateDatabaseByInfo(d.Id(), dbName, dbm["character_set_name"].(string), dbm["db_description"].(string)); err != nil {
					return fmt.Errorf("Failure create database %s: %#v", dbName, err)
				}

			}
		}

		// allDbs is empty on a pure delete, so this wait is a no-op then.
		if err := conn.WaitForAllDatabase(d.Id(), allDbs, rds.Running, 600); err != nil {
			return fmt.Errorf("Failure create database %#v", err)
		}

		// Grant the master account read/write on every newly created database.
		if user := d.Get("master_user_name").(string); user != "" {
			for _, dbName := range allDbs {
				if err := client.GrantDBPrivilege2Account(d.Id(), user, dbName); err != nil {
					return fmt.Errorf("Failed to grant database %s readwrite privilege to account %s: %#v", dbName, user, err)
				}
			}
		}

		d.SetPartial("db_mappings")
	}

	if d.HasChange("preferred_backup_period") || d.HasChange("preferred_backup_time") || d.HasChange("backup_retention_period") {
		period := d.Get("preferred_backup_period").([]interface{})
		periodList := expandStringList(period)
		// NOTE(review): this local shadows the stdlib "time" package within
		// this block; safe here because the package is not used below.
		time := d.Get("preferred_backup_time").(string)
		retention := d.Get("backup_retention_period").(int)

		// The backup API requires all three fields together.
		if time == "" || retention == 0 || len(periodList) < 1 {
			return fmt.Errorf("Both backup_time, backup_period and retention_period are required to set backup policy.")
		}

		ps := strings.Join(periodList[:], COMMA_SEPARATED)

		if err := client.ConfigDBBackup(d.Id(), time, ps, retention); err != nil {
			return fmt.Errorf("Error set backup policy: %#v", err)
		}
		d.SetPartial("preferred_backup_period")
		d.SetPartial("preferred_backup_time")
		d.SetPartial("backup_retention_period")
	}

	if d.HasChange("security_ips") {
		if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil {
			return err
		}
		d.SetPartial("security_ips")
	}

	if d.HasChange("db_instance_class") || d.HasChange("db_instance_storage") {
		co, cn := d.GetChange("db_instance_class")
		so, sn := d.GetChange("db_instance_storage")
		classOld := co.(string)
		classNew := cn.(string)
		storageOld := so.(int)
		storageNew := sn.(int)

		// update except the first time, because we will do it in create function
		if classOld != "" && storageOld != 0 {
			chargeType := d.Get("instance_charge_type").(string)
			if chargeType == string(rds.Prepaid) {
				return fmt.Errorf("Prepaid db instance does not support modify db_instance_class or db_instance_storage")
			}

			if err := client.ModifyDBClassStorage(d.Id(), classNew, strconv.Itoa(storageNew)); err != nil {
				return fmt.Errorf("Error modify db instance class or storage error: %#v", err)
			}
		}
	}

	d.Partial(false)
	return resourceAlicloudDBInstanceRead(d, meta)
}
|
||||
|
||||
// resourceAlicloudDBInstanceRead refreshes state from the API: instance
// attributes, databases, network connections and the security IP whitelist.
// A missing instance clears the ID so Terraform plans a re-create.
func resourceAlicloudDBInstanceRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	conn := client.rdsconn

	instance, err := client.DescribeDBInstanceById(d.Id())
	if err != nil {
		if notFoundError(err) {
			// Instance is gone; drop it from state instead of failing.
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error Describe DB InstanceAttribute: %#v", err)
	}

	args := rds.DescribeDatabasesArgs{
		DBInstanceId: d.Id(),
	}

	resp, err := conn.DescribeDatabases(&args)
	if err != nil {
		return err
	}
	// NOTE(review): an instance with zero databases is treated the same as a
	// deleted instance here and is removed from state — confirm this is the
	// intended behavior rather than only clearing db_mappings.
	if resp.Databases.Database == nil {
		d.SetId("")
		return nil
	}

	d.Set("db_mappings", flattenDatabaseMappings(resp.Databases.Database))

	argn := rds.DescribeDBInstanceNetInfoArgs{
		DBInstanceId: d.Id(),
	}

	resn, err := conn.DescribeDBInstanceNetInfo(&argn)
	if err != nil {
		return err
	}
	d.Set("connections", flattenDBConnections(resn.DBInstanceNetInfos.DBInstanceNetInfo))

	// Security IPs are best-effort: a lookup failure is logged, and the
	// (possibly empty) result is still written to state.
	ips, err := client.GetSecurityIps(d.Id())
	if err != nil {
		log.Printf("Describe DB security ips error: %#v", err)
	}
	d.Set("security_ips", ips)

	d.Set("engine", instance.Engine)
	d.Set("engine_version", instance.EngineVersion)
	d.Set("db_instance_class", instance.DBInstanceClass)
	d.Set("port", instance.Port)
	d.Set("db_instance_storage", instance.DBInstanceStorage)
	d.Set("zone_id", instance.ZoneId)
	d.Set("db_instance_net_type", instance.DBInstanceNetType)
	d.Set("instance_network_type", instance.InstanceNetworkType)

	return nil
}
|
||||
|
||||
func resourceAlicloudDBInstanceDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).rdsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteInstance(d.Id())
|
||||
|
||||
if err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("DB Instance in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
args := &rds.DescribeDBInstancesArgs{
|
||||
DBInstanceId: d.Id(),
|
||||
}
|
||||
resp, err := conn.DescribeDBInstanceAttribute(args)
|
||||
if err != nil {
|
||||
return resource.NonRetryableError(err)
|
||||
} else if len(resp.Items.DBInstanceAttribute) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("DB in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
// buildDBCreateOrderArgs translates the resource configuration into an RDS
// CreateOrder request, validating the multi-AZ / zone / vswitch combination
// and deriving charge-type-dependent fields.
func buildDBCreateOrderArgs(d *schema.ResourceData, meta interface{}) (*rds.CreateOrderArgs, error) {
	client := meta.(*AliyunClient)
	args := &rds.CreateOrderArgs{
		RegionId: getRegion(d, meta),
		// we does not expose this param to user,
		// because create prepaid instance progress will be stopped when set auto_pay to false,
		// then could not get instance info, cause timeout error
		AutoPay:           "true",
		EngineVersion:     d.Get("engine_version").(string),
		Engine:            rds.Engine(d.Get("engine").(string)),
		DBInstanceStorage: d.Get("db_instance_storage").(int),
		DBInstanceClass:   d.Get("db_instance_class").(string),
		Quantity:          DEFAULT_INSTANCE_COUNT,
		Resource:          rds.DefaultResource,
	}

	bussStr, err := json.Marshal(DefaultBusinessInfo)
	if err != nil {
		return nil, fmt.Errorf("Failed to translate bussiness info %#v from json to string", DefaultBusinessInfo)
	}

	args.BusinessInfo = string(bussStr)

	zoneId := d.Get("zone_id").(string)
	args.ZoneId = zoneId

	// multi_az and an explicit zone_id are mutually exclusive; with multi_az
	// the first multi-zone ID reported for the region is used.
	multiAZ := d.Get("multi_az").(bool)
	if multiAZ {
		if zoneId != "" {
			return nil, fmt.Errorf("You cannot set the ZoneId parameter when the MultiAZ parameter is set to true")
		}
		izs, err := client.DescribeMultiIZByRegion()
		if err != nil {
			return nil, fmt.Errorf("Get multiAZ id error")
		}

		if len(izs) < 1 {
			return nil, fmt.Errorf("Current region does not support MultiAZ.")
		}

		args.ZoneId = izs[0]
	}

	vswitchId := d.Get("vswitch_id").(string)

	networkType := d.Get("instance_network_type").(string)
	args.InstanceNetworkType = common.NetworkType(networkType)

	// A vswitch implies VPC networking; validate and derive vpc/zone from it.
	if vswitchId != "" {
		args.VSwitchId = vswitchId

		// check InstanceNetworkType with vswitchId
		// NOTE(review): typo "shold" in the user-facing message below —
		// runtime string, left untouched here.
		if networkType == string(common.Classic) {
			return nil, fmt.Errorf("When fill vswitchId, you shold set instance_network_type to VPC")
		} else if networkType == "" {
			args.InstanceNetworkType = common.VPC
		}

		// get vpcId
		vpcId, err := client.GetVpcIdByVSwitchId(vswitchId)

		if err != nil {
			return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId)
		}
		// fill vpcId by vswitchId
		args.VPCId = vpcId

		// check vswitchId in zone
		vsw, err := client.QueryVswitchById(vpcId, vswitchId)
		if err != nil {
			return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId)
		}

		// Either inherit the vswitch's zone or verify the configured one matches.
		if zoneId == "" {
			args.ZoneId = vsw.ZoneId
		} else if vsw.ZoneId != zoneId {
			return nil, fmt.Errorf("VswitchId %s is not belong to the zone %s", vswitchId, zoneId)
		}
	}

	if v := d.Get("db_instance_net_type").(string); v != "" {
		args.DBInstanceNetType = common.NetType(v)
	}

	// Charge type defaults to Postpaid when unset.
	chargeType := d.Get("instance_charge_type").(string)
	if chargeType != "" {
		args.PayType = rds.DBPayType(chargeType)
	} else {
		args.PayType = rds.Postpaid
	}

	// if charge type is postpaid, the commodity code must set to bards
	if chargeType == string(rds.Postpaid) {
		args.CommodityCode = rds.Bards
	} else {
		args.CommodityCode = rds.Rds
	}

	period := d.Get("period").(int)
	args.UsedTime, args.TimeType = TransformPeriod2Time(period, chargeType)

	return args, nil
}
|
|
@ -1,765 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/rds"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAccAlicloudDBInstance_basic is an acceptance test (talks to the real
// API): it creates a minimal Postpaid MySQL 5.6 instance and verifies the
// attributes exported back into state.
func TestAccAlicloudDBInstance_basic(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstanceConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"port",
						"3306"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_storage",
						"10"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"instance_network_type",
						"Classic"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_net_type",
						"Intranet"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine_version",
						"5.6"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine",
						"MySQL"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_vpc is an acceptance test: it creates an instance
// inside a fresh VPC/vswitch and verifies the VPC network attributes.
func TestAccAlicloudDBInstance_vpc(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_vpc,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"port",
						"3306"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_storage",
						"10"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"instance_network_type",
						"VPC"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_net_type",
						"Intranet"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine_version",
						"5.6"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine",
						"MySQL"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestC2CAlicloudDBInstance_prepaid_order exercises the Prepaid purchase
// path. The non-standard "TestC2C" prefix keeps it out of the normal
// TestAcc* acceptance run, since a prepaid order incurs real charges.
func TestC2CAlicloudDBInstance_prepaid_order(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_prepaid_order,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"port",
						"3306"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_storage",
						"10"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"instance_network_type",
						"VPC"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"db_instance_net_type",
						"Intranet"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine_version",
						"5.6"),
					resource.TestCheckResourceAttr(
						"alicloud_db_instance.foo",
						"engine",
						"MySQL"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_multiIZ is an acceptance test: it creates a
// multi-AZ instance and verifies the reported zone ID is a multi-zone ID.
func TestAccAlicloudDBInstance_multiIZ(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_multiIZ,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					testAccCheckDBInstanceMultiIZ(&instance),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_database is an acceptance test: it creates an
// instance with two databases, then applies a config adding a third and
// verifies the db_mappings set grows accordingly.
func TestAccAlicloudDBInstance_database(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_database,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"),
				),
			},

			resource.TestStep{
				Config: testAccDBInstance_database_update,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "3"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_account is an acceptance test: it creates an
// instance with a master account plus two databases and verifies the account
// is granted ReadWrite on database "foo".
func TestAccAlicloudDBInstance_account(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_grantDatabasePrivilege2Account,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"),
					testAccCheckAccountHasPrivilege2Database("alicloud_db_instance.foo", "tester", "foo", "ReadWrite"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_allocatePublicConnection is an acceptance test:
// it creates an instance with allocate_public_connection enabled and verifies
// both intranet and public connection strings are exported.
func TestAccAlicloudDBInstance_allocatePublicConnection(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_allocatePublicConnection,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "connections.#", "2"),
					testAccCheckHasPublicConnection("alicloud_db_instance.foo"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_backupPolicy is an acceptance test: it configures
// a backup policy and verifies the period/time round-trip via the API.
// NOTE(review): testAccCheckBackupPolicyExists cannot actually populate the
// `policies` slice it is handed (slice header passed by value), so the
// KeyValueInMaps checks iterate an empty slice and pass vacuously.
func TestAccAlicloudDBInstance_backupPolicy(t *testing.T) {
	var policies []map[string]interface{}

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_backup,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckBackupPolicyExists(
						"alicloud_db_instance.foo", policies),
					testAccCheckKeyValueInMaps(policies, "backup policy", "preferred_backup_period", "Wednesday,Thursday"),
					testAccCheckKeyValueInMaps(policies, "backup policy", "preferred_backup_time", "00:00Z-01:00Z"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_securityIps is an acceptance test: it sets a
// security IP whitelist, then updates it, checking the API-side value.
// NOTE(review): testAccCheckSecurityIpExists cannot actually populate the
// `ips` slice it is handed (slice header passed by value), so the
// KeyValueInMaps checks iterate an empty slice and pass vacuously.
func TestAccAlicloudDBInstance_securityIps(t *testing.T) {
	var ips []map[string]interface{}

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_securityIps,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityIpExists(
						"alicloud_db_instance.foo", ips),
					testAccCheckKeyValueInMaps(ips, "security ip", "security_ips", "127.0.0.1"),
				),
			},

			resource.TestStep{
				Config: testAccDBInstance_securityIpsConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityIpExists(
						"alicloud_db_instance.foo", ips),
					testAccCheckKeyValueInMaps(ips, "security ip", "security_ips", "10.168.1.12,100.69.7.112"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudDBInstance_upgradeClass is an acceptance test: it creates a
// t1.small instance and then applies a config upgrading it to s1.small,
// verifying the in-place class change.
func TestAccAlicloudDBInstance_upgradeClass(t *testing.T) {
	var instance rds.DBInstanceAttribute

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_db_instance.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDBInstanceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDBInstance_class,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.t1.small"),
				),
			},

			resource.TestStep{
				Config: testAccDBInstance_classUpgrade,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckDBInstanceExists(
						"alicloud_db_instance.foo", &instance),
					resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.s1.small"),
				),
			},
		},
	})

}
|
||||
|
||||
// testAccCheckSecurityIpExists verifies the instance named n has at least one
// security IP entry on the API side.
//
// NOTE(review): the final `ips = flattenDBSecurityIPs(p)` assigns to the
// local copy of the slice header only — the caller's slice is never updated,
// so downstream checks that read it see an empty slice. Fixing this needs a
// pointer/out-parameter signature change, which would break the existing
// call sites; flagged rather than changed here.
func testAccCheckSecurityIpExists(n string, ips []map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AliyunClient).rdsconn
		args := rds.DescribeDBInstanceIPsArgs{
			DBInstanceId: rs.Primary.ID,
		}

		resp, err := conn.DescribeDBInstanceIPs(&args)
		log.Printf("[DEBUG] check instance %s security ip %#v", rs.Primary.ID, resp)

		if err != nil {
			return err
		}

		p := resp.Items.DBInstanceIPArray

		if len(p) < 1 {
			return fmt.Errorf("DB security ip not found")
		}

		// See NOTE above: this assignment does not reach the caller.
		ips = flattenDBSecurityIPs(p)
		return nil
	}
}
|
||||
|
||||
func testAccCheckDBInstanceMultiIZ(i *rds.DBInstanceAttribute) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if !strings.Contains(i.ZoneId, MULTI_IZ_SYMBOL) {
|
||||
return fmt.Errorf("Current region does not support multiIZ.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAccountHasPrivilege2Database(n, accountName, dbName, privilege string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No DB instance ID is set")
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
|
||||
if err := conn.WaitForAccountPrivilege(rs.Primary.ID, accountName, dbName, rds.AccountPrivilege(privilege), 50); err != nil {
|
||||
return fmt.Errorf("Failed to grant database %s privilege to account %s: %v", dbName, accountName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckHasPublicConnection(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No DB instance ID is set")
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
|
||||
if err := conn.WaitForPublicConnection(rs.Primary.ID, 50); err != nil {
|
||||
return fmt.Errorf("Failed to allocate public connection: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckDBInstanceExists(n string, d *rds.DBInstanceAttribute) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No DB Instance ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
attr, err := client.DescribeDBInstanceById(rs.Primary.ID)
|
||||
log.Printf("[DEBUG] check instance %s attribute %#v", rs.Primary.ID, attr)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if attr == nil {
|
||||
return fmt.Errorf("DB Instance not found")
|
||||
}
|
||||
|
||||
*d = *attr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccCheckBackupPolicyExists fetches the backup policy of the instance
// named n from the API.
//
// NOTE(review): the final `ps = flattenDBBackup(bs)` assigns to the local
// copy of the slice header only — the caller's slice is never updated, so
// downstream checks that read it see an empty slice. Fixing this needs a
// pointer/out-parameter signature change, which would break the existing
// call sites; flagged rather than changed here.
func testAccCheckBackupPolicyExists(n string, ps []map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Backup policy not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AliyunClient).rdsconn

		args := rds.DescribeBackupPolicyArgs{
			DBInstanceId: rs.Primary.ID,
		}
		resp, err := conn.DescribeBackupPolicy(&args)
		log.Printf("[DEBUG] check instance %s backup policy %#v", rs.Primary.ID, resp)

		if err != nil {
			return err
		}

		var bs []rds.BackupPolicy
		bs = append(bs, resp.BackupPolicy)
		// See NOTE above: this assignment does not reach the caller.
		ps = flattenDBBackup(bs)

		return nil
	}
}
|
||||
|
||||
func testAccCheckKeyValueInMaps(ps []map[string]interface{}, propName, key, value string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
for _, policy := range ps {
|
||||
if policy[key].(string) != value {
|
||||
return fmt.Errorf("DB %s attribute '%s' expected %#v, got %#v", propName, key, value, policy[key])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckDBInstanceDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_db_instance" {
|
||||
continue
|
||||
}
|
||||
|
||||
ins, err := client.DescribeDBInstanceById(rs.Primary.ID)
|
||||
|
||||
if ins != nil {
|
||||
return fmt.Errorf("Error DB Instance still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InstanceNotfound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccDBInstanceConfig: minimal Postpaid MySQL 5.6 instance on classic network.
const testAccDBInstanceConfig = `
resource "alicloud_db_instance" "foo" {
	engine = "MySQL"
	engine_version = "5.6"
	db_instance_class = "rds.mysql.t1.small"
	db_instance_storage = "10"
	instance_charge_type = "Postpaid"
	db_instance_net_type = "Intranet"
}
`
|
||||
|
||||
// testAccDBInstance_vpc: instance placed in a freshly created VPC/vswitch.
const testAccDBInstance_vpc = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "172.16.0.0/21"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}

resource "alicloud_db_instance" "foo" {
	engine = "MySQL"
	engine_version = "5.6"
	db_instance_class = "rds.mysql.t1.small"
	db_instance_storage = "10"
	instance_charge_type = "Postpaid"
	db_instance_net_type = "Intranet"

	vswitch_id = "${alicloud_vswitch.foo.id}"
}
`
|
||||
// testAccDBInstance_multiIZ: instance with multi_az enabled (zone chosen by provider).
const testAccDBInstance_multiIZ = `
resource "alicloud_db_instance" "foo" {
	engine = "MySQL"
	engine_version = "5.6"
	db_instance_class = "rds.mysql.t1.small"
	db_instance_storage = "10"
	db_instance_net_type = "Intranet"
	multi_az = true
}
`
|
||||
|
||||
// testAccDBInstance_prepaid_order: Prepaid charge type (incurs real charges).
const testAccDBInstance_prepaid_order = `
resource "alicloud_db_instance" "foo" {
	engine = "MySQL"
	engine_version = "5.6"
	db_instance_class = "rds.mysql.t1.small"
	db_instance_storage = "10"
	instance_charge_type = "Prepaid"
	db_instance_net_type = "Intranet"
}
`
|
||||
|
||||
// testAccDBInstance_database: instance with two databases (foo, bar).
const testAccDBInstance_database = `
resource "alicloud_db_instance" "foo" {
	engine = "MySQL"
	engine_version = "5.6"
	db_instance_class = "rds.mysql.t1.small"
	db_instance_storage = "10"
	instance_charge_type = "Postpaid"
	db_instance_net_type = "Intranet"

	db_mappings = [
	    {
	      "db_name" = "foo"
	      "character_set_name" = "utf8"
	      "db_description" = "tf"
	    },{
	      "db_name" = "bar"
	      "character_set_name" = "utf8"
	      "db_description" = "tf"
	    }]
}
`
|
||||
const testAccDBInstance_database_update = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
|
||||
db_mappings = [
|
||||
{
|
||||
"db_name" = "foo"
|
||||
"character_set_name" = "utf8"
|
||||
"db_description" = "tf"
|
||||
},{
|
||||
"db_name" = "bar"
|
||||
"character_set_name" = "utf8"
|
||||
"db_description" = "tf"
|
||||
},{
|
||||
"db_name" = "zzz"
|
||||
"character_set_name" = "utf8"
|
||||
"db_description" = "tf"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDBInstance_grantDatabasePrivilege2Account = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
|
||||
master_user_name = "tester"
|
||||
master_user_password = "Test12345"
|
||||
|
||||
db_mappings = [
|
||||
{
|
||||
"db_name" = "foo"
|
||||
"character_set_name" = "utf8"
|
||||
"db_description" = "tf"
|
||||
},{
|
||||
"db_name" = "bar"
|
||||
"character_set_name" = "utf8"
|
||||
"db_description" = "tf"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDBInstance_allocatePublicConnection = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
|
||||
master_user_name = "tester"
|
||||
master_user_password = "Test12345"
|
||||
|
||||
allocate_public_connection = true
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDBInstance_backup = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
|
||||
preferred_backup_period = ["Wednesday","Thursday"]
|
||||
preferred_backup_time = "00:00Z-01:00Z"
|
||||
backup_retention_period = 9
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDBInstance_securityIps = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
}
|
||||
`
|
||||
const testAccDBInstance_securityIpsConfig = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
instance_charge_type = "Postpaid"
|
||||
db_instance_net_type = "Intranet"
|
||||
|
||||
security_ips = ["10.168.1.12", "100.69.7.112"]
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDBInstance_class = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.t1.small"
|
||||
db_instance_storage = "10"
|
||||
db_instance_net_type = "Intranet"
|
||||
}
|
||||
`
|
||||
const testAccDBInstance_classUpgrade = `
|
||||
resource "alicloud_db_instance" "foo" {
|
||||
engine = "MySQL"
|
||||
engine_version = "5.6"
|
||||
db_instance_class = "rds.mysql.s1.small"
|
||||
db_instance_storage = "10"
|
||||
db_instance_net_type = "Intranet"
|
||||
}
|
||||
`
|
|
@ -1,247 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunDisk() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunDiskCreate,
|
||||
Read: resourceAliyunDiskRead,
|
||||
Update: resourceAliyunDiskUpdate,
|
||||
Delete: resourceAliyunDiskDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateDiskName,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateDiskDescription,
|
||||
},
|
||||
|
||||
"category": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateDiskCategory,
|
||||
Default: "cloud",
|
||||
},
|
||||
|
||||
"size": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"snapshot_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"status": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"tags": tagsSchema(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunDiskCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
conn := client.ecsconn
|
||||
|
||||
availabilityZone, err := client.DescribeZone(d.Get("availability_zone").(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := &ecs.CreateDiskArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ZoneId: availabilityZone.ZoneId,
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("category"); ok && v.(string) != "" {
|
||||
category := ecs.DiskCategory(v.(string))
|
||||
if err := client.DiskAvailable(availabilityZone, category); err != nil {
|
||||
return err
|
||||
}
|
||||
args.DiskCategory = category
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("size"); ok {
|
||||
size := v.(int)
|
||||
if args.DiskCategory == ecs.DiskCategoryCloud && (size < 5 || size > 2000) {
|
||||
return fmt.Errorf("the size of cloud disk must between 5 to 2000")
|
||||
}
|
||||
|
||||
if (args.DiskCategory == ecs.DiskCategoryCloudEfficiency ||
|
||||
args.DiskCategory == ecs.DiskCategoryCloudSSD) && (size < 20 || size > 32768) {
|
||||
return fmt.Errorf("the size of %s disk must between 20 to 32768", args.DiskCategory)
|
||||
}
|
||||
args.Size = size
|
||||
|
||||
d.Set("size", args.Size)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("snapshot_id"); ok && v.(string) != "" {
|
||||
args.SnapshotId = v.(string)
|
||||
}
|
||||
|
||||
if args.Size <= 0 && args.SnapshotId == "" {
|
||||
return fmt.Errorf("One of size or snapshot_id is required when specifying an ECS disk.")
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("name"); ok && v.(string) != "" {
|
||||
args.DiskName = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok && v.(string) != "" {
|
||||
args.Description = v.(string)
|
||||
}
|
||||
|
||||
diskID, err := conn.CreateDisk(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("CreateDisk got a error: %#v", err)
|
||||
}
|
||||
|
||||
d.SetId(diskID)
|
||||
|
||||
return resourceAliyunDiskUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunDiskRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
DiskIds: []string{d.Id()},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error DescribeDiskAttribute: %#v", err)
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks)
|
||||
|
||||
if disks == nil || len(disks) <= 0 {
|
||||
return fmt.Errorf("No disks found.")
|
||||
}
|
||||
|
||||
disk := disks[0]
|
||||
d.Set("availability_zone", disk.ZoneId)
|
||||
d.Set("category", disk.Category)
|
||||
d.Set("size", disk.Size)
|
||||
d.Set("status", disk.Status)
|
||||
d.Set("name", disk.DiskName)
|
||||
d.Set("description", disk.Description)
|
||||
d.Set("snapshot_id", disk.SourceSnapshotId)
|
||||
|
||||
tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ResourceType: ecs.TagResourceDisk,
|
||||
ResourceId: d.Id(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[DEBUG] DescribeTags for disk got error: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("tags", tagsToMap(tags))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunDiskUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
if err := setTags(client, ecs.TagResourceDisk, d); err != nil {
|
||||
log.Printf("[DEBUG] Set tags for instance got error: %#v", err)
|
||||
return fmt.Errorf("Set tags for instance got error: %#v", err)
|
||||
} else {
|
||||
d.SetPartial("tags")
|
||||
}
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifyDiskAttributeArgs{
|
||||
DiskId: d.Id(),
|
||||
}
|
||||
|
||||
if d.HasChange("name") {
|
||||
d.SetPartial("name")
|
||||
val := d.Get("name").(string)
|
||||
args.DiskName = val
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
d.SetPartial("description")
|
||||
val := d.Get("description").(string)
|
||||
args.Description = val
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifyDiskAttribute(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunDiskRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunDiskDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteDisk(d.Id())
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == DiskCreatingSnapshot {
|
||||
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
DiskIds: []string{d.Id()},
|
||||
})
|
||||
|
||||
if descErr != nil {
|
||||
log.Printf("[ERROR] Delete disk is failed.")
|
||||
return resource.NonRetryableError(descErr)
|
||||
}
|
||||
if disks == nil || len(disks) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
|
@ -1,176 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunDiskAttachment() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunDiskAttachmentCreate,
|
||||
Read: resourceAliyunDiskAttachmentRead,
|
||||
Delete: resourceAliyunDiskAttachmentDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"instance_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"disk_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"device_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunDiskAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
err := diskAttachment(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(d.Get("disk_id").(string) + ":" + d.Get("instance_id").(string))
|
||||
|
||||
return resourceAliyunDiskAttachmentRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunDiskAttachmentRead(d *schema.ResourceData, meta interface{}) error {
|
||||
diskId, instanceId, err := getDiskIDAndInstanceID(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
InstanceId: instanceId,
|
||||
DiskIds: []string{diskId},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error DescribeDiskAttribute: %#v", err)
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks)
|
||||
if disks == nil || len(disks) <= 0 {
|
||||
return fmt.Errorf("No Disks Found.")
|
||||
}
|
||||
|
||||
disk := disks[0]
|
||||
d.Set("instance_id", disk.InstanceId)
|
||||
d.Set("disk_id", disk.DiskId)
|
||||
d.Set("device_name", disk.Device)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunDiskAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
diskID, instanceID, err := getDiskIDAndInstanceID(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DetachDisk(instanceID, diskID)
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceLockedForSecurity {
|
||||
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it detaches"))
|
||||
}
|
||||
}
|
||||
|
||||
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
DiskIds: []string{diskID},
|
||||
})
|
||||
|
||||
if descErr != nil {
|
||||
log.Printf("[ERROR] Disk %s is not detached.", diskID)
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
if disk.Status != ecs.DiskStatusAvailable {
|
||||
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func getDiskIDAndInstanceID(d *schema.ResourceData, meta interface{}) (string, string, error) {
|
||||
parts := strings.Split(d.Id(), ":")
|
||||
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("invalid resource id")
|
||||
}
|
||||
return parts[0], parts[1], nil
|
||||
}
|
||||
func diskAttachment(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
diskID := d.Get("disk_id").(string)
|
||||
instanceID := d.Get("instance_id").(string)
|
||||
|
||||
deviceName := d.Get("device_name").(string)
|
||||
|
||||
args := &ecs.AttachDiskArgs{
|
||||
InstanceId: instanceID,
|
||||
DiskId: diskID,
|
||||
Device: deviceName,
|
||||
}
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.AttachDisk(args)
|
||||
log.Printf("error : %s", err)
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceIncorrectStatus {
|
||||
return resource.RetryableError(fmt.Errorf("Disk or Instance status is incorrect - trying again while it attaches"))
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
InstanceId: instanceID,
|
||||
DiskIds: []string{diskID},
|
||||
})
|
||||
|
||||
if descErr != nil {
|
||||
log.Printf("[ERROR] Disk %s is not attached.", diskID)
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
if disks == nil || len(disks) <= 0 {
|
||||
return resource.RetryableError(fmt.Errorf("Disk in attaching - trying again while it is attached."))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
})
|
||||
}
|
|
@ -1,155 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestAccAlicloudDiskAttachment(t *testing.T) {
|
||||
var i ecs.InstanceAttributesType
|
||||
var v ecs.DiskItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_disk_attachment.disk-att",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDiskAttachmentDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDiskAttachmentConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckInstanceExists(
|
||||
"alicloud_instance.instance", &i),
|
||||
testAccCheckDiskExists(
|
||||
"alicloud_disk.disk", &v),
|
||||
testAccCheckDiskAttachmentExists(
|
||||
"alicloud_disk_attachment.disk-att", &i, &v),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_disk_attachment.disk-att",
|
||||
"device_name",
|
||||
"/dev/xvdb"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckDiskAttachmentExists(n string, instance *ecs.InstanceAttributesType, disk *ecs.DiskItemType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No Disk ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
request := &ecs.DescribeDisksArgs{
|
||||
RegionId: client.Region,
|
||||
DiskIds: []string{rs.Primary.Attributes["disk_id"]},
|
||||
}
|
||||
|
||||
return resource.Retry(3*time.Minute, func() *resource.RetryError {
|
||||
response, _, err := conn.DescribeDisks(request)
|
||||
if response != nil {
|
||||
for _, d := range response {
|
||||
if d.Status != ecs.DiskStatusInUse {
|
||||
return resource.RetryableError(fmt.Errorf("Disk is in attaching - trying again while it attaches"))
|
||||
} else if d.InstanceId == instance.InstanceId {
|
||||
// pass
|
||||
*disk = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.NonRetryableError(fmt.Errorf("Error finding instance/disk"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckDiskAttachmentDestroy(s *terraform.State) error {
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_disk_attachment" {
|
||||
continue
|
||||
}
|
||||
// Try to find the Disk
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
request := &ecs.DescribeDisksArgs{
|
||||
RegionId: client.Region,
|
||||
DiskIds: []string{rs.Primary.ID},
|
||||
}
|
||||
|
||||
response, _, err := conn.DescribeDisks(request)
|
||||
|
||||
for _, disk := range response {
|
||||
if disk.Status != ecs.DiskStatusAvailable {
|
||||
return fmt.Errorf("Error ECS Disk Attachment still exist")
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccDiskAttachmentConfig = `
|
||||
resource "alicloud_disk" "disk" {
|
||||
availability_zone = "cn-beijing-a"
|
||||
size = "50"
|
||||
|
||||
tags {
|
||||
Name = "TerraformTest-disk"
|
||||
}
|
||||
}
|
||||
|
||||
resource "alicloud_instance" "instance" {
|
||||
image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
|
||||
instance_type = "ecs.s1.small"
|
||||
availability_zone = "cn-beijing-a"
|
||||
security_groups = ["${alicloud_security_group.group.id}"]
|
||||
instance_name = "hello"
|
||||
internet_charge_type = "PayByBandwidth"
|
||||
io_optimized = "none"
|
||||
|
||||
tags {
|
||||
Name = "TerraformTest-instance"
|
||||
}
|
||||
}
|
||||
|
||||
resource "alicloud_disk_attachment" "disk-att" {
|
||||
disk_id = "${alicloud_disk.disk.id}"
|
||||
instance_id = "${alicloud_instance.instance.id}"
|
||||
device_name = "/dev/xvdb"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "group" {
|
||||
name = "terraform-test-group"
|
||||
description = "New security group"
|
||||
}
|
||||
|
||||
`
|
|
@ -1,166 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
)
|
||||
|
||||
func TestAccAlicloudDisk_basic(t *testing.T) {
|
||||
var v ecs.DiskItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_disk.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDiskConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDiskExists(
|
||||
"alicloud_disk.foo", &v),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_disk.foo",
|
||||
"category",
|
||||
"cloud_efficiency"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_disk.foo",
|
||||
"size",
|
||||
"30"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestAccAlicloudDisk_withTags(t *testing.T) {
|
||||
var v ecs.DiskItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
//module name
|
||||
IDRefreshName: "alicloud_disk.bar",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDiskConfigWithTags,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDiskExists("alicloud_disk.bar", &v),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_disk.bar",
|
||||
"tags.Name",
|
||||
"TerraformTest"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckDiskExists(n string, disk *ecs.DiskItemType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No Disk ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
request := &ecs.DescribeDisksArgs{
|
||||
RegionId: client.Region,
|
||||
DiskIds: []string{rs.Primary.ID},
|
||||
}
|
||||
|
||||
response, _, err := conn.DescribeDisks(request)
|
||||
log.Printf("[WARN] disk ids %#v", rs.Primary.ID)
|
||||
|
||||
if err == nil {
|
||||
if response != nil && len(response) > 0 {
|
||||
*disk = response[0]
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Error finding ECS Disk %#v", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckDiskDestroy(s *terraform.State) error {
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_disk" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the Disk
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
request := &ecs.DescribeDisksArgs{
|
||||
RegionId: client.Region,
|
||||
DiskIds: []string{rs.Primary.ID},
|
||||
}
|
||||
|
||||
response, _, err := conn.DescribeDisks(request)
|
||||
|
||||
if response != nil && len(response) > 0 {
|
||||
return fmt.Errorf("Error ECS Disk still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccDiskConfig = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_disk_category"= "cloud_efficiency"
|
||||
}
|
||||
|
||||
resource "alicloud_disk" "foo" {
|
||||
# cn-beijing
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
name = "New-disk"
|
||||
description = "Hello ecs disk."
|
||||
category = "cloud_efficiency"
|
||||
size = "30"
|
||||
}
|
||||
`
|
||||
const testAccDiskConfigWithTags = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_disk_category"= "cloud_efficiency"
|
||||
}
|
||||
|
||||
resource "alicloud_disk" "bar" {
|
||||
# cn-beijing
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
category = "cloud_efficiency"
|
||||
size = "20"
|
||||
tags {
|
||||
Name = "TerraformTest"
|
||||
}
|
||||
}
|
||||
`
|
|
@ -1,157 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunEip() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunEipCreate,
|
||||
Read: resourceAliyunEipRead,
|
||||
Update: resourceAliyunEipUpdate,
|
||||
Delete: resourceAliyunEipDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"bandwidth": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
"internet_charge_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Default: "PayByBandwidth",
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateInternetChargeType,
|
||||
},
|
||||
|
||||
"ip_address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"status": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"instance": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunEipCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
args, err := buildAliyunEipArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, allocationID, err := conn.AllocateEipAddress(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(allocationID)
|
||||
|
||||
return resourceAliyunEipRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunEipRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
eip, err := client.DescribeEipAddress(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error Describe Eip Attribute: %#v", err)
|
||||
}
|
||||
|
||||
if eip.InstanceId != "" {
|
||||
d.Set("instance", eip.InstanceId)
|
||||
} else {
|
||||
d.Set("instance", "")
|
||||
return nil
|
||||
}
|
||||
|
||||
bandwidth, _ := strconv.Atoi(eip.Bandwidth)
|
||||
d.Set("bandwidth", bandwidth)
|
||||
d.Set("internet_charge_type", eip.InternetChargeType)
|
||||
d.Set("ip_address", eip.IpAddress)
|
||||
d.Set("status", eip.Status)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEipUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
if d.HasChange("bandwidth") {
|
||||
err := conn.ModifyEipAddressAttribute(d.Id(), d.Get("bandwidth").(int))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("bandwidth")
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEipDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.ReleaseEipAddress(d.Id())
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == EipIncorrectStatus {
|
||||
return resource.RetryableError(fmt.Errorf("EIP in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
args := &ecs.DescribeEipAddressesArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
AllocationId: d.Id(),
|
||||
}
|
||||
|
||||
eips, _, descErr := conn.DescribeEipAddresses(args)
|
||||
if descErr != nil {
|
||||
return resource.NonRetryableError(descErr)
|
||||
} else if eips == nil || len(eips) < 1 {
|
||||
return nil
|
||||
}
|
||||
return resource.RetryableError(fmt.Errorf("EIP in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
func buildAliyunEipArgs(d *schema.ResourceData, meta interface{}) (*ecs.AllocateEipAddressArgs, error) {
|
||||
|
||||
args := &ecs.AllocateEipAddressArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
Bandwidth: d.Get("bandwidth").(int),
|
||||
InternetChargeType: common.InternetChargeType(d.Get("internet_charge_type").(string)),
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,131 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunEipAssociation() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunEipAssociationCreate,
|
||||
Read: resourceAliyunEipAssociationRead,
|
||||
Delete: resourceAliyunEipAssociationDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"allocation_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"instance_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunEipAssociationCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
allocationId := d.Get("allocation_id").(string)
|
||||
instanceId := d.Get("instance_id").(string)
|
||||
|
||||
if err := conn.AssociateEipAddress(allocationId, instanceId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(allocationId + ":" + instanceId)
|
||||
|
||||
return resourceAliyunEipAssociationRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunEipAssociationRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
allocationId, instanceId, err := getAllocationIdAndInstanceId(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eip, err := client.DescribeEipAddress(allocationId)
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error Describe Eip Attribute: %#v", err)
|
||||
}
|
||||
|
||||
if eip.InstanceId != instanceId {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
d.Set("instance_id", eip.InstanceId)
|
||||
d.Set("allocation_id", allocationId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEipAssociationDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
allocationId, instanceId, err := getAllocationIdAndInstanceId(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.UnassociateEipAddress(allocationId, instanceId)
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
errCode := e.ErrorResponse.Code
|
||||
if errCode == InstanceIncorrectStatus || errCode == HaVipIncorrectStatus {
|
||||
return resource.RetryableError(fmt.Errorf("Eip in use - trying again while make it unassociated."))
|
||||
}
|
||||
}
|
||||
|
||||
args := &ecs.DescribeEipAddressesArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
AllocationId: allocationId,
|
||||
}
|
||||
|
||||
eips, _, descErr := conn.DescribeEipAddresses(args)
|
||||
|
||||
if descErr != nil {
|
||||
return resource.NonRetryableError(descErr)
|
||||
} else if eips == nil || len(eips) < 1 {
|
||||
return nil
|
||||
}
|
||||
for _, eip := range eips {
|
||||
if eip.Status != ecs.EipStatusAvailable {
|
||||
return resource.RetryableError(fmt.Errorf("Eip in use - trying again while make it unassociated."))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func getAllocationIdAndInstanceId(d *schema.ResourceData, meta interface{}) (string, string, error) {
|
||||
parts := strings.Split(d.Id(), ":")
|
||||
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("invalid resource id")
|
||||
}
|
||||
return parts[0], parts[1], nil
|
||||
}
|
|
@ -1,158 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestAccAlicloudEIPAssociation(t *testing.T) {
|
||||
var asso ecs.EipAddressSetType
|
||||
var inst ecs.InstanceAttributesType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_eip_association.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEIPAssociationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEIPAssociationConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckInstanceExists(
|
||||
"alicloud_instance.instance", &inst),
|
||||
testAccCheckEIPExists(
|
||||
"alicloud_eip.eip", &asso),
|
||||
testAccCheckEIPAssociationExists(
|
||||
"alicloud_eip_association.foo", &inst, &asso),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckEIPAssociationExists(n string, instance *ecs.InstanceAttributesType, eip *ecs.EipAddressSetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No EIP Association ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
return resource.Retry(3*time.Minute, func() *resource.RetryError {
|
||||
d, err := client.DescribeEipAddress(rs.Primary.Attributes["allocation_id"])
|
||||
|
||||
if err != nil {
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
if d != nil {
|
||||
if d.Status != ecs.EipStatusInUse {
|
||||
return resource.RetryableError(fmt.Errorf("Eip is in associating - trying again while it associates"))
|
||||
} else if d.InstanceId == instance.InstanceId {
|
||||
*eip = *d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return resource.NonRetryableError(fmt.Errorf("EIP Association not found"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEIPAssociationDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_eip_association" {
|
||||
continue
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No EIP Association ID is set")
|
||||
}
|
||||
|
||||
// Try to find the EIP
|
||||
eips, _, err := client.ecsconn.DescribeEipAddresses(&ecs.DescribeEipAddressesArgs{
|
||||
RegionId: client.Region,
|
||||
AllocationId: rs.Primary.Attributes["allocation_id"],
|
||||
})
|
||||
|
||||
for _, eip := range eips {
|
||||
if eip.Status != ecs.EipStatusAvailable {
|
||||
return fmt.Errorf("Error EIP Association still exist")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccEIPAssociationConfig = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "main" {
|
||||
cidr_block = "10.1.0.0/21"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "main" {
|
||||
vpc_id = "${alicloud_vpc.main.id}"
|
||||
cidr_block = "10.1.1.0/24"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
depends_on = [
|
||||
"alicloud_vpc.main"]
|
||||
}
|
||||
|
||||
resource "alicloud_instance" "instance" {
|
||||
# cn-beijing
|
||||
vswitch_id = "${alicloud_vswitch.main.id}"
|
||||
image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd"
|
||||
|
||||
# series II
|
||||
instance_type = "ecs.n1.medium"
|
||||
io_optimized = "optimized"
|
||||
system_disk_category = "cloud_efficiency"
|
||||
|
||||
security_groups = ["${alicloud_security_group.group.id}"]
|
||||
instance_name = "test_foo"
|
||||
|
||||
tags {
|
||||
Name = "TerraformTest-instance"
|
||||
}
|
||||
}
|
||||
|
||||
resource "alicloud_eip" "eip" {
|
||||
}
|
||||
|
||||
resource "alicloud_eip_association" "foo" {
|
||||
allocation_id = "${alicloud_eip.eip.id}"
|
||||
instance_id = "${alicloud_instance.instance.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "group" {
|
||||
name = "terraform-test-group"
|
||||
description = "New security group"
|
||||
vpc_id = "${alicloud_vpc.main.id}"
|
||||
}
|
||||
`
|
|
@ -1,131 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
)
|
||||
|
||||
func TestAccAlicloudEIP_basic(t *testing.T) {
|
||||
var eip ecs.EipAddressSetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_eip.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEIPDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEIPConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEIPExists(
|
||||
"alicloud_eip.foo", &eip),
|
||||
testAccCheckEIPAttributes(&eip),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccEIPConfigTwo,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEIPExists(
|
||||
"alicloud_eip.foo", &eip),
|
||||
testAccCheckEIPAttributes(&eip),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_eip.foo",
|
||||
"bandwidth",
|
||||
"10"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckEIPExists(n string, eip *ecs.EipAddressSetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No EIP ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
d, err := client.DescribeEipAddress(rs.Primary.ID)
|
||||
|
||||
log.Printf("[WARN] eip id %#v", rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d == nil || d.IpAddress == "" {
|
||||
return fmt.Errorf("EIP not found")
|
||||
}
|
||||
|
||||
*eip = *d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEIPAttributes(eip *ecs.EipAddressSetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if eip.IpAddress == "" {
|
||||
return fmt.Errorf("Empty Ip address")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEIPDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_eip" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the EIP
|
||||
conn := client.ecsconn
|
||||
|
||||
args := &ecs.DescribeEipAddressesArgs{
|
||||
RegionId: client.Region,
|
||||
AllocationId: rs.Primary.ID,
|
||||
}
|
||||
d, _, err := conn.DescribeEipAddresses(args)
|
||||
|
||||
if d != nil && len(d) > 0 {
|
||||
return fmt.Errorf("Error EIP still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccEIPConfig = `
|
||||
resource "alicloud_eip" "foo" {
|
||||
}
|
||||
`
|
||||
|
||||
const testAccEIPConfigTwo = `
|
||||
resource "alicloud_eip" "foo" {
|
||||
bandwidth = "10"
|
||||
internet_charge_type = "PayByBandwidth"
|
||||
}
|
||||
`
|
|
@ -1,320 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAlicloudEssScalingConfiguration() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunEssScalingConfigurationCreate,
|
||||
Read: resourceAliyunEssScalingConfigurationRead,
|
||||
Update: resourceAliyunEssScalingConfigurationUpdate,
|
||||
Delete: resourceAliyunEssScalingConfigurationDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"active": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"enable": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
},
|
||||
"scaling_group_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"image_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"instance_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"io_optimized": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateIoOptimized,
|
||||
},
|
||||
"security_group_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"scaling_configuration_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"internet_charge_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ValidateFunc: validateInternetChargeType,
|
||||
},
|
||||
"internet_max_bandwidth_in": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
"internet_max_bandwidth_out": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateInternetMaxBandWidthOut,
|
||||
},
|
||||
"system_disk_category": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(ecs.DiskCategoryCloud),
|
||||
string(ecs.DiskCategoryCloudSSD),
|
||||
string(ecs.DiskCategoryCloudEfficiency),
|
||||
string(ecs.DiskCategoryEphemeralSSD),
|
||||
}),
|
||||
},
|
||||
"data_disk": &schema.Schema{
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Type: schema.TypeList,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"size": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"category": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"snapshot_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"device": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"instance_ids": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Optional: true,
|
||||
MaxItems: 20,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunEssScalingConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
args, err := buildAlicloudEssScalingConfigurationArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
essconn := meta.(*AliyunClient).essconn
|
||||
|
||||
scaling, err := essconn.CreateScalingConfiguration(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + scaling.ScalingConfigurationId)
|
||||
|
||||
return resourceAliyunEssScalingConfigurationUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunEssScalingConfigurationUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
if d.HasChange("active") {
|
||||
active := d.Get("active").(bool)
|
||||
if !active {
|
||||
return fmt.Errorf("Please active the scaling configuration directly.")
|
||||
}
|
||||
ids := strings.Split(d.Id(), COLON_SEPARATED)
|
||||
err := client.ActiveScalingConfigurationById(ids[0], ids[1])
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Active scaling configuration %s err: %#v", ids[1], err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := enableEssScalingConfiguration(d, meta); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceAliyunEssScalingConfigurationRead(d, meta)
|
||||
}
|
||||
|
||||
func enableEssScalingConfiguration(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
ids := strings.Split(d.Id(), COLON_SEPARATED)
|
||||
|
||||
if d.HasChange("enable") {
|
||||
d.SetPartial("enable")
|
||||
enable := d.Get("enable").(bool)
|
||||
if !enable {
|
||||
err := client.DisableScalingConfigurationById(ids[0])
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Disable scaling group %s err: %#v", ids[0], err)
|
||||
}
|
||||
}
|
||||
|
||||
instance_ids := []string{}
|
||||
if d.HasChange("instance_ids") {
|
||||
d.SetPartial("instance_ids")
|
||||
instances := d.Get("instance_ids").([]interface{})
|
||||
instance_ids = expandStringList(instances)
|
||||
}
|
||||
err := client.EnableScalingConfigurationById(ids[0], ids[1], instance_ids)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Enable scaling configuration %s err: %#v", ids[1], err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEssScalingConfigurationRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
ids := strings.Split(d.Id(), COLON_SEPARATED)
|
||||
c, err := client.DescribeScalingConfigurationById(ids[0], ids[1])
|
||||
if err != nil {
|
||||
if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error Describe ESS scaling configuration Attribute: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("scaling_group_id", c.ScalingGroupId)
|
||||
d.Set("active", c.LifecycleState == ess.Active)
|
||||
d.Set("image_id", c.ImageId)
|
||||
d.Set("instance_type", c.InstanceType)
|
||||
d.Set("io_optimized", c.IoOptimized)
|
||||
d.Set("security_group_id", c.SecurityGroupId)
|
||||
d.Set("scaling_configuration_name", c.ScalingConfigurationName)
|
||||
d.Set("internet_charge_type", c.InternetChargeType)
|
||||
d.Set("internet_max_bandwidth_in", c.InternetMaxBandwidthIn)
|
||||
d.Set("internet_max_bandwidth_out", c.InternetMaxBandwidthOut)
|
||||
d.Set("system_disk_category", c.SystemDiskCategory)
|
||||
d.Set("data_disk", flattenDataDiskMappings(c.DataDisks.DataDisk))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEssScalingConfigurationDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
ids := strings.Split(d.Id(), COLON_SEPARATED)
|
||||
err := client.DeleteScalingConfigurationById(ids[0], ids[1])
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == IncorrectScalingConfigurationLifecycleState {
|
||||
return resource.NonRetryableError(
|
||||
fmt.Errorf("Scaling configuration is active - please active another one and trying again."))
|
||||
}
|
||||
if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound {
|
||||
return resource.RetryableError(
|
||||
fmt.Errorf("Scaling configuration in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
_, err = client.DescribeScalingConfigurationById(ids[0], ids[1])
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.RetryableError(
|
||||
fmt.Errorf("Scaling configuration in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
func buildAlicloudEssScalingConfigurationArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingConfigurationArgs, error) {
|
||||
args := &ess.CreateScalingConfigurationArgs{
|
||||
ScalingGroupId: d.Get("scaling_group_id").(string),
|
||||
ImageId: d.Get("image_id").(string),
|
||||
InstanceType: d.Get("instance_type").(string),
|
||||
IoOptimized: ecs.IoOptimized(d.Get("io_optimized").(string)),
|
||||
SecurityGroupId: d.Get("security_group_id").(string),
|
||||
}
|
||||
|
||||
if v := d.Get("scaling_configuration_name").(string); v != "" {
|
||||
args.ScalingConfigurationName = v
|
||||
}
|
||||
|
||||
if v := d.Get("internet_charge_type").(string); v != "" {
|
||||
args.InternetChargeType = common.InternetChargeType(v)
|
||||
}
|
||||
|
||||
if v := d.Get("internet_max_bandwidth_in").(int); v != 0 {
|
||||
args.InternetMaxBandwidthIn = v
|
||||
}
|
||||
|
||||
if v := d.Get("internet_max_bandwidth_out").(int); v != 0 {
|
||||
args.InternetMaxBandwidthOut = v
|
||||
}
|
||||
|
||||
if v := d.Get("system_disk_category").(string); v != "" {
|
||||
args.SystemDisk_Category = common.UnderlineString(v)
|
||||
}
|
||||
|
||||
dds, ok := d.GetOk("data_disk")
|
||||
if ok {
|
||||
disks := dds.([]interface{})
|
||||
diskTypes := []ess.DataDiskType{}
|
||||
|
||||
for _, e := range disks {
|
||||
pack := e.(map[string]interface{})
|
||||
disk := ess.DataDiskType{
|
||||
Size: pack["size"].(int),
|
||||
Category: pack["category"].(string),
|
||||
SnapshotId: pack["snapshot_id"].(string),
|
||||
Device: pack["device"].(string),
|
||||
}
|
||||
if v := pack["size"].(int); v != 0 {
|
||||
disk.Size = v
|
||||
}
|
||||
if v := pack["category"].(string); v != "" {
|
||||
disk.Category = v
|
||||
}
|
||||
if v := pack["snapshot_id"].(string); v != "" {
|
||||
disk.SnapshotId = v
|
||||
}
|
||||
if v := pack["device"].(string); v != "" {
|
||||
disk.Device = v
|
||||
}
|
||||
diskTypes = append(diskTypes, disk)
|
||||
}
|
||||
args.DataDisk = diskTypes
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,495 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudEssScalingConfiguration_basic(t *testing.T) {
|
||||
var sc ess.ScalingConfigurationItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_scaling_configuration.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScalingConfigurationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfigurationConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudEssScalingConfiguration_multiConfig(t *testing.T) {
|
||||
var sc ess.ScalingConfigurationItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_scaling_configuration.bar",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScalingConfigurationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfiguration_multiConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.bar", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"active",
|
||||
"false"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func SkipTestAccAlicloudEssScalingConfiguration_active(t *testing.T) {
|
||||
var sc ess.ScalingConfigurationItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_scaling_configuration.bar",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScalingConfigurationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfiguration_active,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.bar", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"active",
|
||||
"true"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfiguration_inActive,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.bar", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"active",
|
||||
"false"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.bar",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func SkipTestAccAlicloudEssScalingConfiguration_enable(t *testing.T) {
|
||||
var sc ess.ScalingConfigurationItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_scaling_configuration.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScalingConfigurationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfiguration_enable,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"enable",
|
||||
"true"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingConfiguration_disable,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingConfigurationExists(
|
||||
"alicloud_ess_scaling_configuration.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"enable",
|
||||
"false"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"instance_type",
|
||||
"ecs.s2.large"),
|
||||
resource.TestMatchResourceAttr(
|
||||
"alicloud_ess_scaling_configuration.foo",
|
||||
"image_id",
|
||||
regexp.MustCompile("^centos_6")),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckEssScalingConfigurationExists(n string, d *ess.ScalingConfigurationItemType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ESS Scaling Configuration ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
ids := strings.Split(rs.Primary.ID, COLON_SEPARATED)
|
||||
attr, err := client.DescribeScalingConfigurationById(ids[0], ids[1])
|
||||
log.Printf("[DEBUG] check scaling configuration %s attribute %#v", rs.Primary.ID, attr)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if attr == nil {
|
||||
return fmt.Errorf("Scaling Configuration not found")
|
||||
}
|
||||
|
||||
*d = *attr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEssScalingConfigurationDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_ess_scaling_configuration" {
|
||||
continue
|
||||
}
|
||||
ids := strings.Split(rs.Primary.ID, COLON_SEPARATED)
|
||||
ins, err := client.DescribeScalingConfigurationById(ids[0], ids[1])
|
||||
|
||||
if ins != nil {
|
||||
return fmt.Errorf("Error ESS scaling configuration still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InstanceNotfound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccEssScalingConfigurationConfig = `
|
||||
data "alicloud_images" "ecs_image" {
|
||||
most_recent = true
|
||||
name_regex = "^centos_6\\w{1,5}[64].*"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "tf_test_foo" {
|
||||
description = "foo"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "ssh-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "22/22"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_group" "foo" {
|
||||
min_size = 1
|
||||
max_size = 1
|
||||
scaling_group_name = "foo"
|
||||
removal_policies = ["OldestInstance", "NewestInstance"]
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_configuration" "foo" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
|
||||
|
||||
image_id = "${data.alicloud_images.ecs_image.images.0.id}"
|
||||
instance_type = "ecs.s2.large"
|
||||
io_optimized = "optimized"
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccEssScalingConfiguration_multiConfig = `
|
||||
data "alicloud_images" "ecs_image" {
|
||||
most_recent = true
|
||||
name_regex = "^centos_6\\w{1,5}[64].*"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "tf_test_foo" {
|
||||
description = "foo"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "ssh-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "22/22"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_group" "foo" {
|
||||
min_size = 1
|
||||
max_size = 1
|
||||
scaling_group_name = "foo"
|
||||
removal_policies = ["OldestInstance", "NewestInstance"]
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_configuration" "foo" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
|
||||
|
||||
image_id = "${data.alicloud_images.ecs_image.images.0.id}"
|
||||
instance_type = "ecs.s2.large"
|
||||
io_optimized = "optimized"
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_configuration" "bar" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
|
||||
|
||||
image_id = "${data.alicloud_images.ecs_image.images.0.id}"
|
||||
instance_type = "ecs.s2.large"
|
||||
io_optimized = "optimized"
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccEssScalingConfiguration_active = `
|
||||
data "alicloud_images" "ecs_image" {
|
||||
most_recent = true
|
||||
name_regex = "^centos_6\\w{1,5}[64].*"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "tf_test_foo" {
|
||||
description = "foo"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "ssh-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "22/22"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_group" "foo" {
|
||||
min_size = 1
|
||||
max_size = 1
|
||||
scaling_group_name = "foo"
|
||||
removal_policies = ["OldestInstance", "NewestInstance"]
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_configuration" "foo" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
|
||||
active = true
|
||||
|
||||
image_id = "${data.alicloud_images.ecs_image.images.0.id}"
|
||||
instance_type = "ecs.s2.large"
|
||||
io_optimized = "optimized"
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
}
|
||||
`
|
||||
|
||||
// testAccEssScalingConfiguration_inActive mirrors the _active fixture but
// creates the scaling configuration with active = false.
const testAccEssScalingConfiguration_inActive = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
active = false

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}
`
|
||||
|
||||
// testAccEssScalingConfiguration_enable creates a scaling configuration with
// enable = true (enables the configuration within its group).
const testAccEssScalingConfiguration_enable = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
enable = true

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}
`
|
||||
|
||||
// testAccEssScalingConfiguration_disable mirrors the _enable fixture but
// creates the scaling configuration with enable = false.
const testAccEssScalingConfiguration_disable = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
enable = false

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}
`
|
|
@ -1,209 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAlicloudEssScalingGroup defines the alicloud_ess_scaling_group
// resource schema: an Auto Scaling group with capacity bounds, a cooldown,
// optional VPC vswitch placement, and attached RDS/SLB instance lists.
func resourceAlicloudEssScalingGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliyunEssScalingGroupCreate,
		Read:   resourceAliyunEssScalingGroupRead,
		Update: resourceAliyunEssScalingGroupUpdate,
		Delete: resourceAliyunEssScalingGroupDelete,

		Schema: map[string]*schema.Schema{
			// Lower bound of running instances (API accepts 0-100).
			"min_size": &schema.Schema{
				Type:         schema.TypeInt,
				Required:     true,
				ValidateFunc: validateIntegerInRange(0, 100),
			},
			// Upper bound of running instances (API accepts 0-100).
			"max_size": &schema.Schema{
				Type:         schema.TypeInt,
				Required:     true,
				ValidateFunc: validateIntegerInRange(0, 100),
			},
			"scaling_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// Seconds to wait between scaling activities (0-86400).
			"default_cooldown": &schema.Schema{
				Type:         schema.TypeInt,
				Default:      300,
				Optional:     true,
				ValidateFunc: validateIntegerInRange(0, 86400),
			},
			// When set, the group is placed in the vswitch's VPC.
			"vswitch_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// Up to two instance-removal policies, applied in order.
			"removal_policies": &schema.Schema{
				Type:     schema.TypeList,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				MaxItems: 2,
			},
			// RDS instances whose whitelist is managed with the group.
			"db_instance_ids": &schema.Schema{
				Type:     schema.TypeList,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				MaxItems: 3,
			},
			// SLB load balancers attached to the group.
			"loadbalancer_ids": &schema.Schema{
				Type:     schema.TypeList,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
			},
		},
	}
}
|
||||
|
||||
func resourceAliyunEssScalingGroupCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
args, err := buildAlicloudEssScalingGroupArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
essconn := meta.(*AliyunClient).essconn
|
||||
|
||||
scaling, err := essconn.CreateScalingGroup(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(scaling.ScalingGroupId)
|
||||
|
||||
return resourceAliyunEssScalingGroupUpdate(d, meta)
|
||||
}
|
||||
|
||||
// resourceAliyunEssScalingGroupRead refreshes state from the API. A
// "not found" error clears the ID so Terraform plans a re-creation instead
// of failing the refresh.
func resourceAliyunEssScalingGroupRead(d *schema.ResourceData, meta interface{}) error {

	client := meta.(*AliyunClient)

	scaling, err := client.DescribeScalingGroupById(d.Id())
	if err != nil {
		if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound {
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error Describe ESS scaling group Attribute: %#v", err)
	}

	d.Set("min_size", scaling.MinSize)
	d.Set("max_size", scaling.MaxSize)
	d.Set("scaling_group_name", scaling.ScalingGroupName)
	d.Set("default_cooldown", scaling.DefaultCooldown)
	d.Set("removal_policies", scaling.RemovalPolicies)
	d.Set("db_instance_ids", scaling.DBInstanceIds)
	// NOTE(review): LoadBalancerId (singular) is stored into the
	// "loadbalancer_ids" list attribute; the create path joins the list with
	// a separator, so this looks like it sets a joined string into a list —
	// confirm against the SDK type.
	d.Set("loadbalancer_ids", scaling.LoadBalancerId)

	return nil
}
|
||||
|
||||
// resourceAliyunEssScalingGroupUpdate applies changed attributes via
// ModifyScalingGroup. Note that Create also funnels through here, so every
// HasChange branch fires on initial creation as well.
func resourceAliyunEssScalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {

	conn := meta.(*AliyunClient).essconn
	args := &ess.ModifyScalingGroupArgs{
		ScalingGroupId: d.Id(),
	}

	if d.HasChange("scaling_group_name") {
		args.ScalingGroupName = d.Get("scaling_group_name").(string)
	}

	if d.HasChange("min_size") {
		args.MinSize = d.Get("min_size").(int)
	}

	if d.HasChange("max_size") {
		args.MaxSize = d.Get("max_size").(int)
	}

	if d.HasChange("default_cooldown") {
		args.DefaultCooldown = d.Get("default_cooldown").(int)
	}

	if d.HasChange("removal_policies") {
		policyStrings := d.Get("removal_policies").([]interface{})
		args.RemovalPolicy = expandStringList(policyStrings)
	}

	if _, err := conn.ModifyScalingGroup(args); err != nil {
		return err
	}

	// Re-read so state reflects what the API actually stored.
	return resourceAliyunEssScalingGroupRead(d, meta)
}
|
||||
|
||||
func resourceAliyunEssScalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
return resource.Retry(2*time.Minute, func() *resource.RetryError {
|
||||
err := client.DeleteScalingGroupById(d.Id())
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound {
|
||||
return resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
_, err = client.DescribeScalingGroupById(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
// buildAlicloudEssScalingGroupArgs assembles CreateScalingGroupArgs from the
// resource configuration. When a vswitch is given, the owning VPC is resolved
// and filled in, since the create API requires both.
func buildAlicloudEssScalingGroupArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingGroupArgs, error) {
	client := meta.(*AliyunClient)
	args := &ess.CreateScalingGroupArgs{
		RegionId:        getRegion(d, meta),
		MinSize:         d.Get("min_size").(int),
		MaxSize:         d.Get("max_size").(int),
		DefaultCooldown: d.Get("default_cooldown").(int),
	}

	if v := d.Get("scaling_group_name").(string); v != "" {
		args.ScalingGroupName = v
	}

	if v := d.Get("vswitch_id").(string); v != "" {
		args.VSwitchId = v

		// Resolve the VPC that owns the vswitch; an unresolvable vswitch
		// means it does not exist in the configured region.
		vpcId, err := client.GetVpcIdByVSwitchId(v)

		if err != nil {
			return nil, fmt.Errorf("VswitchId %s is not valid of current region", v)
		}
		// fill vpcId by vswitchId
		args.VpcId = vpcId

	}

	dbs, ok := d.GetOk("db_instance_ids")
	if ok {
		dbsStrings := dbs.([]interface{})
		args.DBInstanceId = expandStringList(dbsStrings)
	}

	lbs, ok := d.GetOk("loadbalancer_ids")
	if ok {
		lbsStrings := lbs.([]interface{})
		// The API takes a single separator-joined string of SLB IDs.
		args.LoadBalancerId = strings.Join(expandStringList(lbsStrings), COMMA_SEPARATED)
	}

	return args, nil
}
|
|
@ -1,297 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAccAlicloudEssScalingGroup_basic creates a minimal scaling group and
// verifies its capacity bounds, name, and removal-policy count.
func TestAccAlicloudEssScalingGroup_basic(t *testing.T) {
	var sg ess.ScalingGroupItemType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_ess_scaling_group.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEssScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccEssScalingGroupConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEssScalingGroupExists(
						"alicloud_ess_scaling_group.foo", &sg),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"min_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"max_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"scaling_group_name",
						"foo"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"removal_policies.#",
						"2",
					),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudEssScalingGroup_update creates a scaling group, then applies
// a second config to verify in-place updates of sizes, name, and removal
// policies.
func TestAccAlicloudEssScalingGroup_update(t *testing.T) {
	var sg ess.ScalingGroupItemType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_ess_scaling_group.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEssScalingGroupDestroy,
		Steps: []resource.TestStep{
			// Step 1: initial creation with min/max of 1 and two policies.
			resource.TestStep{
				Config: testAccEssScalingGroup,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEssScalingGroupExists(
						"alicloud_ess_scaling_group.foo", &sg),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"min_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"max_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"scaling_group_name",
						"foo"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"removal_policies.#",
						"2",
					),
				),
			},

			// Step 2: update every mutable attribute and re-verify.
			resource.TestStep{
				Config: testAccEssScalingGroup_update,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEssScalingGroupExists(
						"alicloud_ess_scaling_group.foo", &sg),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"min_size",
						"2"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"max_size",
						"2"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"scaling_group_name",
						"update"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"removal_policies.#",
						"1",
					),
				),
			},
		},
	})

}
|
||||
|
||||
// SkipTestAccAlicloudEssScalingGroup_vpc exercises a scaling group placed in a
// VPC vswitch. The "Skip" prefix deliberately hides it from `go test`'s
// Test* discovery, disabling the test without deleting it.
func SkipTestAccAlicloudEssScalingGroup_vpc(t *testing.T) {
	var sg ess.ScalingGroupItemType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_ess_scaling_group.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEssScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccEssScalingGroup_vpc,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEssScalingGroupExists(
						"alicloud_ess_scaling_group.foo", &sg),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"min_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"max_size",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"scaling_group_name",
						"foo"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_group.foo",
						"removal_policies.#",
						"2",
					),
				),
			},
		},
	})

}
|
||||
|
||||
// testAccCheckEssScalingGroupExists returns a check that looks up the named
// resource in state, fetches the scaling group from the API, and copies it
// into d for later assertions.
func testAccCheckEssScalingGroupExists(n string, d *ess.ScalingGroupItemType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ESS Scaling Group ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		attr, err := client.DescribeScalingGroupById(rs.Primary.ID)
		log.Printf("[DEBUG] check scaling group %s attribute %#v", rs.Primary.ID, attr)

		if err != nil {
			return err
		}

		if attr == nil {
			return fmt.Errorf("Scaling Group not found")
		}

		// Hand the fetched group back to the caller for further checks.
		*d = *attr
		return nil
	}
}
|
||||
|
||||
func testAccCheckEssScalingGroupDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_ess_scaling_group" {
|
||||
continue
|
||||
}
|
||||
|
||||
ins, err := client.DescribeScalingGroupById(rs.Primary.ID)
|
||||
|
||||
if ins != nil {
|
||||
return fmt.Errorf("Error ESS scaling group still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InstanceNotfound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccEssScalingGroupConfig is the minimal scaling-group fixture for the
// basic acceptance test.
const testAccEssScalingGroupConfig = `
resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
removal_policies = ["OldestInstance", "NewestInstance"]
}
`
|
||||
|
||||
// testAccEssScalingGroup is the initial configuration for the update test
// (identical content to testAccEssScalingGroupConfig).
const testAccEssScalingGroup = `
resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
removal_policies = ["OldestInstance", "NewestInstance"]
}
`
|
||||
|
||||
// testAccEssScalingGroup_update changes every mutable attribute of the group
// relative to testAccEssScalingGroup, for the in-place update step.
const testAccEssScalingGroup_update = `
resource "alicloud_ess_scaling_group" "foo" {
min_size = 2
max_size = 2
scaling_group_name = "update"
removal_policies = ["OldestInstance"]
}
`
|
||||
// testAccEssScalingGroup_vpc builds a full VPC (vpc, vswitch, security group)
// and places a scaling group plus an enabled scaling configuration in it.
const testAccEssScalingGroup_vpc = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

data "alicloud_zones" "default" {
"available_disk_category"= "cloud_efficiency"
"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
name = "tf_test_foo"
cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
vpc_id = "${alicloud_vpc.foo.id}"
cidr_block = "172.16.0.0/21"
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
vpc_id = "${alicloud_vpc.foo.id}"
}

resource "alicloud_ess_scaling_group" "foo" {
min_size = 1
max_size = 1
scaling_group_name = "foo"
default_cooldown = 20
vswitch_id = "${alicloud_vswitch.foo.id}"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.foo.id}"
enable = true

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.n1.medium"
io_optimized = "optimized"
system_disk_category = "cloud_efficiency"
internet_charge_type = "PayByTraffic"
internet_max_bandwidth_out = 10
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}
`
|
|
@ -1,168 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAlicloudEssScalingRule defines the alicloud_ess_scaling_rule
// resource: a rule that adjusts a scaling group's capacity by quantity,
// percentage, or to an absolute total.
func resourceAlicloudEssScalingRule() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliyunEssScalingRuleCreate,
		Read:   resourceAliyunEssScalingRuleRead,
		Update: resourceAliyunEssScalingRuleUpdate,
		Delete: resourceAliyunEssScalingRuleDelete,

		Schema: map[string]*schema.Schema{
			"scaling_group_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// One of QuantityChangeInCapacity, PercentChangeInCapacity,
			// or TotalCapacity.
			"adjustment_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ValidateFunc: validateAllowedStringValue([]string{string(ess.QuantityChangeInCapacity),
					string(ess.PercentChangeInCapacity), string(ess.TotalCapacity)}),
			},
			"adjustment_value": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			// Computed when omitted: the API assigns a name.
			"scaling_rule_name": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
				Optional: true,
			},
			// The rule's ARI (unique resource identifier), set by the API.
			"ari": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// Per-rule cooldown in seconds (0-86400).
			"cooldown": &schema.Schema{
				Type:         schema.TypeInt,
				Optional:     true,
				ValidateFunc: validateIntegerInRange(0, 86400),
			},
		},
	}
}
|
||||
|
||||
// resourceAliyunEssScalingRuleCreate creates the scaling rule and stores a
// composite ID of "<scaling_group_id>:<scaling_rule_id>", then delegates to
// Update to apply the remaining attributes.
func resourceAliyunEssScalingRuleCreate(d *schema.ResourceData, meta interface{}) error {

	args, err := buildAlicloudEssScalingRuleArgs(d, meta)
	if err != nil {
		return err
	}

	essconn := meta.(*AliyunClient).essconn

	rule, err := essconn.CreateScalingRule(args)
	if err != nil {
		return err
	}

	// Composite ID: both halves are needed to describe the rule later.
	d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + rule.ScalingRuleId)

	return resourceAliyunEssScalingRuleUpdate(d, meta)
}
|
||||
|
||||
func resourceAliyunEssScalingRuleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
ids := strings.Split(d.Id(), COLON_SEPARATED)
|
||||
|
||||
rule, err := client.DescribeScalingRuleById(ids[0], ids[1])
|
||||
if err != nil {
|
||||
if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error Describe ESS scaling rule Attribute: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("scaling_group_id", rule.ScalingGroupId)
|
||||
d.Set("ari", rule.ScalingRuleAri)
|
||||
d.Set("adjustment_type", rule.AdjustmentType)
|
||||
d.Set("adjustment_value", rule.AdjustmentValue)
|
||||
d.Set("scaling_rule_name", rule.ScalingRuleName)
|
||||
d.Set("cooldown", rule.Cooldown)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resourceAliyunEssScalingRuleDelete deletes the scaling rule, retrying for up
// to two minutes. Note every delete error is treated as retryable ("in use"),
// and success is only declared once a follow-up describe reports not-found.
func resourceAliyunEssScalingRuleDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	// ID layout: "<scaling_group_id>:<scaling_rule_id>".
	ids := strings.Split(d.Id(), COLON_SEPARATED)

	return resource.Retry(2*time.Minute, func() *resource.RetryError {
		err := client.DeleteScalingRuleById(ids[1])

		if err != nil {
			return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted."))
		}

		// Confirm the rule is gone before declaring success.
		_, err = client.DescribeScalingRuleById(ids[0], ids[1])
		if err != nil {
			if notFoundError(err) {
				return nil
			}
			return resource.NonRetryableError(err)
		}

		return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted."))
	})
}
|
||||
|
||||
// resourceAliyunEssScalingRuleUpdate applies changed attributes via
// ModifyScalingRule. Create also funnels through here, so each HasChange
// branch fires on initial creation too.
func resourceAliyunEssScalingRuleUpdate(d *schema.ResourceData, meta interface{}) error {

	conn := meta.(*AliyunClient).essconn
	// ID layout: "<scaling_group_id>:<scaling_rule_id>".
	ids := strings.Split(d.Id(), COLON_SEPARATED)

	args := &ess.ModifyScalingRuleArgs{
		ScalingRuleId: ids[1],
	}

	if d.HasChange("adjustment_type") {
		args.AdjustmentType = ess.AdjustmentType(d.Get("adjustment_type").(string))
	}

	if d.HasChange("adjustment_value") {
		args.AdjustmentValue = d.Get("adjustment_value").(int)
	}

	if d.HasChange("scaling_rule_name") {
		args.ScalingRuleName = d.Get("scaling_rule_name").(string)
	}

	if d.HasChange("cooldown") {
		args.Cooldown = d.Get("cooldown").(int)
	}

	if _, err := conn.ModifyScalingRule(args); err != nil {
		return err
	}

	return resourceAliyunEssScalingRuleRead(d, meta)
}
|
||||
|
||||
// buildAlicloudEssScalingRuleArgs assembles CreateScalingRuleArgs from the
// resource configuration.
func buildAlicloudEssScalingRuleArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingRuleArgs, error) {
	args := &ess.CreateScalingRuleArgs{
		RegionId:        getRegion(d, meta),
		ScalingGroupId:  d.Get("scaling_group_id").(string),
		AdjustmentType:  ess.AdjustmentType(d.Get("adjustment_type").(string)),
		AdjustmentValue: d.Get("adjustment_value").(int),
	}

	if v := d.Get("scaling_rule_name").(string); v != "" {
		args.ScalingRuleName = v
	}

	// A cooldown of 0 is treated as "unset" and left for the API default.
	if v := d.Get("cooldown").(int); v != 0 {
		args.Cooldown = v
	}

	return args, nil
}
|
|
@ -1,290 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAccAlicloudEssScalingRule_basic creates a scaling rule and verifies its
// adjustment type and value.
func TestAccAlicloudEssScalingRule_basic(t *testing.T) {
	var sc ess.ScalingRuleItemType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_ess_scaling_rule.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEssScalingRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccEssScalingRuleConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEssScalingRuleExists(
						"alicloud_ess_scaling_rule.foo", &sc),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_rule.foo",
						"adjustment_type",
						"TotalCapacity"),
					resource.TestCheckResourceAttr(
						"alicloud_ess_scaling_rule.foo",
						"adjustment_value",
						"1"),
				),
			},
		},
	})
}
|
||||
|
||||
func TestAccAlicloudEssScalingRule_update(t *testing.T) {
|
||||
var sc ess.ScalingRuleItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_scaling_rule.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScalingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingRule,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingRuleExists(
|
||||
"alicloud_ess_scaling_rule.foo", &sc),
|
||||
testAccCheckEssScalingRuleExists(
|
||||
"alicloud_ess_scaling_rule.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_rule.foo",
|
||||
"adjustment_type",
|
||||
"TotalCapacity"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_rule.foo",
|
||||
"adjustment_value",
|
||||
"1"),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccEssScalingRule_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScalingRuleExists(
|
||||
"alicloud_ess_scaling_rule.foo", &sc),
|
||||
testAccCheckEssScalingRuleExists(
|
||||
"alicloud_ess_scaling_rule.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_rule.foo",
|
||||
"adjustment_type",
|
||||
"TotalCapacity"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_scaling_rule.foo",
|
||||
"adjustment_value",
|
||||
"2"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// testAccCheckEssScalingRuleExists returns a check that splits the composite
// "<group_id>:<rule_id>" state ID, fetches the rule from the API, and copies
// it into d for later assertions.
func testAccCheckEssScalingRuleExists(n string, d *ess.ScalingRuleItemType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ESS Scaling Rule ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		ids := strings.Split(rs.Primary.ID, COLON_SEPARATED)
		attr, err := client.DescribeScalingRuleById(ids[0], ids[1])
		log.Printf("[DEBUG] check scaling rule %s attribute %#v", rs.Primary.ID, attr)

		if err != nil {
			return err
		}

		if attr == nil {
			return fmt.Errorf("Scaling rule not found")
		}

		// Hand the fetched rule back to the caller for further checks.
		*d = *attr
		return nil
	}
}
|
||||
|
||||
func testAccCheckEssScalingRuleDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_ess_scaling_rule" {
|
||||
continue
|
||||
}
|
||||
ids := strings.Split(rs.Primary.ID, COLON_SEPARATED)
|
||||
ins, err := client.DescribeScalingRuleById(ids[0], ids[1])
|
||||
|
||||
if ins != nil {
|
||||
return fmt.Errorf("Error ESS scaling rule still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InstanceNotfound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccEssScalingRuleConfig: full fixture (image lookup, security group and
// rule, scaling group and configuration) plus a TotalCapacity scaling rule
// with adjustment_value = 1.
const testAccEssScalingRuleConfig = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "bar" {
min_size = 1
max_size = 1
scaling_group_name = "bar"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}

resource "alicloud_ess_scaling_rule" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"
adjustment_type = "TotalCapacity"
adjustment_value = 1
cooldown = 120
}
`
|
||||
|
||||
// testAccEssScalingRule is the initial configuration for the update test
// (identical content to testAccEssScalingRuleConfig).
const testAccEssScalingRule = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "bar" {
min_size = 1
max_size = 1
scaling_group_name = "bar"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}

resource "alicloud_ess_scaling_rule" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"
adjustment_type = "TotalCapacity"
adjustment_value = 1
cooldown = 120
}
`
|
||||
|
||||
// testAccEssScalingRule_update changes adjustment_value (1 -> 2) and cooldown
// (120 -> 60) relative to testAccEssScalingRule, for the update step.
const testAccEssScalingRule_update = `
data "alicloud_images" "ecs_image" {
most_recent = true
name_regex = "^centos_6\\w{1,5}[64].*"
}

resource "alicloud_security_group" "tf_test_foo" {
description = "foo"
}

resource "alicloud_security_group_rule" "ssh-in" {
type = "ingress"
ip_protocol = "tcp"
nic_type = "internet"
policy = "accept"
port_range = "22/22"
priority = 1
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
cidr_ip = "0.0.0.0/0"
}

resource "alicloud_ess_scaling_group" "bar" {
min_size = 1
max_size = 1
scaling_group_name = "bar"
removal_policies = ["OldestInstance", "NewestInstance"]
}

resource "alicloud_ess_scaling_configuration" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"

image_id = "${data.alicloud_images.ecs_image.images.0.id}"
instance_type = "ecs.s2.large"
io_optimized = "optimized"
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
}

resource "alicloud_ess_scaling_rule" "foo" {
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"
adjustment_type = "TotalCapacity"
adjustment_value = 2
cooldown = 60
}
`
|
|
@ -1,220 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAlicloudEssSchedule() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunEssScheduleCreate,
|
||||
Read: resourceAliyunEssScheduleRead,
|
||||
Update: resourceAliyunEssScheduleUpdate,
|
||||
Delete: resourceAliyunEssScheduleDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"scheduled_action": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"launch_time": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"scheduled_task_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
},
|
||||
"launch_expiration_time": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Default: 600,
|
||||
Optional: true,
|
||||
ValidateFunc: validateIntegerInRange(0, 21600),
|
||||
},
|
||||
"recurrence_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
ValidateFunc: validateAllowedStringValue([]string{string(ess.Daily),
|
||||
string(ess.Weekly), string(ess.Monthly)}),
|
||||
},
|
||||
"recurrence_value": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
},
|
||||
"recurrence_end_time": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
},
|
||||
"task_enabled": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Default: true,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunEssScheduleCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
args, err := buildAlicloudEssScheduleArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
essconn := meta.(*AliyunClient).essconn
|
||||
|
||||
rule, err := essconn.CreateScheduledTask(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(rule.ScheduledTaskId)
|
||||
|
||||
return resourceAliyunEssScheduleUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunEssScheduleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
rule, err := client.DescribeScheduleById(d.Id())
|
||||
if err != nil {
|
||||
if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error Describe ESS schedule Attribute: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("scheduled_action", rule.ScheduledAction)
|
||||
d.Set("launch_time", rule.LaunchTime)
|
||||
d.Set("scheduled_task_name", rule.ScheduledTaskName)
|
||||
d.Set("description", rule.Description)
|
||||
d.Set("launch_expiration_time", rule.LaunchExpirationTime)
|
||||
d.Set("recurrence_type", rule.RecurrenceType)
|
||||
d.Set("recurrence_value", rule.RecurrenceValue)
|
||||
d.Set("recurrence_end_time", rule.RecurrenceEndTime)
|
||||
d.Set("task_enabled", rule.TaskEnabled)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunEssScheduleUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).essconn
|
||||
|
||||
args := &ess.ModifyScheduledTaskArgs{
|
||||
ScheduledTaskId: d.Id(),
|
||||
}
|
||||
|
||||
if d.HasChange("scheduled_task_name") {
|
||||
args.ScheduledTaskName = d.Get("scheduled_task_name").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
args.Description = d.Get("description").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("scheduled_action") {
|
||||
args.ScheduledAction = d.Get("scheduled_action").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("launch_time") {
|
||||
args.LaunchTime = d.Get("launch_time").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("launch_expiration_time") {
|
||||
args.LaunchExpirationTime = d.Get("launch_expiration_time").(int)
|
||||
}
|
||||
|
||||
if d.HasChange("recurrence_type") {
|
||||
args.RecurrenceType = ess.RecurrenceType(d.Get("recurrence_type").(string))
|
||||
}
|
||||
|
||||
if d.HasChange("recurrence_value") {
|
||||
args.RecurrenceValue = d.Get("recurrence_value").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("recurrence_end_time") {
|
||||
args.RecurrenceEndTime = d.Get("recurrence_end_time").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("task_enabled") {
|
||||
args.TaskEnabled = d.Get("task_enabled").(bool)
|
||||
}
|
||||
|
||||
if _, err := conn.ModifyScheduledTask(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceAliyunEssScheduleRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunEssScheduleDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
return resource.Retry(2*time.Minute, func() *resource.RetryError {
|
||||
err := client.DeleteScheduleById(d.Id())
|
||||
|
||||
if err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
_, err = client.DescribeScheduleById(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
func buildAlicloudEssScheduleArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScheduledTaskArgs, error) {
|
||||
args := &ess.CreateScheduledTaskArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ScheduledAction: d.Get("scheduled_action").(string),
|
||||
LaunchTime: d.Get("launch_time").(string),
|
||||
TaskEnabled: d.Get("task_enabled").(bool),
|
||||
}
|
||||
|
||||
if v := d.Get("scheduled_task_name").(string); v != "" {
|
||||
args.ScheduledTaskName = v
|
||||
}
|
||||
|
||||
if v := d.Get("description").(string); v != "" {
|
||||
args.Description = v
|
||||
}
|
||||
|
||||
if v := d.Get("recurrence_type").(string); v != "" {
|
||||
args.RecurrenceType = ess.RecurrenceType(v)
|
||||
}
|
||||
|
||||
if v := d.Get("recurrence_value").(string); v != "" {
|
||||
args.RecurrenceValue = v
|
||||
}
|
||||
|
||||
if v := d.Get("recurrence_end_time").(string); v != "" {
|
||||
args.RecurrenceEndTime = v
|
||||
}
|
||||
|
||||
if v := d.Get("launch_expiration_time").(int); v != 0 {
|
||||
args.LaunchExpirationTime = v
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,151 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudEssSchedule_basic(t *testing.T) {
|
||||
var sc ess.ScheduledTaskItemType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_ess_schedule.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckEssScheduleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccEssScheduleConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckEssScheduleExists(
|
||||
"alicloud_ess_schedule.foo", &sc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_schedule.foo",
|
||||
"launch_time",
|
||||
"2017-04-29T07:30Z"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_ess_schedule.foo",
|
||||
"task_enabled",
|
||||
"true"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckEssScheduleExists(n string, d *ess.ScheduledTaskItemType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ESS Schedule ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
attr, err := client.DescribeScheduleById(rs.Primary.ID)
|
||||
log.Printf("[DEBUG] check schedule %s attribute %#v", rs.Primary.ID, attr)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if attr == nil {
|
||||
return fmt.Errorf("Ess schedule not found")
|
||||
}
|
||||
|
||||
*d = *attr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEssScheduleDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_ess_schedule" {
|
||||
continue
|
||||
}
|
||||
ins, err := client.DescribeScheduleById(rs.Primary.ID)
|
||||
|
||||
if ins != nil {
|
||||
return fmt.Errorf("Error ESS schedule still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InstanceNotfound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccEssScheduleConfig = `
|
||||
data "alicloud_images" "ecs_image" {
|
||||
most_recent = true
|
||||
name_regex = "^centos_6\\w{1,5}[64].*"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group" "tf_test_foo" {
|
||||
name = "tf_test_foo"
|
||||
description = "foo"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "ssh-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "22/22"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_group" "bar" {
|
||||
min_size = 1
|
||||
max_size = 1
|
||||
scaling_group_name = "bar"
|
||||
removal_policies = ["OldestInstance", "NewestInstance"]
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_configuration" "foo" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"
|
||||
|
||||
image_id = "${data.alicloud_images.ecs_image.images.0.id}"
|
||||
instance_type = "ecs.s2.large"
|
||||
io_optimized = "optimized"
|
||||
security_group_id = "${alicloud_security_group.tf_test_foo.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_ess_scaling_rule" "foo" {
|
||||
scaling_group_id = "${alicloud_ess_scaling_group.bar.id}"
|
||||
adjustment_type = "TotalCapacity"
|
||||
adjustment_value = 2
|
||||
cooldown = 60
|
||||
}
|
||||
|
||||
resource "alicloud_ess_schedule" "foo" {
|
||||
scheduled_action = "${alicloud_ess_scaling_rule.foo.ari}"
|
||||
launch_time = "2017-04-29T07:30Z"
|
||||
scheduled_task_name = "tf-foo"
|
||||
}
|
||||
`
|
|
@ -1,165 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAliyunForwardEntry() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunForwardEntryCreate,
|
||||
Read: resourceAliyunForwardEntryRead,
|
||||
Update: resourceAliyunForwardEntryUpdate,
|
||||
Delete: resourceAliyunForwardEntryDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"forward_table_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"external_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"external_port": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validateForwardPort,
|
||||
},
|
||||
"ip_protocol": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validateAllowedStringValue([]string{"tcp", "udp", "any"}),
|
||||
},
|
||||
"internal_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"internal_port": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validateForwardPort,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunForwardEntryCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).vpcconn
|
||||
|
||||
args := &ecs.CreateForwardEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ForwardTableId: d.Get("forward_table_id").(string),
|
||||
ExternalIp: d.Get("external_ip").(string),
|
||||
ExternalPort: d.Get("external_port").(string),
|
||||
IpProtocol: d.Get("ip_protocol").(string),
|
||||
InternalIp: d.Get("internal_ip").(string),
|
||||
InternalPort: d.Get("internal_port").(string),
|
||||
}
|
||||
|
||||
resp, err := conn.CreateForwardEntry(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("CreateForwardEntry got error: %#v", err)
|
||||
}
|
||||
|
||||
d.SetId(resp.ForwardEntryId)
|
||||
d.Set("forward_table_id", d.Get("forward_table_id").(string))
|
||||
|
||||
return resourceAliyunForwardEntryRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunForwardEntryRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id())
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("forward_table_id", forwardEntry.ForwardTableId)
|
||||
d.Set("external_ip", forwardEntry.ExternalIp)
|
||||
d.Set("external_port", forwardEntry.ExternalPort)
|
||||
d.Set("ip_protocol", forwardEntry.IpProtocol)
|
||||
d.Set("internal_ip", forwardEntry.InternalIp)
|
||||
d.Set("internal_port", forwardEntry.InternalPort)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunForwardEntryUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifyForwardEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ForwardTableId: forwardEntry.ForwardTableId,
|
||||
ForwardEntryId: forwardEntry.ForwardEntryId,
|
||||
ExternalIp: forwardEntry.ExternalIp,
|
||||
IpProtocol: forwardEntry.IpProtocol,
|
||||
ExternalPort: forwardEntry.ExternalPort,
|
||||
InternalIp: forwardEntry.InternalIp,
|
||||
InternalPort: forwardEntry.InternalPort,
|
||||
}
|
||||
|
||||
if d.HasChange("external_port") {
|
||||
d.SetPartial("external_port")
|
||||
args.ExternalPort = d.Get("external_port").(string)
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("ip_protocol") {
|
||||
d.SetPartial("ip_protocol")
|
||||
args.IpProtocol = d.Get("ip_protocol").(string)
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("internal_port") {
|
||||
d.SetPartial("internal_port")
|
||||
args.InternalPort = d.Get("internal_port").(string)
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifyForwardEntry(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunForwardEntryRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunForwardEntryDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
forwardEntryId := d.Id()
|
||||
forwardTableId := d.Get("forward_table_id").(string)
|
||||
|
||||
args := &ecs.DeleteForwardEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ForwardTableId: forwardTableId,
|
||||
ForwardEntryId: forwardEntryId,
|
||||
}
|
||||
|
||||
if err := conn.DeleteForwardEntry(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,216 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudForward_basic(t *testing.T) {
|
||||
var forward ecs.ForwardTableEntrySetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_forward_entry.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckForwardEntryDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccForwardEntryConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckForwardEntryExists(
|
||||
"alicloud_forward_entry.foo", &forward),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccForwardEntryUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckForwardEntryExists(
|
||||
"alicloud_forward_entry.foo", &forward),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckForwardEntryDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_snat_entry" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the Snat entry
|
||||
instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID)
|
||||
|
||||
//this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error"
|
||||
if instance.ForwardEntryId == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if instance.ForwardEntryId != "" {
|
||||
return fmt.Errorf("Forward entry still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
|
||||
if !notFoundError(e) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckForwardEntryExists(n string, snat *ecs.ForwardTableEntrySetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ForwardEntry ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance.ForwardEntryId == "" {
|
||||
return fmt.Errorf("ForwardEntry not found")
|
||||
}
|
||||
|
||||
*snat = instance
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const testAccForwardEntryConfig = `
|
||||
provider "alicloud"{
|
||||
region = "cn-hangzhou"
|
||||
}
|
||||
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
cidr_block = "172.16.0.0/21"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_nat_gateway" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
spec = "Small"
|
||||
name = "test_foo"
|
||||
bandwidth_packages = [{
|
||||
ip_count = 1
|
||||
bandwidth = 5
|
||||
zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
},{
|
||||
ip_count = 1
|
||||
bandwidth = 6
|
||||
zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
}]
|
||||
depends_on = [
|
||||
"alicloud_vswitch.foo"]
|
||||
}
|
||||
|
||||
resource "alicloud_forward_entry" "foo"{
|
||||
forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
|
||||
external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
|
||||
external_port = "80"
|
||||
ip_protocol = "tcp"
|
||||
internal_ip = "172.16.0.3"
|
||||
internal_port = "8080"
|
||||
}
|
||||
|
||||
resource "alicloud_forward_entry" "foo1"{
|
||||
forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
|
||||
external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
|
||||
external_port = "443"
|
||||
ip_protocol = "udp"
|
||||
internal_ip = "172.16.0.4"
|
||||
internal_port = "8080"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccForwardEntryUpdate = `
|
||||
provider "alicloud"{
|
||||
region = "cn-hangzhou"
|
||||
}
|
||||
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
cidr_block = "172.16.0.0/21"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_nat_gateway" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
spec = "Small"
|
||||
name = "test_foo"
|
||||
bandwidth_packages = [{
|
||||
ip_count = 1
|
||||
bandwidth = 5
|
||||
zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
},{
|
||||
ip_count = 1
|
||||
bandwidth = 6
|
||||
zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
}]
|
||||
depends_on = [
|
||||
"alicloud_vswitch.foo"]
|
||||
}
|
||||
|
||||
resource "alicloud_forward_entry" "foo"{
|
||||
forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
|
||||
external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
|
||||
external_port = "80"
|
||||
ip_protocol = "tcp"
|
||||
internal_ip = "172.16.0.3"
|
||||
internal_port = "8081"
|
||||
}
|
||||
|
||||
|
||||
resource "alicloud_forward_entry" "foo1"{
|
||||
forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
|
||||
external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
|
||||
external_port = "22"
|
||||
ip_protocol = "udp"
|
||||
internal_ip = "172.16.0.4"
|
||||
internal_port = "8080"
|
||||
}
|
||||
`
|
|
@ -1,700 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunInstance() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunInstanceCreate,
|
||||
Read: resourceAliyunInstanceRead,
|
||||
Update: resourceAliyunInstanceUpdate,
|
||||
Delete: resourceAliyunInstanceDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"image_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"instance_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"security_groups": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"allocate_public_ip": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"instance_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "ECS-Instance",
|
||||
ValidateFunc: validateInstanceName,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateInstanceDescription,
|
||||
},
|
||||
|
||||
"internet_charge_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateInternetChargeType,
|
||||
},
|
||||
"internet_max_bandwidth_in": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"internet_max_bandwidth_out": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateInternetMaxBandWidthOut,
|
||||
},
|
||||
"host_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"password": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
"io_optimized": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateIoOptimized,
|
||||
},
|
||||
|
||||
"system_disk_category": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Default: "cloud",
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(ecs.DiskCategoryCloud),
|
||||
string(ecs.DiskCategoryCloudSSD),
|
||||
string(ecs.DiskCategoryCloudEfficiency),
|
||||
string(ecs.DiskCategoryEphemeralSSD),
|
||||
}),
|
||||
},
|
||||
"system_disk_size": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ValidateFunc: validateIntegerInRange(40, 500),
|
||||
},
|
||||
|
||||
//subnet_id and vswitch_id both exists, cause compatible old version, and aws habit.
|
||||
"subnet_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true, //add this schema cause subnet_id not used enter parameter, will different, so will be ForceNew
|
||||
},
|
||||
|
||||
"vswitch_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"instance_charge_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateInstanceChargeType,
|
||||
},
|
||||
"period": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"public_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"private_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"status": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"user_data": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"tags": tagsSchema(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunInstanceCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
// create postpaid instance by runInstances API
|
||||
if v := d.Get("instance_charge_type").(string); v != string(common.PrePaid) {
|
||||
return resourceAliyunRunInstance(d, meta)
|
||||
}
|
||||
|
||||
args, err := buildAliyunInstanceArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
instanceID, err := conn.CreateInstance(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Aliyun ecs instance: %#v", err)
|
||||
}
|
||||
|
||||
d.SetId(instanceID)
|
||||
|
||||
d.Set("password", d.Get("password"))
|
||||
|
||||
// after instance created, its status is pending,
|
||||
// so we need to wait it become to stopped and then start it
|
||||
if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Stopped, defaultTimeout); err != nil {
|
||||
return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Stopped, err)
|
||||
}
|
||||
|
||||
if err := allocateIpAndBandWidthRelative(d, meta); err != nil {
|
||||
return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err)
|
||||
}
|
||||
|
||||
if err := conn.StartInstance(d.Id()); err != nil {
|
||||
return fmt.Errorf("Start instance got error: %#v", err)
|
||||
}
|
||||
|
||||
if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil {
|
||||
return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err)
|
||||
}
|
||||
|
||||
return resourceAliyunInstanceUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
newConn := meta.(*AliyunClient).ecsNewconn
|
||||
|
||||
args, err := buildAliyunInstanceArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if args.IoOptimized == "optimized" {
|
||||
args.IoOptimized = ecs.IoOptimized("true")
|
||||
} else {
|
||||
args.IoOptimized = ecs.IoOptimized("false")
|
||||
}
|
||||
|
||||
runArgs, err := buildAliyunRunInstancesArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
runArgs.CreateInstanceArgs = *args
|
||||
|
||||
// runInstances is support in version 2016-03-14
|
||||
instanceIds, err := newConn.RunInstances(runArgs)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Aliyun ecs instance: %#v", err)
|
||||
}
|
||||
|
||||
d.SetId(instanceIds[0])
|
||||
|
||||
d.Set("password", d.Get("password"))
|
||||
d.Set("system_disk_category", d.Get("system_disk_category"))
|
||||
d.Set("system_disk_size", d.Get("system_disk_size"))
|
||||
|
||||
// after instance created, its status change from pending, starting to running
|
||||
if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil {
|
||||
return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err)
|
||||
}
|
||||
|
||||
if err := allocateIpAndBandWidthRelative(d, meta); err != nil {
|
||||
return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err)
|
||||
}
|
||||
|
||||
if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil {
|
||||
return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err)
|
||||
}
|
||||
|
||||
return resourceAliyunInstanceUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunInstanceRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
instance, err := client.QueryInstancesById(d.Id())
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error DescribeInstanceAttribute: %#v", err)
|
||||
}
|
||||
|
||||
disk, diskErr := client.QueryInstanceSystemDisk(d.Id())
|
||||
|
||||
if diskErr != nil {
|
||||
if notFoundError(diskErr) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error DescribeSystemDisk: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("instance_name", instance.InstanceName)
|
||||
d.Set("description", instance.Description)
|
||||
d.Set("status", instance.Status)
|
||||
d.Set("availability_zone", instance.ZoneId)
|
||||
d.Set("host_name", instance.HostName)
|
||||
d.Set("image_id", instance.ImageId)
|
||||
d.Set("instance_type", instance.InstanceType)
|
||||
d.Set("system_disk_category", disk.Category)
|
||||
d.Set("system_disk_size", disk.Size)
|
||||
|
||||
// In Classic network, internet_charge_type is valid in any case, and its default value is 'PayByBanwidth'.
|
||||
// In VPC network, internet_charge_type is valid when instance has public ip, and its default value is 'PayByBanwidth'.
|
||||
d.Set("internet_charge_type", instance.InternetChargeType)
|
||||
|
||||
if d.Get("allocate_public_ip").(bool) {
|
||||
d.Set("public_ip", instance.PublicIpAddress.IpAddress[0])
|
||||
}
|
||||
|
||||
if ecs.StringOrBool(instance.IoOptimized).Value {
|
||||
d.Set("io_optimized", "optimized")
|
||||
} else {
|
||||
d.Set("io_optimized", "none")
|
||||
}
|
||||
|
||||
if d.Get("subnet_id").(string) != "" || d.Get("vswitch_id").(string) != "" {
|
||||
ipAddress := instance.VpcAttributes.PrivateIpAddress.IpAddress[0]
|
||||
d.Set("private_ip", ipAddress)
|
||||
d.Set("subnet_id", instance.VpcAttributes.VSwitchId)
|
||||
d.Set("vswitch_id", instance.VpcAttributes.VSwitchId)
|
||||
} else {
|
||||
ipAddress := strings.Join(ecs.IpAddressSetType(instance.InnerIpAddress).IpAddress, ",")
|
||||
d.Set("private_ip", ipAddress)
|
||||
}
|
||||
|
||||
if d.Get("user_data").(string) != "" {
|
||||
ud, err := conn.DescribeUserdata(&ecs.DescribeUserdataArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
InstanceId: d.Id(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] DescribeUserData for instance got error: %#v", err)
|
||||
}
|
||||
d.Set("user_data", userDataHashSum(ud.UserData))
|
||||
}
|
||||
|
||||
tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
ResourceType: ecs.TagResourceInstance,
|
||||
ResourceId: d.Id(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] DescribeTags for instance got error: %#v", err)
|
||||
}
|
||||
d.Set("tags", tagsToMap(tags))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resourceAliyunInstanceUpdate reconciles mutable instance attributes:
// tags, image (via system-disk replacement), name/description/host name/
// password, and security-group membership. It uses partial state so each
// successfully applied change is recorded even if a later step fails.
func resourceAliyunInstanceUpdate(d *schema.ResourceData, meta interface{}) error {

	client := meta.(*AliyunClient)
	conn := client.ecsconn

	d.Partial(true)

	if err := setTags(client, ecs.TagResourceInstance, d); err != nil {
		log.Printf("[DEBUG] Set tags for instance got error: %#v", err)
		return fmt.Errorf("Set tags for instance got error: %#v", err)
	} else {
		d.SetPartial("tags")
	}

	imageUpdate := false
	// Changing image_id on an existing instance means replacing its system
	// disk with one built from the new image.
	if d.HasChange("image_id") && !d.IsNewResource() {
		log.Printf("[DEBUG] Replace instance system disk via changing image_id")
		replaceSystemArgs := &ecs.ReplaceSystemDiskArgs{
			InstanceId: d.Id(),
			ImageId:    d.Get("image_id").(string),
			SystemDisk: ecs.SystemDiskType{
				Size: d.Get("system_disk_size").(int),
			},
		}
		// The disk can only be replaced while the instance is stopped.
		if v, ok := d.GetOk("status"); ok && v.(string) != "" {
			if ecs.InstanceStatus(d.Get("status").(string)) == ecs.Running {
				log.Printf("[DEBUG] StopInstance before change system disk")
				if err := conn.StopInstance(d.Id(), true); err != nil {
					return fmt.Errorf("Force Stop Instance got an error: %#v", err)
				}
				if err := conn.WaitForInstance(d.Id(), ecs.Stopped, 60); err != nil {
					return fmt.Errorf("WaitForInstance got error: %#v", err)
				}
			}
		}
		_, err := conn.ReplaceSystemDisk(replaceSystemArgs)
		if err != nil {
			return fmt.Errorf("Replace system disk got an error: %#v", err)
		}
		// Ensure instance's image has been replaced successfully.
		timeout := ecs.InstanceDefaultTimeout
		for {
			instance, errDesc := conn.DescribeInstanceAttribute(d.Id())
			if errDesc != nil {
				return fmt.Errorf("Describe instance got an error: %#v", errDesc)
			}
			if instance.ImageId == d.Get("image_id") {
				break
			}
			time.Sleep(ecs.DefaultWaitForInterval * time.Second)
			timeout = timeout - ecs.DefaultWaitForInterval
			if timeout <= 0 {
				return common.GetClientErrorFromString("Timeout")
			}
		}
		imageUpdate = true
		d.SetPartial("system_disk_size")
		d.SetPartial("image_id")
	}
	// Provider doesn't support change 'system_disk_size'separately.
	if d.HasChange("system_disk_size") && !d.HasChange("image_id") {
		return fmt.Errorf("Update resource failed. 'system_disk_size' isn't allowed to change separately. You can update it via renewing instance or replacing system disk.")
	}

	// Collect all simple attribute changes into one ModifyInstanceAttribute
	// call instead of issuing one request per field.
	attributeUpdate := false
	args := &ecs.ModifyInstanceAttributeArgs{
		InstanceId: d.Id(),
	}

	if d.HasChange("instance_name") && !d.IsNewResource() {
		log.Printf("[DEBUG] ModifyInstanceAttribute instance_name")
		d.SetPartial("instance_name")
		args.InstanceName = d.Get("instance_name").(string)

		attributeUpdate = true
	}

	if d.HasChange("description") && !d.IsNewResource() {
		log.Printf("[DEBUG] ModifyInstanceAttribute description")
		d.SetPartial("description")
		args.Description = d.Get("description").(string)

		attributeUpdate = true
	}

	if d.HasChange("host_name") && !d.IsNewResource() {
		log.Printf("[DEBUG] ModifyInstanceAttribute host_name")
		d.SetPartial("host_name")
		args.HostName = d.Get("host_name").(string)

		attributeUpdate = true
	}

	// A password change additionally requires a reboot (tracked below).
	passwordUpdate := false
	if d.HasChange("password") && !d.IsNewResource() {
		log.Printf("[DEBUG] ModifyInstanceAttribute password")
		d.SetPartial("password")
		args.Password = d.Get("password").(string)

		attributeUpdate = true
		passwordUpdate = true
	}

	if attributeUpdate {
		if err := conn.ModifyInstanceAttribute(args); err != nil {
			return fmt.Errorf("Modify instance attribute got error: %#v", err)
		}
	}

	// Image or password changes only take effect after a reboot (if
	// running) or a start (if stopped).
	if imageUpdate || passwordUpdate {
		instance, errDesc := conn.DescribeInstanceAttribute(d.Id())
		if errDesc != nil {
			return fmt.Errorf("Describe instance got an error: %#v", errDesc)
		}
		if instance.Status != ecs.Running && instance.Status != ecs.Stopped {
			return fmt.Errorf("ECS instance's status doesn't support to start or reboot operation after replace image_id or update password. The current instance's status is %#v", instance.Status)
		} else if instance.Status == ecs.Running {
			log.Printf("[DEBUG] Reboot instance after change image or password")
			if err := conn.RebootInstance(d.Id(), false); err != nil {
				return fmt.Errorf("RebootInstance got error: %#v", err)
			}
		} else {
			log.Printf("[DEBUG] Start instance after change image or password")
			if err := conn.StartInstance(d.Id()); err != nil {
				return fmt.Errorf("StartInstance got error: %#v", err)
			}
		}
		// Start instance sometimes costs more than 6 minutes when os type is centos.
		if err := conn.WaitForInstance(d.Id(), ecs.Running, 400); err != nil {
			return fmt.Errorf("WaitForInstance got error: %#v", err)
		}
	}

	if d.HasChange("security_groups") {
		o, n := d.GetChange("security_groups")
		os := o.(*schema.Set)
		ns := n.(*schema.Set)

		// rl: groups to leave (removed); al: groups to join (added).
		rl := expandStringList(os.Difference(ns).List())
		al := expandStringList(ns.Difference(os).List())

		if len(al) > 0 {
			err := client.JoinSecurityGroups(d.Id(), al)
			if err != nil {
				return err
			}
		}
		if len(rl) > 0 {
			err := client.LeaveSecurityGroups(d.Id(), rl)
			if err != nil {
				return err
			}
		}

		d.SetPartial("security_groups")
	}

	d.Partial(false)
	return resourceAliyunInstanceRead(d, meta)
}
|
||||
|
||||
func resourceAliyunInstanceDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
instance, err := client.QueryInstancesById(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Status != ecs.Stopped {
|
||||
if err := conn.StopInstance(d.Id(), true); err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("ECS stop error - trying again."))
|
||||
}
|
||||
|
||||
if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("Waiting for ecs stopped timeout - trying again."))
|
||||
}
|
||||
}
|
||||
|
||||
if err := conn.DeleteInstance(d.Id()); err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("ECS Instance in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func allocateIpAndBandWidthRelative(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
if d.Get("allocate_public_ip").(bool) {
|
||||
if d.Get("internet_max_bandwidth_out") == 0 {
|
||||
return fmt.Errorf("Error: if allocate_public_ip is true than the internet_max_bandwidth_out cannot equal zero.")
|
||||
}
|
||||
_, err := conn.AllocatePublicIpAddress(d.Id())
|
||||
if err != nil {
|
||||
return fmt.Errorf("[DEBUG] AllocatePublicIpAddress for instance got error: %#v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildAliyunRunInstancesArgs assembles the RunInstances-specific arguments
// (batch size, business info, network type). The shared CreateInstanceArgs
// portion is filled in by the caller from buildAliyunInstanceArgs.
func buildAliyunRunInstancesArgs(d *schema.ResourceData, meta interface{}) (*ecs.RunInstanceArgs, error) {
	// Request exactly one instance per Terraform resource.
	args := &ecs.RunInstanceArgs{
		MaxAmount: DEFAULT_INSTANCE_COUNT,
		MinAmount: DEFAULT_INSTANCE_COUNT,
	}

	bussStr, err := json.Marshal(DefaultBusinessInfo)
	if err != nil {
		// Marshal failure is deliberately non-fatal: it is only logged and
		// BusinessInfo ends up as the empty string.
		log.Printf("Failed to translate bussiness info %#v from json to string", DefaultBusinessInfo)
	}

	args.BusinessInfo = string(bussStr)

	subnetValue := d.Get("subnet_id").(string)
	vswitchValue := d.Get("vswitch_id").(string)
	//networkValue := d.Get("instance_network_type").(string)

	// because runInstance is not compatible with createInstance, force NetworkType value to classic
	if subnetValue == "" && vswitchValue == "" {
		args.NetworkType = string(ClassicNet)
	}

	return args, nil
}
|
||||
|
||||
// buildAliyunInstanceArgs translates the Terraform schema values into an
// ecs.CreateInstanceArgs request, validating zone/disk availability and the
// interdependent networking and charge-type fields along the way.
func buildAliyunInstanceArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateInstanceArgs, error) {
	client := meta.(*AliyunClient)

	args := &ecs.CreateInstanceArgs{
		RegionId:     getRegion(d, meta),
		InstanceType: d.Get("instance_type").(string),
	}

	imageID := d.Get("image_id").(string)

	args.ImageId = imageID

	systemDiskCategory := ecs.DiskCategory(d.Get("system_disk_category").(string))
	systemDiskSize := d.Get("system_disk_size").(int)

	zoneID := d.Get("availability_zone").(string)
	// check instanceType and systemDiskCategory, when zoneID is not empty
	if zoneID != "" {
		zone, err := client.DescribeZone(zoneID)
		if err != nil {
			return nil, err
		}

		if err := client.ResourceAvailable(zone, ecs.ResourceTypeInstance); err != nil {
			return nil, err
		}

		if err := client.DiskAvailable(zone, systemDiskCategory); err != nil {
			return nil, err
		}

		args.ZoneId = zoneID

	}

	args.SystemDisk = ecs.SystemDiskType{
		Category: systemDiskCategory,
		Size:     systemDiskSize,
	}

	sgs, ok := d.GetOk("security_groups")

	if ok {
		// Only the first security group is passed to CreateInstance; the
		// remainder are joined later by the Update path.
		sgList := expandStringList(sgs.(*schema.Set).List())
		sg0 := sgList[0]
		// check security group instance exist
		_, err := client.DescribeSecurity(sg0)
		if err == nil {
			args.SecurityGroupId = sg0
		}
	}

	if v := d.Get("instance_name").(string); v != "" {
		args.InstanceName = v
	}

	if v := d.Get("description").(string); v != "" {
		args.Description = v
	}

	if v := d.Get("internet_charge_type").(string); v != "" {
		args.InternetChargeType = common.InternetChargeType(v)
	}

	if v := d.Get("internet_max_bandwidth_out").(int); v != 0 {
		args.InternetMaxBandwidthOut = v
	}

	if v := d.Get("host_name").(string); v != "" {
		args.HostName = v
	}

	if v := d.Get("password").(string); v != "" {
		args.Password = v
	}

	if v := d.Get("io_optimized").(string); v != "" {
		args.IoOptimized = ecs.IoOptimized(v)
	}

	// subnet_id and vswitch_id are aliases; subnet_id wins when both set.
	vswitchValue := d.Get("subnet_id").(string)
	if vswitchValue == "" {
		vswitchValue = d.Get("vswitch_id").(string)
	}
	if vswitchValue != "" {
		args.VSwitchId = vswitchValue
		// In a VPC, a public IP cannot be allocated with zero bandwidth.
		if d.Get("allocate_public_ip").(bool) && args.InternetMaxBandwidthOut <= 0 {
			return nil, fmt.Errorf("Invalid internet_max_bandwidth_out result in allocation public ip failed in the VPC.")
		}
	}

	if v := d.Get("instance_charge_type").(string); v != "" {
		args.InstanceChargeType = common.InstanceChargeType(v)
	}

	log.Printf("[DEBUG] period is %d", d.Get("period").(int))
	if v := d.Get("period").(int); v != 0 {
		args.Period = v
	} else if args.InstanceChargeType == common.PrePaid {
		// PrePaid (subscription) instances must specify a billing period.
		return nil, fmt.Errorf("period is required for instance_charge_type is PrePaid")
	}

	if v := d.Get("user_data").(string); v != "" {
		args.UserData = v
	}

	return args, nil
}
|
||||
|
||||
// userDataHashSum normalizes user_data to its plain-text form: input that is
// valid base64 is decoded, anything else is returned verbatim. Working from
// the decoded value guards against double-encoding when the field is set.
func userDataHashSum(user_data string) string {
	decoded, decodeErr := base64.StdEncoding.DecodeString(user_data)
	if decodeErr != nil {
		// Not base64-encoded; use the raw value as-is.
		return user_data
	}
	return string(decoded)
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,374 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAliyunNatGateway defines the alicloud_nat_gateway resource: its
// CRUD entry points and schema. A gateway belongs to one VPC and carries
// between one and four bandwidth packages.
func resourceAliyunNatGateway() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliyunNatGatewayCreate,
		Read:   resourceAliyunNatGatewayRead,
		Update: resourceAliyunNatGatewayUpdate,
		Delete: resourceAliyunNatGatewayDelete,

		Schema: map[string]*schema.Schema{
			// Owning VPC; changing it forces a new gateway.
			"vpc_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Gateway size class (e.g. Small/Middle/Large); updatable in place.
			"spec": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			// Comma-joined ID lists reported back by the API (read-only).
			"bandwidth_package_ids": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"snat_table_ids": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"forward_table_ids": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// One to four bandwidth packages attached at creation time.
			"bandwidth_packages": &schema.Schema{
				Type: schema.TypeList,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"ip_count": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
						"bandwidth": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
						"zone": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
						// Comma-joined public IPs of the package (read-only).
						"public_ip_addresses": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
				Required: true,
				MaxItems: 4,
			},
		},
	}
}
|
||||
|
||||
// resourceAliyunNatGatewayCreate creates a NAT gateway with its bandwidth
// packages and stores the returned gateway ID, then reads back the state.
func resourceAliyunNatGatewayCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AliyunClient).vpcconn

	args := &ecs.CreateNatGatewayArgs{
		RegionId: getRegion(d, meta),
		VpcId:    d.Get("vpc_id").(string),
		Spec:     d.Get("spec").(string),
	}

	// Convert the schema's bandwidth_packages list into API request types.
	bandwidthPackages := d.Get("bandwidth_packages").([]interface{})

	bandwidthPackageTypes := []ecs.BandwidthPackageType{}

	for _, e := range bandwidthPackages {
		pack := e.(map[string]interface{})
		bandwidthPackage := ecs.BandwidthPackageType{
			IpCount:   pack["ip_count"].(int),
			Bandwidth: pack["bandwidth"].(int),
		}
		// Zone is optional; only forward it when the user set one.
		if pack["zone"].(string) != "" {
			bandwidthPackage.Zone = pack["zone"].(string)
		}

		bandwidthPackageTypes = append(bandwidthPackageTypes, bandwidthPackage)
	}

	args.BandwidthPackage = bandwidthPackageTypes

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	}

	args.Name = name

	if v, ok := d.GetOk("description"); ok {
		args.Description = v.(string)
	}
	resp, err := conn.CreateNatGateway(args)
	if err != nil {
		return fmt.Errorf("CreateNatGateway got error: %#v", err)
	}

	d.SetId(resp.NatGatewayId)

	return resourceAliyunNatGatewayRead(d, meta)
}
|
||||
|
||||
// resourceAliyunNatGatewayRead refreshes state from the live NAT gateway.
// A not-found gateway clears the ID so Terraform plans a re-create.
func resourceAliyunNatGatewayRead(d *schema.ResourceData, meta interface{}) error {

	client := meta.(*AliyunClient)

	natGateway, err := client.DescribeNatGateway(d.Id())
	if err != nil {
		if notFoundError(err) {
			d.SetId("")
			return nil
		}
		return err
	}

	d.Set("name", natGateway.Name)
	d.Set("spec", natGateway.Spec)
	// ID lists are flattened into comma-joined strings to match the schema.
	d.Set("bandwidth_package_ids", strings.Join(natGateway.BandwidthPackageIds.BandwidthPackageId, ","))
	d.Set("snat_table_ids", strings.Join(natGateway.SnatTableIds.SnatTableId, ","))
	d.Set("forward_table_ids", strings.Join(natGateway.ForwardTableIds.ForwardTableId, ","))
	d.Set("description", natGateway.Description)
	d.Set("vpc_id", natGateway.VpcId)
	bindWidthPackages, err := flattenBandWidthPackages(natGateway.BandwidthPackageIds.BandwidthPackageId, meta, d)
	// Best-effort: a flatten failure is logged and the previous
	// bandwidth_packages state is left untouched.
	if err != nil {
		log.Printf("[ERROR] bindWidthPackages flattenBandWidthPackages failed. natgateway id is %#v", d.Id())
	} else {
		d.Set("bandwidth_packages", bindWidthPackages)
	}

	return nil
}
|
||||
|
||||
func resourceAliyunNatGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
natGateway, err := client.DescribeNatGateway(d.Id())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifyNatGatewayAttributeArgs{
|
||||
RegionId: natGateway.RegionId,
|
||||
NatGatewayId: natGateway.NatGatewayId,
|
||||
}
|
||||
|
||||
if d.HasChange("name") {
|
||||
d.SetPartial("name")
|
||||
var name string
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
name = v.(string)
|
||||
} else {
|
||||
return fmt.Errorf("cann't change name to empty string")
|
||||
}
|
||||
args.Name = name
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
d.SetPartial("description")
|
||||
var description string
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
description = v.(string)
|
||||
} else {
|
||||
return fmt.Errorf("can to change description to empty string")
|
||||
}
|
||||
|
||||
args.Description = description
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifyNatGatewayAttribute(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("spec") {
|
||||
d.SetPartial("spec")
|
||||
var spec ecs.NatGatewaySpec
|
||||
if v, ok := d.GetOk("spec"); ok {
|
||||
spec = ecs.NatGatewaySpec(v.(string))
|
||||
} else {
|
||||
// set default to small spec
|
||||
spec = ecs.NatGatewaySmallSpec
|
||||
}
|
||||
|
||||
args := &ecs.ModifyNatGatewaySpecArgs{
|
||||
RegionId: natGateway.RegionId,
|
||||
NatGatewayId: natGateway.NatGatewayId,
|
||||
Spec: spec,
|
||||
}
|
||||
|
||||
err := conn.ModifyNatGatewaySpec(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%#v %#v", err, *args)
|
||||
}
|
||||
|
||||
}
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunNatGatewayRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunNatGatewayDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
|
||||
packages, err := conn.DescribeBandwidthPackages(&ecs.DescribeBandwidthPackagesArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
NatGatewayId: d.Id(),
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Describe bandwidth package is failed, natGateway Id: %s", d.Id())
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
retry := false
|
||||
for _, pack := range packages {
|
||||
err = conn.DeleteBandwidthPackage(&ecs.DeleteBandwidthPackageArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
BandwidthPackageId: pack.BandwidthPackageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
er, _ := err.(*common.Error)
|
||||
if er.ErrorResponse.Code == NatGatewayInvalidRegionId {
|
||||
log.Printf("[ERROR] Delete bandwidth package is failed, bandwidthPackageId: %#v", pack.BandwidthPackageId)
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
|
||||
if retry {
|
||||
return resource.RetryableError(fmt.Errorf("Bandwidth package in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
args := &ecs.DeleteNatGatewayArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
NatGatewayId: d.Id(),
|
||||
}
|
||||
|
||||
err = conn.DeleteNatGateway(args)
|
||||
if err != nil {
|
||||
er, _ := err.(*common.Error)
|
||||
if er.ErrorResponse.Code == DependencyViolationBandwidthPackages {
|
||||
return resource.RetryableError(fmt.Errorf("NatGateway in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
describeArgs := &ecs.DescribeNatGatewaysArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
NatGatewayId: d.Id(),
|
||||
}
|
||||
gw, _, gwErr := conn.DescribeNatGateways(describeArgs)
|
||||
|
||||
if gwErr != nil {
|
||||
log.Printf("[ERROR] Describe NatGateways failed.")
|
||||
return resource.NonRetryableError(gwErr)
|
||||
} else if gw == nil || len(gw) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("NatGateway in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
// flattenBandWidthPackages resolves each bandwidth-package ID into the
// map shape expected by the bandwidth_packages schema attribute. The API's
// IpCount/Bandwidth come back as strings and are converted to ints here.
// NOTE(review): IDs are iterated in reverse — presumably to restore the
// user's original declaration order; confirm against the API's ordering.
func flattenBandWidthPackages(bandWidthPackageIds []string, meta interface{}, d *schema.ResourceData) ([]map[string]interface{}, error) {

	packageLen := len(bandWidthPackageIds)
	result := make([]map[string]interface{}, 0, packageLen)

	for i := packageLen - 1; i >= 0; i-- {
		packageId := bandWidthPackageIds[i]
		packages, err := getPackages(packageId, meta, d)
		if err != nil {
			log.Printf("[ERROR] NatGateways getPackages failed. packageId is %#v", packageId)
			return result, err
		}
		ipAddress := flattenPackPublicIp(packages.PublicIpAddresses.PublicIpAddresse)
		ipCont, ipContErr := strconv.Atoi(packages.IpCount)
		bandWidth, bandWidthErr := strconv.Atoi(packages.Bandwidth)
		if ipContErr != nil {
			log.Printf("[ERROR] NatGateways getPackages failed: ipCont convert error. packageId is %#v", packageId)
			return result, ipContErr
		}
		if bandWidthErr != nil {
			log.Printf("[ERROR] NatGateways getPackages failed: bandWidthErr convert error. packageId is %#v", packageId)
			return result, bandWidthErr
		}
		l := map[string]interface{}{
			"ip_count":            ipCont,
			"bandwidth":           bandWidth,
			"zone":                packages.ZoneId,
			"public_ip_addresses": ipAddress,
		}
		result = append(result, l)
	}
	return result, nil
}
|
||||
|
||||
func getPackages(packageId string, meta interface{}, d *schema.ResourceData) (*ecs.DescribeBandwidthPackageType, error) {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
packages, err := conn.DescribeBandwidthPackages(&ecs.DescribeBandwidthPackagesArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
BandwidthPackageId: packageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Describe bandwidth package is failed, BandwidthPackageId Id: %s", packageId)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(packages) == 0 {
|
||||
return nil, common.GetClientErrorFromString(InstanceNotfound)
|
||||
}
|
||||
|
||||
return &packages[0], nil
|
||||
|
||||
}
|
||||
|
||||
func flattenPackPublicIp(publicIpAddressList []ecs.PublicIpAddresseType) string {
|
||||
var result []string
|
||||
|
||||
for _, publicIpAddresses := range publicIpAddressList {
|
||||
ipAddress := publicIpAddresses.IpAddress
|
||||
result = append(result, ipAddress)
|
||||
}
|
||||
|
||||
return strings.Join(result, ",")
|
||||
}
|
|
@ -1,288 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAccAlicloudNatGateway_basic is an acceptance test: it creates a NAT
// gateway with four bandwidth packages and verifies business status, spec,
// name, and that the gateway resolves to a live object with public IPs.
func TestAccAlicloudNatGateway_basic(t *testing.T) {
	var nat ecs.NatGatewaySetType

	// Closure over `nat`, which testAccCheckNatGatewayExists fills in first.
	testCheck := func(*terraform.State) error {
		if nat.BusinessStatus != "Normal" {
			return fmt.Errorf("abnormal instance status")
		}

		if len(nat.BandwidthPackageIds.BandwidthPackageId) == 0 {
			return fmt.Errorf("no bandwidth package: %#v", nat.BandwidthPackageIds.BandwidthPackageId)
		}

		return nil
	}

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_nat_gateway.foo",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckNatGatewayDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccNatGatewayConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNatGatewayExists(
						"alicloud_nat_gateway.foo", &nat),
					testCheck,
					resource.TestCheckResourceAttr(
						"alicloud_nat_gateway.foo",
						"spec",
						"Small"),
					resource.TestCheckResourceAttr(
						"alicloud_nat_gateway.foo",
						"name",
						"test_foo"),
					testAccCheckNatgatewayIpAddress("alicloud_nat_gateway.foo", &nat),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudNatGateway_spec is an acceptance test for in-place spec
// updates: it creates a Middle gateway, then upgrades the same resource to
// Large and checks the attribute follows.
func TestAccAlicloudNatGateway_spec(t *testing.T) {
	var nat ecs.NatGatewaySetType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_nat_gateway.foo",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckNatGatewayDestroy,
		Steps: []resource.TestStep{
			// Step 1: create with spec = Middle.
			resource.TestStep{
				Config: testAccNatGatewayConfigSpec,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNatGatewayExists(
						"alicloud_nat_gateway.foo", &nat),
					resource.TestCheckResourceAttr(
						"alicloud_nat_gateway.foo",
						"spec",
						"Middle"),
				),
			},

			// Step 2: update the same gateway to spec = Large.
			resource.TestStep{
				Config: testAccNatGatewayConfigSpecUpgrade,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNatGatewayExists(
						"alicloud_nat_gateway.foo", &nat),
					resource.TestCheckResourceAttr(
						"alicloud_nat_gateway.foo",
						"spec",
						"Large"),
				),
			},
		},
	})

}
|
||||
|
||||
// testAccCheckNatgatewayIpAddress returns a check that the named resource's
// gateway can still be described via the API (i.e. it exists remotely).
func testAccCheckNatgatewayIpAddress(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No NatGateway ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		natGateway, err := client.DescribeNatGateway(rs.Primary.ID)

		if err != nil {
			return err
		}
		if natGateway == nil {
			return fmt.Errorf("Natgateway not found")
		}

		return nil
	}
}
|
||||
|
||||
// testAccCheckNatGatewayExists returns a check that the named resource maps
// to a live NAT gateway, copying the API object into *nat for later checks.
func testAccCheckNatGatewayExists(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No Gateway ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		instance, err := client.DescribeNatGateway(rs.Primary.ID)

		if err != nil {
			return err
		}
		if instance == nil {
			return fmt.Errorf("Nat gateway not found")
		}

		// Expose the fetched gateway to sibling checks via the out-param.
		*nat = *instance
		return nil
	}
}
|
||||
|
||||
// testAccCheckNatGatewayDestroy verifies every alicloud_nat_gateway in the
// final state has actually been removed from the cloud account.
func testAccCheckNatGatewayDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*AliyunClient)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "alicloud_nat_gateway" {
			continue
		}

		// Try to find the Nat gateway
		instance, err := client.DescribeNatGateway(rs.Primary.ID)

		if instance != nil {
			return fmt.Errorf("Nat gateway still exist")
		}

		if err != nil {
			// Verify the error is what we want
			e, _ := err.(*common.Error)

			// Only a not-found error is acceptable here; anything else fails.
			if !notFoundError(e) {
				return err
			}
		}

	}

	return nil
}
|
||||
|
||||
// testAccNatGatewayConfig: VPC + vswitch in the third available zone and a
// "Small" NAT gateway carrying four bandwidth packages.
// NOTE(review): the quoted key "available_resource_creation" is unusual HCL
// but matches the original config — confirm before reformatting.
const testAccNatGatewayConfig = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "172.16.0.0/21"
	availability_zone = "${data.alicloud_zones.default.zones.2.id}"
}

resource "alicloud_nat_gateway" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	spec = "Small"
	name = "test_foo"
	bandwidth_packages = [{
		ip_count = 1
		bandwidth = 5
		zone = "${data.alicloud_zones.default.zones.2.id}"
	}, {
		ip_count = 2
		bandwidth = 6
		zone = "${data.alicloud_zones.default.zones.2.id}"
	}, {
		ip_count = 3
		bandwidth = 7
		zone = "${data.alicloud_zones.default.zones.2.id}"
	}, {
		ip_count = 1
		bandwidth = 8
		zone = "${data.alicloud_zones.default.zones.2.id}"
	}]
	depends_on = [
		"alicloud_vswitch.foo"]
}
`
|
||||
|
||||
// testAccNatGatewayConfigSpec: same topology as testAccNatGatewayConfig but a
// "Middle" gateway with two bandwidth packages, pinned to the first zone.
const testAccNatGatewayConfigSpec = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "172.16.0.0/21"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}

resource "alicloud_nat_gateway" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	spec = "Middle"
	name = "test_foo"
	bandwidth_packages = [{
		ip_count = 1
		bandwidth = 5
		zone = "${data.alicloud_zones.default.zones.0.id}"
	}, {
		ip_count = 2
		bandwidth = 10
		zone = "${data.alicloud_zones.default.zones.0.id}"
	}]
	depends_on = [
		"alicloud_vswitch.foo"]
}
`
|
||||
|
||||
// testAccNatGatewayConfigSpecUpgrade: identical to testAccNatGatewayConfigSpec
// except spec is raised to "Large", exercising the in-place spec update path.
const testAccNatGatewayConfigSpecUpgrade = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "172.16.0.0/21"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}

resource "alicloud_nat_gateway" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	spec = "Large"
	name = "test_foo"
	bandwidth_packages = [{
		ip_count = 1
		bandwidth = 5
		zone = "${data.alicloud_zones.default.zones.0.id}"
	}, {
		ip_count = 2
		bandwidth = 10
		zone = "${data.alicloud_zones.default.zones.0.id}"
	}]
	depends_on = [
		"alicloud_vswitch.foo"]
}
`
|
|
@ -1,174 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAliyunSecurityGroup returns the schema and CRUD handlers for the
// alicloud_security_group resource.
func resourceAliyunSecurityGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliyunSecurityGroupCreate,
		Read:   resourceAliyunSecurityGroupRead,
		Update: resourceAliyunSecurityGroupUpdate,
		Delete: resourceAliyunSecurityGroupDelete,

		Schema: map[string]*schema.Schema{
			// Optional display name, validated against Alicloud naming rules.
			"name": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateSecurityGroupName,
			},

			// Optional free-form description.
			"description": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateSecurityGroupDescription,
			},

			// VPC to create the group in; changing it forces a new resource.
			"vpc_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
|
||||
|
||||
// resourceAliyunSecurityGroupCreate creates the security group and then reads
// it back to populate state.
func resourceAliyunSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AliyunClient).ecsconn

	args, err := buildAliyunSecurityGroupArgs(d, meta)
	if err != nil {
		return err
	}

	securityGroupID, err := conn.CreateSecurityGroup(args)
	if err != nil {
		return err
	}

	// The API-assigned group id becomes the Terraform resource id.
	d.SetId(securityGroupID)

	return resourceAliyunSecurityGroupRead(d, meta)
}
|
||||
|
||||
// resourceAliyunSecurityGroupRead refreshes state from the API. A not-found
// response clears the id so Terraform will plan a re-create.
func resourceAliyunSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AliyunClient).ecsconn

	args := &ecs.DescribeSecurityGroupAttributeArgs{
		SecurityGroupId: d.Id(),
		RegionId:        getRegion(d, meta),
	}

	sg, err := conn.DescribeSecurityGroupAttribute(args)
	if err != nil {
		if notFoundError(err) {
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error DescribeSecurityGroupAttribute: %#v", err)
	}

	// Defensive: a nil response with a nil error is treated like deletion.
	if sg == nil {
		d.SetId("")
		return nil
	}

	d.Set("name", sg.SecurityGroupName)
	d.Set("description", sg.Description)

	return nil
}
|
||||
|
||||
// resourceAliyunSecurityGroupUpdate pushes name/description changes via a
// single ModifySecurityGroupAttribute call. vpc_id is ForceNew and never
// updated here.
func resourceAliyunSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {

	conn := meta.(*AliyunClient).ecsconn

	d.Partial(true)
	attributeUpdate := false
	args := &ecs.ModifySecurityGroupAttributeArgs{
		SecurityGroupId: d.Id(),
		RegionId:        getRegion(d, meta),
	}

	if d.HasChange("name") {
		d.SetPartial("name")
		args.SecurityGroupName = d.Get("name").(string)

		attributeUpdate = true
	}

	if d.HasChange("description") {
		d.SetPartial("description")
		args.Description = d.Get("description").(string)

		attributeUpdate = true
	}
	// Only hit the API when something actually changed.
	if attributeUpdate {
		if err := conn.ModifySecurityGroupAttribute(args); err != nil {
			return err
		}
	}

	// NOTE(review): partial mode is never switched back off before returning —
	// presumably harmless here, but confirm against sibling resources.
	return nil
}
|
||||
|
||||
func resourceAliyunSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteSecurityGroup(getRegion(d, meta), d.Id())
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == SgDependencyViolation {
|
||||
return resource.RetryableError(fmt.Errorf("Security group in use - trying again while it is deleted."))
|
||||
}
|
||||
}
|
||||
|
||||
sg, err := conn.DescribeSecurityGroupAttribute(&ecs.DescribeSecurityGroupAttributeArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
SecurityGroupId: d.Id(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InvalidSecurityGroupIdNotFound {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
} else if sg == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Security group in use - trying again while it is deleted."))
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// buildAliyunSecurityGroupArgs assembles CreateSecurityGroupArgs from resource
// data; empty attributes are simply omitted. The error return is always nil
// but kept for signature symmetry with the other arg builders in this package.
func buildAliyunSecurityGroupArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateSecurityGroupArgs, error) {

	args := &ecs.CreateSecurityGroupArgs{
		RegionId: getRegion(d, meta),
	}

	if v := d.Get("name").(string); v != "" {
		args.SecurityGroupName = v
	}

	if v := d.Get("description").(string); v != "" {
		args.Description = v
	}

	if v := d.Get("vpc_id").(string); v != "" {
		args.VpcId = v
	}

	return args, nil
}
|
|
@ -1,352 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resourceAliyunSecurityGroupRule returns the schema and CRUD handlers for the
// alicloud_security_group_rule resource. Rules are immutable: every attribute
// is ForceNew and there is no Update handler.
func resourceAliyunSecurityGroupRule() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliyunSecurityGroupRuleCreate,
		Read:   resourceAliyunSecurityGroupRuleRead,
		Delete: resourceAliyunSecurityGroupRuleDelete,

		Schema: map[string]*schema.Schema{
			"type": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateSecurityRuleType,
				Description:  "Type of rule, ingress (inbound) or egress (outbound).",
			},

			"ip_protocol": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateSecurityRuleIpProtocol,
			},

			// Computed when omitted so the provider can adopt whatever the
			// API chooses for the group's network type.
			"nic_type": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				Computed:     true,
				ValidateFunc: validateSecurityRuleNicType,
			},

			"policy": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateSecurityRulePolicy,
			},

			"port_range": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"priority": &schema.Schema{
				Type:         schema.TypeInt,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateSecurityPriority,
			},

			"security_group_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Exactly one of cidr_ip / source_security_group_id may be set.
			"cidr_ip": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source_security_group_id"},
			},

			"source_security_group_id": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"cidr_ip"},
			},

			"source_group_owner_account": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
|
||||
|
||||
// resourceAliyunSecurityGroupRuleCreate authorizes an ingress or egress rule
// and encodes the identifying tuple into the resource id as
// sgId:direction:ipProtocol:portRange:nicType.
func resourceAliyunSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	conn := client.ecsconn

	direction := d.Get("type").(string)
	sgId := d.Get("security_group_id").(string)
	ptl := d.Get("ip_protocol").(string)
	port := d.Get("port_range").(string)
	nicType := d.Get("nic_type").(string)

	// Dispatch to the matching authorize API by rule direction.
	var autherr error
	switch GroupRuleDirection(direction) {
	case GroupRuleIngress:
		args, err := buildAliyunSecurityIngressArgs(d, meta)
		if err != nil {
			return err
		}
		autherr = conn.AuthorizeSecurityGroup(args)
	case GroupRuleEgress:
		args, err := buildAliyunSecurityEgressArgs(d, meta)
		if err != nil {
			return err
		}
		autherr = conn.AuthorizeSecurityGroupEgress(args)
	default:
		return fmt.Errorf("Security Group Rule must be type 'ingress' or type 'egress'")
	}

	if autherr != nil {
		return fmt.Errorf(
			"Error authorizing security group rule type %s: %s",
			direction, autherr)
	}

	// Composite id: every component is needed to look the rule up again.
	d.SetId(sgId + ":" + direction + ":" + ptl + ":" + port + ":" + nicType)

	return resourceAliyunSecurityGroupRuleRead(d, meta)
}
|
||||
|
||||
func resourceAliyunSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
parts := strings.Split(d.Id(), ":")
|
||||
sgId := parts[0]
|
||||
direction := parts[1]
|
||||
ip_protocol := parts[2]
|
||||
port_range := parts[3]
|
||||
nic_type := parts[4]
|
||||
rule, err := client.DescribeSecurityGroupRule(sgId, direction, nic_type, ip_protocol, port_range)
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error SecurityGroup rule: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("type", rule.Direction)
|
||||
d.Set("ip_protocol", strings.ToLower(string(rule.IpProtocol)))
|
||||
d.Set("nic_type", rule.NicType)
|
||||
d.Set("policy", strings.ToLower(string(rule.Policy)))
|
||||
d.Set("port_range", rule.PortRange)
|
||||
d.Set("priority", rule.Priority)
|
||||
d.Set("security_group_id", sgId)
|
||||
//support source and desc by type
|
||||
if GroupRuleDirection(direction) == GroupRuleIngress {
|
||||
d.Set("cidr_ip", rule.SourceCidrIp)
|
||||
d.Set("source_security_group_id", rule.SourceGroupId)
|
||||
d.Set("source_group_owner_account", rule.SourceGroupOwnerAccount)
|
||||
} else {
|
||||
d.Set("cidr_ip", rule.DestCidrIp)
|
||||
d.Set("source_security_group_id", rule.DestGroupId)
|
||||
d.Set("source_group_owner_account", rule.DestGroupOwnerAccount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteSecurityGroupRule revokes the rule described by the resource data,
// dispatching to the ingress or egress revoke API based on the rule type.
func deleteSecurityGroupRule(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AliyunClient)
	ruleType := d.Get("type").(string)

	if GroupRuleDirection(ruleType) == GroupRuleIngress {
		// Revoke args embed the same struct used to authorize the rule.
		args, err := buildAliyunSecurityIngressArgs(d, meta)
		if err != nil {
			return err
		}
		revokeArgs := &ecs.RevokeSecurityGroupArgs{
			AuthorizeSecurityGroupArgs: *args,
		}
		return client.RevokeSecurityGroup(revokeArgs)
	}

	// Anything that is not ingress is treated as egress.
	args, err := buildAliyunSecurityEgressArgs(d, meta)

	if err != nil {
		return err
	}
	revokeArgs := &ecs.RevokeSecurityGroupEgressArgs{
		AuthorizeSecurityGroupEgressArgs: *args,
	}
	return client.RevokeSecurityGroupEgress(revokeArgs)
}
|
||||
|
||||
func resourceAliyunSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
parts := strings.Split(d.Id(), ":")
|
||||
sgId, direction, ip_protocol, port_range, nic_type := parts[0], parts[1], parts[2], parts[3], parts[4]
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := deleteSecurityGroupRule(d, meta)
|
||||
|
||||
if err != nil {
|
||||
resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
_, err = client.DescribeSecurityGroupRule(sgId, direction, nic_type, ip_protocol, port_range)
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted."))
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// checkCidrAndSourceGroupId enforces that exactly one of cidr_ip and
// source_security_group_id is provided, returning a descriptive error when
// neither or both are set and nil otherwise.
func checkCidrAndSourceGroupId(cidrIp, sourceGroupId string) error {
	haveCidr := cidrIp != ""
	haveGroup := sourceGroupId != ""

	switch {
	case !haveCidr && !haveGroup:
		return fmt.Errorf("Either cidr_ip or source_security_group_id is required.")
	case haveCidr && haveGroup:
		return fmt.Errorf("You should set only one value of cidr_ip or source_security_group_id.")
	default:
		return nil
	}
}
|
||||
// buildAliyunSecurityIngressArgs assembles AuthorizeSecurityGroupArgs from
// resource data. It also validates nic_type: when the target group lives in a
// VPC or a source security group is used, only 'intranet' is allowed.
func buildAliyunSecurityIngressArgs(d *schema.ResourceData, meta interface{}) (*ecs.AuthorizeSecurityGroupArgs, error) {
	conn := meta.(*AliyunClient).ecsconn

	args := &ecs.AuthorizeSecurityGroupArgs{
		RegionId: getRegion(d, meta),
	}

	if v := d.Get("ip_protocol").(string); v != "" {
		args.IpProtocol = ecs.IpProtocol(v)
	}

	if v := d.Get("port_range").(string); v != "" {
		args.PortRange = v
	}

	if v := d.Get("policy").(string); v != "" {
		args.Policy = ecs.PermissionPolicy(v)
	}

	// Zero means "unset": the API default priority applies.
	if v := d.Get("priority").(int); v != 0 {
		args.Priority = v
	}

	if v := d.Get("cidr_ip").(string); v != "" {
		args.SourceCidrIp = v
	}

	if v := d.Get("source_security_group_id").(string); v != "" {
		args.SourceGroupId = v
	}

	if v := d.Get("source_group_owner_account").(string); v != "" {
		args.SourceGroupOwnerAccount = v
	}

	sgId := d.Get("security_group_id").(string)

	sgArgs := &ecs.DescribeSecurityGroupAttributeArgs{
		SecurityGroupId: sgId,
		RegionId:        getRegion(d, meta),
	}

	// The group is fetched only to learn whether it is a VPC group.
	group, err := conn.DescribeSecurityGroupAttribute(sgArgs)
	if err != nil {
		return nil, fmt.Errorf("Error get security group %s error: %#v", sgId, err)
	}

	if v := d.Get("nic_type").(string); v != "" {
		if (group != nil && group.VpcId != "") || args.SourceGroupId != "" {
			if GroupRuleNicType(v) != GroupRuleIntranet {
				return nil, fmt.Errorf("When security group in the vpc or authorizing permission for source security group, " +
					"the nic_type must be 'intranet'.")
			}
		}
		args.NicType = ecs.NicType(v)
	}

	args.SecurityGroupId = sgId

	return args, nil
}
|
||||
|
||||
// buildAliyunSecurityEgressArgs assembles AuthorizeSecurityGroupEgressArgs
// from resource data. Mirrors buildAliyunSecurityIngressArgs but maps the
// shared schema attributes onto the Dest* fields, and applies the same
// nic_type validation for VPC groups / group-to-group rules.
func buildAliyunSecurityEgressArgs(d *schema.ResourceData, meta interface{}) (*ecs.AuthorizeSecurityGroupEgressArgs, error) {
	conn := meta.(*AliyunClient).ecsconn

	args := &ecs.AuthorizeSecurityGroupEgressArgs{
		RegionId: getRegion(d, meta),
	}

	if v := d.Get("ip_protocol").(string); v != "" {
		args.IpProtocol = ecs.IpProtocol(v)
	}

	if v := d.Get("port_range").(string); v != "" {
		args.PortRange = v
	}

	if v := d.Get("policy").(string); v != "" {
		args.Policy = ecs.PermissionPolicy(v)
	}

	// Zero means "unset": the API default priority applies.
	if v := d.Get("priority").(int); v != 0 {
		args.Priority = v
	}

	if v := d.Get("cidr_ip").(string); v != "" {
		args.DestCidrIp = v
	}

	if v := d.Get("source_security_group_id").(string); v != "" {
		args.DestGroupId = v
	}

	if v := d.Get("source_group_owner_account").(string); v != "" {
		args.DestGroupOwnerAccount = v
	}

	sgId := d.Get("security_group_id").(string)

	sgArgs := &ecs.DescribeSecurityGroupAttributeArgs{
		SecurityGroupId: sgId,
		RegionId:        getRegion(d, meta),
	}

	// The group is fetched only to learn whether it is a VPC group.
	group, err := conn.DescribeSecurityGroupAttribute(sgArgs)
	if err != nil {
		return nil, fmt.Errorf("Error get security group %s error: %#v", sgId, err)
	}

	if v := d.Get("nic_type").(string); v != "" {
		if (group != nil && group.VpcId != "") || args.DestGroupId != "" {
			if GroupRuleNicType(v) != GroupRuleIntranet {
				return nil, fmt.Errorf("When security group in the vpc or authorizing permission for destination security group, " +
					"the nic_type must be 'intranet'.")
			}
		}
		args.NicType = ecs.NicType(v)
	}

	args.SecurityGroupId = sgId

	return args, nil
}
|
|
@ -1,428 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_Ingress covers a basic internet ingress
// rule and checks the priority, nic_type and ip_protocol attributes.
func TestAccAlicloudSecurityGroupRule_Ingress(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.ingress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRuleIngress,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.ingress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"priority",
						"1"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"nic_type",
						"internet"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"ip_protocol",
						"tcp"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_Egress covers a basic internet egress rule
// and checks the port_range and ip_protocol attributes.
func TestAccAlicloudSecurityGroupRule_Egress(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.egress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRuleEgress,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.egress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"port_range",
						"80/80"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"ip_protocol",
						"udp"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_EgressDefaultNicType verifies that when
// nic_type is omitted from the config it is computed as "internet" for a
// classic-network group.
func TestAccAlicloudSecurityGroupRule_EgressDefaultNicType(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.egress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRuleEgress_emptyNicType,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.egress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"port_range",
						"80/80"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"nic_type",
						"internet"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_Vpc_Ingress covers an intranet ingress rule
// on a VPC security group and checks port_range and ip_protocol.
func TestAccAlicloudSecurityGroupRule_Vpc_Ingress(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.ingress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRuleVpcIngress,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.ingress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"port_range",
						"1/200"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"ip_protocol",
						"udp"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_MissParameterSourceCidrIp covers a config
// that omits nic_type while supplying cidr_ip, checking the computed nic_type
// plus port_range and ip_protocol.
func TestAccAlicloudSecurityGroupRule_MissParameterSourceCidrIp(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.egress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRule_missingSourceCidrIp,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.egress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"port_range",
						"80/80"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"nic_type",
						"internet"),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.egress",
						"ip_protocol",
						"udp"),
				),
			},
		},
	})

}
|
||||
|
||||
// TestAccAlicloudSecurityGroupRule_SourceSecurityGroup covers a group-to-group
// ingress rule: source_security_group_id must be a sg-* id and cidr_ip must
// stay empty.
func TestAccAlicloudSecurityGroupRule_SourceSecurityGroup(t *testing.T) {
	var pt ecs.PermissionType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group_rule.ingress",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckSecurityGroupRuleDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupRuleSourceSecurityGroup,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupRuleExists(
						"alicloud_security_group_rule.ingress", &pt),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"port_range",
						"3306/3306"),
					resource.TestMatchResourceAttr(
						"alicloud_security_group_rule.ingress",
						"source_security_group_id",
						regexp.MustCompile("^sg-[a-zA-Z0-9_]+")),
					resource.TestCheckResourceAttr(
						"alicloud_security_group_rule.ingress",
						"cidr_ip",
						""),
				),
			},
		},
	})

}
|
||||
|
||||
// testAccCheckSecurityGroupRuleExists verifies the rule exists in state and in
// the API, copying the API permission into m for later inspection.
func testAccCheckSecurityGroupRuleExists(n string, m *ecs.PermissionType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No SecurityGroup Rule ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		log.Printf("[WARN]get sg rule %s", rs.Primary.ID)
		// The id is the composite sgId:direction:ipProtocol:portRange:nicType;
		// note DescribeSecurityGroupRule takes nicType before ipProtocol.
		parts := strings.Split(rs.Primary.ID, ":")
		// securityGroupId, direction, nicType, ipProtocol, portRange
		rule, err := client.DescribeSecurityGroupRule(parts[0], parts[1], parts[4], parts[2], parts[3])

		if err != nil {
			return err
		}

		if rule == nil {
			return fmt.Errorf("SecurityGroup not found")
		}

		*m = *rule
		return nil
	}
}
|
||||
|
||||
func testAccCheckSecurityGroupRuleDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_security_group_rule" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(rs.Primary.ID, ":")
|
||||
rule, err := client.DescribeSecurityGroupRule(parts[0], parts[1], parts[4], parts[2], parts[3])
|
||||
|
||||
if rule != nil {
|
||||
return fmt.Errorf("Error SecurityGroup Rule still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == InvalidSecurityGroupIdNotFound {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccSecurityGroupRuleIngress: classic group with a tcp internet ingress
// rule from a cidr block.
const testAccSecurityGroupRuleIngress = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group_rule" "ingress" {
	type = "ingress"
	ip_protocol = "tcp"
	nic_type = "internet"
	policy = "accept"
	port_range = "1/200"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "10.159.6.18/12"
}

`
|
||||
|
||||
// testAccSecurityGroupRuleEgress: classic group with a udp internet egress
// rule to a cidr block.
const testAccSecurityGroupRuleEgress = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group_rule" "egress" {
	type = "egress"
	ip_protocol = "udp"
	nic_type = "internet"
	policy = "accept"
	port_range = "80/80"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "10.159.6.18/12"
}

`
|
||||
|
||||
// testAccSecurityGroupRuleEgress_emptyNicType: egress rule with nic_type
// omitted, so the provider computes it ("internet" for a classic group).
const testAccSecurityGroupRuleEgress_emptyNicType = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group_rule" "egress" {
	type = "egress"
	ip_protocol = "udp"
	policy = "accept"
	port_range = "80/80"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "10.159.6.18/12"
}

`
|
||||
|
||||
// testAccSecurityGroupRuleVpcIngress: VPC-scoped group with an intranet
// ingress rule (nic_type must be "intranet" for VPC groups).
const testAccSecurityGroupRuleVpcIngress = `
resource "alicloud_security_group" "foo" {
	vpc_id = "${alicloud_vpc.vpc.id}"
	name = "sg_foo"
}

resource "alicloud_vpc" "vpc" {
	cidr_block = "10.1.0.0/21"
}

resource "alicloud_security_group_rule" "ingress" {
	type = "ingress"
	ip_protocol = "udp"
	nic_type = "intranet"
	policy = "accept"
	port_range = "1/200"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "10.159.6.18/12"
}

`
|
||||
// testAccSecurityGroupRule_missingSourceCidrIp: egress rule with nic_type
// omitted but cidr_ip provided; the provider computes nic_type.
const testAccSecurityGroupRule_missingSourceCidrIp = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group_rule" "egress" {
	security_group_id = "${alicloud_security_group.foo.id}"
	type = "egress"
	cidr_ip= "0.0.0.0/0"
	policy = "accept"
	ip_protocol= "udp"
	port_range= "80/80"
	priority= 1
}

`
|
||||
|
||||
// testAccSecurityGroupRuleMultiIngress: two ingress rules (tcp and gre, the
// latter with the all-ports "-1/-1" range) attached to the same group.
const testAccSecurityGroupRuleMultiIngress = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group_rule" "ingress1" {
	type = "ingress"
	ip_protocol = "tcp"
	nic_type = "internet"
	policy = "accept"
	port_range = "1/200"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "10.159.6.18/12"
}

resource "alicloud_security_group_rule" "ingress2" {
	type = "ingress"
	ip_protocol = "gre"
	nic_type = "internet"
	policy = "accept"
	port_range = "-1/-1"
	priority = 1
	security_group_id = "${alicloud_security_group.foo.id}"
	cidr_ip = "127.0.1.18/16"
}

`
|
||||
|
||||
// testAccSecurityGroupRuleSourceSecurityGroup: group-to-group ingress rule
// authorizing group "foo" into group "bar" over tcp/3306 (intranet only).
const testAccSecurityGroupRuleSourceSecurityGroup = `
resource "alicloud_security_group" "foo" {
	name = "sg_foo"
}

resource "alicloud_security_group" "bar" {
	name = "sg_bar"
}

resource "alicloud_security_group_rule" "ingress" {
	type = "ingress"
	ip_protocol = "tcp"
	nic_type = "intranet"
	policy = "accept"
	port_range = "3306/3306"
	priority = 50
	security_group_id = "${alicloud_security_group.bar.id}"
	source_security_group_id = "${alicloud_security_group.foo.id}"
}

`
|
|
@ -1,151 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
)
|
||||
|
||||
// TestAccAlicloudSecurityGroup_basic creates a classic-network security group
// and checks its name attribute.
func TestAccAlicloudSecurityGroup_basic(t *testing.T) {
	var sg ecs.DescribeSecurityGroupAttributeResponse

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_security_group.foo",

		Providers:    testAccProviders,
		CheckDestroy: testAccCheckSecurityGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccSecurityGroupConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityGroupExists(
						"alicloud_security_group.foo", &sg),
					resource.TestCheckResourceAttr(
						"alicloud_security_group.foo",
						"name",
						"sg_test"),
				),
			},
		},
	})

}
||||
|
||||
func TestAccAlicloudSecurityGroup_withVpc(t *testing.T) {
|
||||
var sg ecs.DescribeSecurityGroupAttributeResponse
|
||||
var vpc ecs.VpcSetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_security_group.foo",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccSecurityGroupConfig_withVpc,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSecurityGroupExists(
|
||||
"alicloud_security_group.foo", &sg),
|
||||
testAccCheckVpcExists(
|
||||
"alicloud_vpc.vpc", &vpc),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckSecurityGroupExists(n string, sg *ecs.DescribeSecurityGroupAttributeResponse) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No SecurityGroup ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
args := &ecs.DescribeSecurityGroupAttributeArgs{
|
||||
RegionId: client.Region,
|
||||
SecurityGroupId: rs.Primary.ID,
|
||||
}
|
||||
d, err := conn.DescribeSecurityGroupAttribute(args)
|
||||
|
||||
log.Printf("[WARN] security group id %#v", rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d == nil {
|
||||
return fmt.Errorf("SecurityGroup not found")
|
||||
}
|
||||
|
||||
*sg = *d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckSecurityGroupDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
conn := client.ecsconn
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_security_group" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the SecurityGroup
|
||||
args := &ecs.DescribeSecurityGroupsArgs{
|
||||
RegionId: client.Region,
|
||||
}
|
||||
|
||||
groups, _, err := conn.DescribeSecurityGroups(args)
|
||||
|
||||
for _, sg := range groups {
|
||||
if sg.SecurityGroupId == rs.Primary.ID {
|
||||
return fmt.Errorf("Error SecurityGroup still exist")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccSecurityGroupConfig = `
|
||||
resource "alicloud_security_group" "foo" {
|
||||
name = "sg_test"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccSecurityGroupConfig_withVpc = `
|
||||
resource "alicloud_security_group" "foo" {
|
||||
vpc_id = "${alicloud_vpc.vpc.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "vpc" {
|
||||
cidr_block = "10.1.0.0/21"
|
||||
}
|
||||
`
|
|
@ -1,611 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunSlb() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunSlbCreate,
|
||||
Read: resourceAliyunSlbRead,
|
||||
Update: resourceAliyunSlbUpdate,
|
||||
Delete: resourceAliyunSlbDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateSlbName,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"internet": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"vswitch_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"internet_charge_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Default: "paybytraffic",
|
||||
ValidateFunc: validateSlbInternetChargeType,
|
||||
},
|
||||
|
||||
"bandwidth": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ValidateFunc: validateSlbBandwidth,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"listener": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"instance_port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateInstancePort,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"lb_port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateInstancePort,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"lb_protocol": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateInstanceProtocol,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"bandwidth": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateSlbListenerBandwidth,
|
||||
Required: true,
|
||||
},
|
||||
"scheduler": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateSlbListenerScheduler,
|
||||
Optional: true,
|
||||
Default: slb.WRRScheduler,
|
||||
},
|
||||
//http & https
|
||||
"sticky_session": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(slb.OnFlag),
|
||||
string(slb.OffFlag)}),
|
||||
Optional: true,
|
||||
Default: slb.OffFlag,
|
||||
},
|
||||
//http & https
|
||||
"sticky_session_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(slb.InsertStickySessionType),
|
||||
string(slb.ServerStickySessionType)}),
|
||||
Optional: true,
|
||||
},
|
||||
//http & https
|
||||
"cookie_timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateSlbListenerCookieTimeout,
|
||||
Optional: true,
|
||||
},
|
||||
//http & https
|
||||
"cookie": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateSlbListenerCookie,
|
||||
Optional: true,
|
||||
},
|
||||
//tcp & udp
|
||||
"persistence_timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateSlbListenerPersistenceTimeout,
|
||||
Optional: true,
|
||||
Default: 0,
|
||||
},
|
||||
//http & https
|
||||
"health_check": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(slb.OnFlag),
|
||||
string(slb.OffFlag)}),
|
||||
Optional: true,
|
||||
Default: slb.OffFlag,
|
||||
},
|
||||
//tcp
|
||||
"health_check_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateAllowedStringValue([]string{
|
||||
string(slb.TCPHealthCheckType),
|
||||
string(slb.HTTPHealthCheckType)}),
|
||||
Optional: true,
|
||||
Default: slb.TCPHealthCheckType,
|
||||
},
|
||||
//http & https & tcp
|
||||
"health_check_domain": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateSlbListenerHealthCheckDomain,
|
||||
Optional: true,
|
||||
},
|
||||
//http & https & tcp
|
||||
"health_check_uri": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateSlbListenerHealthCheckUri,
|
||||
Optional: true,
|
||||
},
|
||||
"health_check_connect_port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateSlbListenerHealthCheckConnectPort,
|
||||
Optional: true,
|
||||
},
|
||||
"healthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateIntegerInRange(1, 10),
|
||||
Optional: true,
|
||||
},
|
||||
"unhealthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateIntegerInRange(1, 10),
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"health_check_timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateIntegerInRange(1, 50),
|
||||
Optional: true,
|
||||
},
|
||||
"health_check_interval": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
ValidateFunc: validateIntegerInRange(1, 5),
|
||||
Optional: true,
|
||||
},
|
||||
//http & https & tcp
|
||||
"health_check_http_code": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ValidateFunc: validateAllowedSplitStringValue([]string{
|
||||
string(slb.HTTP_2XX),
|
||||
string(slb.HTTP_3XX),
|
||||
string(slb.HTTP_4XX),
|
||||
string(slb.HTTP_5XX)}, ","),
|
||||
Optional: true,
|
||||
},
|
||||
//https
|
||||
"ssl_certificate_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
//https
|
||||
//"ca_certificate_id": &schema.Schema{
|
||||
// Type: schema.TypeString,
|
||||
// Optional: true,
|
||||
//},
|
||||
},
|
||||
},
|
||||
Set: resourceAliyunSlbListenerHash,
|
||||
},
|
||||
|
||||
//deprecated
|
||||
"instances": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Optional: true,
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunSlbCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
|
||||
var slbName string
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
slbName = v.(string)
|
||||
} else {
|
||||
slbName = resource.PrefixedUniqueId("tf-lb-")
|
||||
d.Set("name", slbName)
|
||||
}
|
||||
|
||||
slbArgs := &slb.CreateLoadBalancerArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
LoadBalancerName: slbName,
|
||||
}
|
||||
|
||||
if internet, ok := d.GetOk("internet"); ok && internet.(bool) {
|
||||
slbArgs.AddressType = slb.InternetAddressType
|
||||
d.Set("internet", true)
|
||||
} else {
|
||||
slbArgs.AddressType = slb.IntranetAddressType
|
||||
d.Set("internet", false)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("internet_charge_type"); ok && v.(string) != "" {
|
||||
slbArgs.InternetChargeType = slb.InternetChargeType(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("bandwidth"); ok && v.(int) != 0 {
|
||||
slbArgs.Bandwidth = v.(int)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("vswitch_id"); ok && v.(string) != "" {
|
||||
slbArgs.VSwitchId = v.(string)
|
||||
}
|
||||
slb, err := slbconn.CreateLoadBalancer(slbArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(slb.LoadBalancerId)
|
||||
|
||||
return resourceAliyunSlbUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSlbRead(d *schema.ResourceData, meta interface{}) error {
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if loadBalancer == nil {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
d.Set("name", loadBalancer.LoadBalancerName)
|
||||
|
||||
if loadBalancer.AddressType == slb.InternetAddressType {
|
||||
d.Set("internal", true)
|
||||
} else {
|
||||
d.Set("internal", false)
|
||||
}
|
||||
d.Set("internet_charge_type", loadBalancer.InternetChargeType)
|
||||
d.Set("bandwidth", loadBalancer.Bandwidth)
|
||||
d.Set("vswitch_id", loadBalancer.VSwitchId)
|
||||
d.Set("address", loadBalancer.Address)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunSlbUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
if d.HasChange("name") {
|
||||
err := slbconn.SetLoadBalancerName(d.Id(), d.Get("name").(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("name")
|
||||
}
|
||||
|
||||
if d.Get("internet") == true && d.Get("internet_charge_type") == "paybybandwidth" {
|
||||
//don't intranet web and paybybandwidth, then can modify bandwidth
|
||||
if d.HasChange("bandwidth") {
|
||||
args := &slb.ModifyLoadBalancerInternetSpecArgs{
|
||||
LoadBalancerId: d.Id(),
|
||||
Bandwidth: d.Get("bandwidth").(int),
|
||||
}
|
||||
err := slbconn.ModifyLoadBalancerInternetSpec(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("bandwidth")
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("listener") {
|
||||
o, n := d.GetChange("listener")
|
||||
os := o.(*schema.Set)
|
||||
ns := n.(*schema.Set)
|
||||
|
||||
remove, _ := expandListeners(os.Difference(ns).List())
|
||||
add, _ := expandListeners(ns.Difference(os).List())
|
||||
|
||||
if len(remove) > 0 {
|
||||
for _, listener := range remove {
|
||||
err := slbconn.DeleteLoadBalancerListener(d.Id(), listener.LoadBalancerPort)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failure removing outdated SLB listeners: %#v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(add) > 0 {
|
||||
for _, listener := range add {
|
||||
err := createListener(slbconn, d.Id(), listener)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failure add SLB listeners: %#v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.SetPartial("listener")
|
||||
}
|
||||
|
||||
// If we currently have instances, or did have instances,
|
||||
// we want to figure out what to add and remove from the load
|
||||
// balancer
|
||||
if d.HasChange("instances") {
|
||||
o, n := d.GetChange("instances")
|
||||
os := o.(*schema.Set)
|
||||
ns := n.(*schema.Set)
|
||||
remove := expandBackendServers(os.Difference(ns).List())
|
||||
add := expandBackendServers(ns.Difference(os).List())
|
||||
|
||||
if len(add) > 0 {
|
||||
_, err := slbconn.AddBackendServers(d.Id(), add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(remove) > 0 {
|
||||
removeBackendServers := make([]string, 0, len(remove))
|
||||
for _, e := range remove {
|
||||
removeBackendServers = append(removeBackendServers, e.ServerId)
|
||||
}
|
||||
_, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.SetPartial("instances")
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunSlbRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSlbDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).slbconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteLoadBalancer(d.Id())
|
||||
|
||||
if err != nil {
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
loadBalancer, err := conn.DescribeLoadBalancerAttribute(d.Id())
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == LoadBalancerNotFound {
|
||||
return nil
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
if loadBalancer != nil {
|
||||
return resource.RetryableError(fmt.Errorf("LoadBalancer in use - trying again while it deleted."))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func resourceAliyunSlbListenerHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int)))
|
||||
buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int)))
|
||||
buf.WriteString(fmt.Sprintf("%s-",
|
||||
strings.ToLower(m["lb_protocol"].(string))))
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%d-", m["bandwidth"].(int)))
|
||||
|
||||
if v, ok := m["ssl_certificate_id"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
func createListener(conn *slb.Client, loadBalancerId string, listener *Listener) error {
|
||||
|
||||
errTypeJudge := func(err error) error {
|
||||
if err != nil {
|
||||
if listenerType, ok := err.(*ListenerErr); ok {
|
||||
if listenerType.ErrType == HealthCheckErrType {
|
||||
return fmt.Errorf("When the HealthCheck is %s, then related HealthCheck parameter "+
|
||||
"must have.", slb.OnFlag)
|
||||
} else if listenerType.ErrType == StickySessionErrType {
|
||||
return fmt.Errorf("When the StickySession is %s, then StickySessionType parameter "+
|
||||
"must have.", slb.OnFlag)
|
||||
} else if listenerType.ErrType == CookieTimeOutErrType {
|
||||
return fmt.Errorf("When the StickySession is %s and StickySessionType is %s, "+
|
||||
"then CookieTimeout parameter must have.", slb.OnFlag, slb.InsertStickySessionType)
|
||||
} else if listenerType.ErrType == CookieErrType {
|
||||
return fmt.Errorf("When the StickySession is %s and StickySessionType is %s, "+
|
||||
"then Cookie parameter must have.", slb.OnFlag, slb.ServerStickySessionType)
|
||||
}
|
||||
return fmt.Errorf("slb listener check errtype not found.")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if listener.Protocol == strings.ToLower("tcp") {
|
||||
|
||||
args := getTcpListenerArgs(loadBalancerId, listener)
|
||||
|
||||
if err := conn.CreateLoadBalancerTCPListener(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if listener.Protocol == strings.ToLower("http") {
|
||||
args, argsErr := getHttpListenerArgs(loadBalancerId, listener)
|
||||
if paramErr := errTypeJudge(argsErr); paramErr != nil {
|
||||
return paramErr
|
||||
}
|
||||
|
||||
if err := conn.CreateLoadBalancerHTTPListener(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if listener.Protocol == strings.ToLower("https") {
|
||||
listenerType, err := getHttpListenerType(loadBalancerId, listener)
|
||||
if paramErr := errTypeJudge(err); paramErr != nil {
|
||||
return paramErr
|
||||
}
|
||||
|
||||
args := &slb.CreateLoadBalancerHTTPSListenerArgs{
|
||||
HTTPListenerType: listenerType,
|
||||
}
|
||||
if listener.SSLCertificateId == "" {
|
||||
return fmt.Errorf("Server Certificated Id cann't be null")
|
||||
}
|
||||
|
||||
args.ServerCertificateId = listener.SSLCertificateId
|
||||
|
||||
if err := conn.CreateLoadBalancerHTTPSListener(args); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if listener.Protocol == strings.ToLower("udp") {
|
||||
args := getUdpListenerArgs(loadBalancerId, listener)
|
||||
|
||||
if err := conn.CreateLoadBalancerUDPListener(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := conn.StartLoadBalancerListener(loadBalancerId, listener.LoadBalancerPort); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTcpListenerArgs(loadBalancerId string, listener *Listener) slb.CreateLoadBalancerTCPListenerArgs {
|
||||
args := slb.CreateLoadBalancerTCPListenerArgs{
|
||||
LoadBalancerId: loadBalancerId,
|
||||
ListenerPort: listener.LoadBalancerPort,
|
||||
BackendServerPort: listener.InstancePort,
|
||||
Bandwidth: listener.Bandwidth,
|
||||
Scheduler: listener.Scheduler,
|
||||
PersistenceTimeout: listener.PersistenceTimeout,
|
||||
HealthCheckType: listener.HealthCheckType,
|
||||
HealthCheckDomain: listener.HealthCheckDomain,
|
||||
HealthCheckURI: listener.HealthCheckURI,
|
||||
HealthCheckConnectPort: listener.HealthCheckConnectPort,
|
||||
HealthyThreshold: listener.HealthyThreshold,
|
||||
UnhealthyThreshold: listener.UnhealthyThreshold,
|
||||
HealthCheckConnectTimeout: listener.HealthCheckTimeout,
|
||||
HealthCheckInterval: listener.HealthCheckInterval,
|
||||
HealthCheckHttpCode: listener.HealthCheckHttpCode,
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func getUdpListenerArgs(loadBalancerId string, listener *Listener) slb.CreateLoadBalancerUDPListenerArgs {
|
||||
args := slb.CreateLoadBalancerUDPListenerArgs{
|
||||
LoadBalancerId: loadBalancerId,
|
||||
ListenerPort: listener.LoadBalancerPort,
|
||||
BackendServerPort: listener.InstancePort,
|
||||
Bandwidth: listener.Bandwidth,
|
||||
PersistenceTimeout: listener.PersistenceTimeout,
|
||||
HealthCheckConnectTimeout: listener.HealthCheckTimeout,
|
||||
HealthCheckInterval: listener.HealthCheckInterval,
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func getHttpListenerType(loadBalancerId string, listener *Listener) (listenType slb.HTTPListenerType, err error) {
|
||||
|
||||
if listener.HealthCheck == slb.OnFlag {
|
||||
if listener.HealthCheckURI == "" || listener.HealthCheckDomain == "" || listener.HealthCheckConnectPort == 0 ||
|
||||
listener.HealthyThreshold == 0 || listener.UnhealthyThreshold == 0 || listener.HealthCheckTimeout == 0 ||
|
||||
listener.HealthCheckHttpCode == "" || listener.HealthCheckInterval == 0 {
|
||||
|
||||
errMsg := errors.New("err: HealthCheck empty.")
|
||||
return listenType, &ListenerErr{HealthCheckErrType, errMsg}
|
||||
}
|
||||
}
|
||||
|
||||
if listener.StickySession == slb.OnFlag {
|
||||
if listener.StickySessionType == "" {
|
||||
errMsg := errors.New("err: stickySession empty.")
|
||||
return listenType, &ListenerErr{StickySessionErrType, errMsg}
|
||||
}
|
||||
|
||||
if listener.StickySessionType == slb.InsertStickySessionType {
|
||||
if listener.CookieTimeout == 0 {
|
||||
errMsg := errors.New("err: cookieTimeout empty.")
|
||||
return listenType, &ListenerErr{CookieTimeOutErrType, errMsg}
|
||||
}
|
||||
} else if listener.StickySessionType == slb.ServerStickySessionType {
|
||||
if listener.Cookie == "" {
|
||||
errMsg := errors.New("err: cookie empty.")
|
||||
return listenType, &ListenerErr{CookieErrType, errMsg}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
httpListenertType := slb.HTTPListenerType{
|
||||
LoadBalancerId: loadBalancerId,
|
||||
ListenerPort: listener.LoadBalancerPort,
|
||||
BackendServerPort: listener.InstancePort,
|
||||
Bandwidth: listener.Bandwidth,
|
||||
Scheduler: listener.Scheduler,
|
||||
HealthCheck: listener.HealthCheck,
|
||||
StickySession: listener.StickySession,
|
||||
StickySessionType: listener.StickySessionType,
|
||||
CookieTimeout: listener.CookieTimeout,
|
||||
Cookie: listener.Cookie,
|
||||
HealthCheckDomain: listener.HealthCheckDomain,
|
||||
HealthCheckURI: listener.HealthCheckURI,
|
||||
HealthCheckConnectPort: listener.HealthCheckConnectPort,
|
||||
HealthyThreshold: listener.HealthyThreshold,
|
||||
UnhealthyThreshold: listener.UnhealthyThreshold,
|
||||
HealthCheckTimeout: listener.HealthCheckTimeout,
|
||||
HealthCheckInterval: listener.HealthCheckInterval,
|
||||
HealthCheckHttpCode: listener.HealthCheckHttpCode,
|
||||
}
|
||||
|
||||
return httpListenertType, err
|
||||
}
|
||||
|
||||
func getHttpListenerArgs(loadBalancerId string, listener *Listener) (listenType slb.CreateLoadBalancerHTTPListenerArgs, err error) {
|
||||
httpListenerType, err := getHttpListenerType(loadBalancerId, listener)
|
||||
if err != nil {
|
||||
return listenType, err
|
||||
}
|
||||
|
||||
httpArgs := slb.CreateLoadBalancerHTTPListenerArgs(httpListenerType)
|
||||
return httpArgs, err
|
||||
}
|
|
@ -1,148 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func resourceAliyunSlbAttachment() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunSlbAttachmentCreate,
|
||||
Read: resourceAliyunSlbAttachmentRead,
|
||||
Update: resourceAliyunSlbAttachmentUpdate,
|
||||
Delete: resourceAliyunSlbAttachmentDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
||||
"slb_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"instances": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Required: true,
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"backend_servers": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunSlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbId := d.Get("slb_id").(string)
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
|
||||
loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(slbId)
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Special SLB Id not found: %#v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(loadBalancer.LoadBalancerId)
|
||||
|
||||
return resourceAliyunSlbAttachmentUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSlbAttachmentRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(d.Id())
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Read special SLB Id not found: %#v", err)
|
||||
}
|
||||
|
||||
if loadBalancer == nil {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
backendServerType := loadBalancer.BackendServers
|
||||
servers := backendServerType.BackendServer
|
||||
instanceIds := make([]string, 0, len(servers))
|
||||
if len(servers) > 0 {
|
||||
for _, e := range servers {
|
||||
instanceIds = append(instanceIds, e.ServerId)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.Set("slb_id", d.Id())
|
||||
d.Set("instances", instanceIds)
|
||||
d.Set("backend_servers", strings.Join(instanceIds, ","))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunSlbAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
if d.HasChange("instances") {
|
||||
o, n := d.GetChange("instances")
|
||||
os := o.(*schema.Set)
|
||||
ns := n.(*schema.Set)
|
||||
remove := expandBackendServers(os.Difference(ns).List())
|
||||
add := expandBackendServers(ns.Difference(os).List())
|
||||
|
||||
if len(add) > 0 {
|
||||
_, err := slbconn.AddBackendServers(d.Id(), add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(remove) > 0 {
|
||||
removeBackendServers := make([]string, 0, len(remove))
|
||||
for _, e := range remove {
|
||||
removeBackendServers = append(removeBackendServers, e.ServerId)
|
||||
}
|
||||
_, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return resourceAliyunSlbAttachmentRead(d, meta)
|
||||
|
||||
}
|
||||
|
||||
func resourceAliyunSlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
slbconn := meta.(*AliyunClient).slbconn
|
||||
o := d.Get("instances")
|
||||
os := o.(*schema.Set)
|
||||
remove := expandBackendServers(os.List())
|
||||
|
||||
if len(remove) > 0 {
|
||||
removeBackendServers := make([]string, 0, len(remove))
|
||||
for _, e := range remove {
|
||||
removeBackendServers = append(removeBackendServers, e.ServerId)
|
||||
}
|
||||
_, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,131 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudSlbAttachment_basic(t *testing.T) {
|
||||
var slb slb.LoadBalancerType
|
||||
|
||||
testCheckAttr := func() resource.TestCheckFunc {
|
||||
return func(*terraform.State) error {
|
||||
log.Printf("testCheckAttr slb BackendServers is: %#v", slb.BackendServers)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_slb_attachment.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSlbDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
//test internet_charge_type is paybybandwidth
|
||||
resource.TestStep{
|
||||
Config: testAccSlbAttachment,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSlbExists("alicloud_slb_attachment.foo", &slb),
|
||||
testCheckAttr(),
|
||||
testAccCheckAttachment("alicloud_instance.foo", &slb),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAttachment(n string, slb *slb.LoadBalancerType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ECS ID is set")
|
||||
}
|
||||
|
||||
ecsInstanceId := rs.Primary.ID
|
||||
|
||||
backendServers := slb.BackendServers.BackendServer
|
||||
|
||||
if len(backendServers) == 0 {
|
||||
return fmt.Errorf("no SLB backendServer: %#v", backendServers)
|
||||
}
|
||||
|
||||
log.Printf("slb bacnendservers: %#v", backendServers)
|
||||
|
||||
backendServersInstanceId := backendServers[0].ServerId
|
||||
|
||||
if ecsInstanceId != backendServersInstanceId {
|
||||
return fmt.Errorf("SLB attachment check invalid: ECS instance %s is not equal SLB backendServer %s",
|
||||
ecsInstanceId, backendServersInstanceId)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const testAccSlbAttachment = `
|
||||
resource "alicloud_security_group" "foo" {
|
||||
name = "tf_test_foo"
|
||||
description = "foo"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "http-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "80/80"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_security_group_rule" "ssh-in" {
|
||||
type = "ingress"
|
||||
ip_protocol = "tcp"
|
||||
nic_type = "internet"
|
||||
policy = "accept"
|
||||
port_range = "22/22"
|
||||
priority = 1
|
||||
security_group_id = "${alicloud_security_group.foo.id}"
|
||||
cidr_ip = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
resource "alicloud_instance" "foo" {
|
||||
# cn-beijing
|
||||
image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
|
||||
|
||||
# series II
|
||||
instance_type = "ecs.n1.medium"
|
||||
internet_charge_type = "PayByBandwidth"
|
||||
internet_max_bandwidth_out = "5"
|
||||
system_disk_category = "cloud_efficiency"
|
||||
io_optimized = "optimized"
|
||||
|
||||
security_groups = ["${alicloud_security_group.foo.id}"]
|
||||
instance_name = "test_foo"
|
||||
}
|
||||
|
||||
resource "alicloud_slb" "foo" {
|
||||
name = "tf_test_slb_bind"
|
||||
internet_charge_type = "paybybandwidth"
|
||||
bandwidth = "5"
|
||||
internet = "true"
|
||||
}
|
||||
|
||||
resource "alicloud_slb_attachment" "foo" {
|
||||
slb_id = "${alicloud_slb.foo.id}"
|
||||
instances = ["${alicloud_instance.foo.id}"]
|
||||
}
|
||||
|
||||
`
|
|
@ -1,322 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudSlb_basic(t *testing.T) {
|
||||
var slb slb.LoadBalancerType
|
||||
|
||||
testCheckAttr := func() resource.TestCheckFunc {
|
||||
return func(*terraform.State) error {
|
||||
log.Printf("testCheckAttr slb AddressType is: %s", slb.AddressType)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_slb.bindwidth",
|
||||
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSlbDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
//test internet_charge_type is paybybandwidth
|
||||
resource.TestStep{
|
||||
Config: testAccSlbBindWidth,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSlbExists("alicloud_slb.bindwidth", &slb),
|
||||
testCheckAttr(),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_slb.bindwidth", "internet_charge_type", "paybybandwidth"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudSlb_traffic(t *testing.T) {
|
||||
var slb slb.LoadBalancerType
|
||||
|
||||
testCheckAttr := func() resource.TestCheckFunc {
|
||||
return func(*terraform.State) error {
|
||||
log.Printf("testCheckAttr slb AddressType is: %s", slb.AddressType)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_slb.traffic",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSlbDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
//test internet_charge_type is paybytraffic
|
||||
resource.TestStep{
|
||||
Config: testAccSlbTraffic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSlbExists("alicloud_slb.traffic", &slb),
|
||||
testCheckAttr(),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_slb.traffic", "name", "tf_test_slb_classic"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudSlb_listener(t *testing.T) {
|
||||
var slb slb.LoadBalancerType
|
||||
|
||||
testListener := func() resource.TestCheckFunc {
|
||||
return func(*terraform.State) error {
|
||||
listenerPorts := slb.ListenerPorts.ListenerPort[0]
|
||||
if listenerPorts != 2001 {
|
||||
return fmt.Errorf("bad loadbalancer listener: %#v", listenerPorts)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_slb.listener",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSlbDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccSlbListener,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSlbExists("alicloud_slb.listener", &slb),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_slb.listener", "name", "tf_test_slb"),
|
||||
testAccCheckListenersExists("alicloud_slb.listener", &slb, "http"),
|
||||
testListener(),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAlicloudSlb_vpc(t *testing.T) {
|
||||
var slb slb.LoadBalancerType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_slb.vpc",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSlbDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccSlb4Vpc,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSlbExists("alicloud_slb.vpc", &slb),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_slb.vpc", "name", "tf_test_slb_vpc"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckSlbExists(n string, slb *slb.LoadBalancerType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No SLB ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.DescribeLoadBalancerAttribute(rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance == nil {
|
||||
return fmt.Errorf("SLB not found")
|
||||
}
|
||||
|
||||
*slb = *instance
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckListenersExists(n string, slb *slb.LoadBalancerType, p string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No SLB ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.DescribeLoadBalancerAttribute(rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance == nil {
|
||||
return fmt.Errorf("SLB not found")
|
||||
}
|
||||
|
||||
exist := false
|
||||
for _, listener := range instance.ListenerPortsAndProtocol.ListenerPortAndProtocol {
|
||||
if listener.ListenerProtocol == p {
|
||||
exist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !exist {
|
||||
return fmt.Errorf("The %s protocol Listener not found.", p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckSlbDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_slb" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the Slb
|
||||
instance, err := client.DescribeLoadBalancerAttribute(rs.Primary.ID)
|
||||
|
||||
if instance != nil {
|
||||
return fmt.Errorf("SLB still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
// Verify the error is what we want
|
||||
if e.ErrorResponse.Code != LoadBalancerNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccSlbBindWidth = `
|
||||
resource "alicloud_slb" "bindwidth" {
|
||||
name = "tf_test_slb_bindwidth"
|
||||
internet_charge_type = "paybybandwidth"
|
||||
bandwidth = 5
|
||||
internet = true
|
||||
}
|
||||
`
|
||||
|
||||
const testAccSlbTraffic = `
|
||||
resource "alicloud_slb" "traffic" {
|
||||
name = "tf_test_slb_classic"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccSlbListener = `
|
||||
resource "alicloud_slb" "listener" {
|
||||
name = "tf_test_slb"
|
||||
internet_charge_type = "paybybandwidth"
|
||||
bandwidth = 5
|
||||
internet = true
|
||||
listener = [
|
||||
{
|
||||
"instance_port" = "2111"
|
||||
"lb_port" = "21"
|
||||
"lb_protocol" = "tcp"
|
||||
"bandwidth" = 1
|
||||
"persistence_timeout" = 500
|
||||
"health_check_type" = "http"
|
||||
},{
|
||||
"instance_port" = "8000"
|
||||
"lb_port" = "80"
|
||||
"lb_protocol" = "http"
|
||||
"sticky_session" = "on"
|
||||
"sticky_session_type" = "insert"
|
||||
"cookie_timeout" = 800
|
||||
"bandwidth" = 1
|
||||
},{
|
||||
"instance_port" = "8001"
|
||||
"lb_port" = "81"
|
||||
"lb_protocol" = "http"
|
||||
"sticky_session" = "on"
|
||||
"sticky_session_type" = "server"
|
||||
"cookie" = "testslblistenercookie"
|
||||
"cookie_timeout" = 1800
|
||||
"health_check" = "on"
|
||||
"health_check_domain" = "$_ip"
|
||||
"health_check_uri" = "/console"
|
||||
"health_check_connect_port" = 20
|
||||
"healthy_threshold" = 8
|
||||
"unhealthy_threshold" = 8
|
||||
"health_check_timeout" = 8
|
||||
"health_check_interval" = 4
|
||||
"health_check_http_code" = "http_2xx"
|
||||
"bandwidth" = 1
|
||||
},{
|
||||
"instance_port" = "2001"
|
||||
"lb_port" = "2001"
|
||||
"lb_protocol" = "udp"
|
||||
"bandwidth" = 1
|
||||
"persistence_timeout" = 700
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testAccSlb4Vpc = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
cidr_block = "172.16.0.0/21"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_slb" "vpc" {
|
||||
name = "tf_test_slb_vpc"
|
||||
//internet_charge_type = "paybybandwidth"
|
||||
vswitch_id = "${alicloud_vswitch.foo.id}"
|
||||
}
|
||||
`
|
|
@ -1,134 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAliyunSnatEntry() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunSnatEntryCreate,
|
||||
Read: resourceAliyunSnatEntryRead,
|
||||
Update: resourceAliyunSnatEntryUpdate,
|
||||
Delete: resourceAliyunSnatEntryDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"snat_table_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"source_vswitch_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"snat_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunSnatEntryCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).vpcconn
|
||||
|
||||
args := &ecs.CreateSnatEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
SnatTableId: d.Get("snat_table_id").(string),
|
||||
SourceVSwitchId: d.Get("source_vswitch_id").(string),
|
||||
SnatIp: d.Get("snat_ip").(string),
|
||||
}
|
||||
|
||||
resp, err := conn.CreateSnatEntry(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("CreateSnatEntry got error: %#v", err)
|
||||
}
|
||||
|
||||
d.SetId(resp.SnatEntryId)
|
||||
d.Set("snat_table_id", d.Get("snat_table_id").(string))
|
||||
|
||||
return resourceAliyunSnatEntryRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSnatEntryRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id())
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("snat_table_id", snatEntry.SnatTableId)
|
||||
d.Set("source_vswitch_id", snatEntry.SourceVSwitchId)
|
||||
d.Set("snat_ip", snatEntry.SnatIp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunSnatEntryUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifySnatEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
SnatTableId: snatEntry.SnatTableId,
|
||||
SnatEntryId: snatEntry.SnatEntryId,
|
||||
}
|
||||
|
||||
if d.HasChange("snat_ip") {
|
||||
d.SetPartial("snat_ip")
|
||||
var snat_ip string
|
||||
if v, ok := d.GetOk("snat_ip"); ok {
|
||||
snat_ip = v.(string)
|
||||
} else {
|
||||
return fmt.Errorf("cann't change snap_ip to empty string")
|
||||
}
|
||||
args.SnatIp = snat_ip
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifySnatEntry(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunSnatEntryRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSnatEntryDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
conn := client.vpcconn
|
||||
|
||||
snatEntryId := d.Id()
|
||||
snatTableId := d.Get("snat_table_id").(string)
|
||||
|
||||
args := &ecs.DeleteSnatEntryArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
SnatTableId: snatTableId,
|
||||
SnatEntryId: snatEntryId,
|
||||
}
|
||||
|
||||
if err := conn.DeleteSnatEntry(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,180 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudSnat_basic(t *testing.T) {
|
||||
var snat ecs.SnatEntrySetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_snat_entry.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckSnatEntryDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccSnatEntryConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSnatEntryExists(
|
||||
"alicloud_snat_entry.foo", &snat),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccSnatEntryUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSnatEntryExists(
|
||||
"alicloud_snat_entry.foo", &snat),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckSnatEntryDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_snat_entry" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the Snat entry
|
||||
instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID)
|
||||
|
||||
//this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error"
|
||||
if instance.SnatEntryId == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if instance.SnatEntryId != "" {
|
||||
return fmt.Errorf("Snat entry still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
|
||||
if !notFoundError(e) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckSnatEntryExists(n string, snat *ecs.SnatEntrySetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No SnatEntry ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance.SnatEntryId == "" {
|
||||
return fmt.Errorf("SnatEntry not found")
|
||||
}
|
||||
|
||||
*snat = instance
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const testAccSnatEntryConfig = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
cidr_block = "172.16.0.0/21"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_nat_gateway" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
spec = "Small"
|
||||
name = "test_foo"
|
||||
bandwidth_packages = [{
|
||||
ip_count = 2
|
||||
bandwidth = 5
|
||||
zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
},{
|
||||
ip_count = 1
|
||||
bandwidth = 6
|
||||
zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
}]
|
||||
depends_on = [
|
||||
"alicloud_vswitch.foo"]
|
||||
}
|
||||
resource "alicloud_snat_entry" "foo"{
|
||||
snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}"
|
||||
source_vswitch_id = "${alicloud_vswitch.foo.id}"
|
||||
snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccSnatEntryUpdate = `
|
||||
data "alicloud_zones" "default" {
|
||||
"available_resource_creation"= "VSwitch"
|
||||
}
|
||||
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
|
||||
resource "alicloud_vswitch" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
cidr_block = "172.16.0.0/21"
|
||||
availability_zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
}
|
||||
|
||||
resource "alicloud_nat_gateway" "foo" {
|
||||
vpc_id = "${alicloud_vpc.foo.id}"
|
||||
spec = "Small"
|
||||
name = "test_foo"
|
||||
bandwidth_packages = [{
|
||||
ip_count = 2
|
||||
bandwidth = 5
|
||||
zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
},{
|
||||
ip_count = 1
|
||||
bandwidth = 6
|
||||
zone = "${data.alicloud_zones.default.zones.2.id}"
|
||||
}]
|
||||
depends_on = [
|
||||
"alicloud_vswitch.foo"]
|
||||
}
|
||||
resource "alicloud_snat_entry" "foo"{
|
||||
snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}"
|
||||
source_vswitch_id = "${alicloud_vswitch.foo.id}"
|
||||
snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.1.public_ip_addresses}"
|
||||
}
|
||||
`
|
|
@ -1,201 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunVpc() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunVpcCreate,
|
||||
Read: resourceAliyunVpcRead,
|
||||
Update: resourceAliyunVpcUpdate,
|
||||
Delete: resourceAliyunVpcDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cidr_block": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateCIDRNetworkAddress,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < 2 || len(value) > 128 {
|
||||
errors = append(errors, fmt.Errorf("%s cannot be longer than 128 characters", k))
|
||||
}
|
||||
|
||||
if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
|
||||
errors = append(errors, fmt.Errorf("%s cannot starts with http:// or https://", k))
|
||||
}
|
||||
|
||||
return
|
||||
},
|
||||
},
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < 2 || len(value) > 256 {
|
||||
errors = append(errors, fmt.Errorf("%s cannot be longer than 256 characters", k))
|
||||
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
"router_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"router_table_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunVpcCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
args, err := buildAliyunVpcArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ecsconn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
var vpc *ecs.CreateVpcResponse
|
||||
err = resource.Retry(3*time.Minute, func() *resource.RetryError {
|
||||
resp, err := ecsconn.CreateVpc(args)
|
||||
if err != nil {
|
||||
if e, ok := err.(*common.Error); ok && (e.StatusCode == 400 || e.Code == UnknownError) {
|
||||
return resource.RetryableError(fmt.Errorf("Vpc is still creating result from some unknown error -- try again"))
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
vpc = resp
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Create vpc got an error :%#v", err)
|
||||
}
|
||||
|
||||
d.SetId(vpc.VpcId)
|
||||
d.Set("router_table_id", vpc.RouteTableId)
|
||||
|
||||
err = ecsconn.WaitForVpcAvailable(args.RegionId, vpc.VpcId, 60)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Timeout when WaitForVpcAvailable")
|
||||
}
|
||||
|
||||
return resourceAliyunVpcUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunVpcRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
vpc, err := client.DescribeVpc(d.Id())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if vpc == nil {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
d.Set("cidr_block", vpc.CidrBlock)
|
||||
d.Set("name", vpc.VpcName)
|
||||
d.Set("description", vpc.Description)
|
||||
d.Set("router_id", vpc.VRouterId)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunVpcUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifyVpcAttributeArgs{
|
||||
VpcId: d.Id(),
|
||||
}
|
||||
|
||||
if d.HasChange("name") {
|
||||
d.SetPartial("name")
|
||||
args.VpcName = d.Get("name").(string)
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
d.SetPartial("description")
|
||||
args.Description = d.Get("description").(string)
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifyVpcAttribute(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunVpcRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunVpcDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteVpc(d.Id())
|
||||
|
||||
if err != nil {
|
||||
return resource.RetryableError(fmt.Errorf("Vpc in use - trying again while it is deleted."))
|
||||
}
|
||||
|
||||
args := &ecs.DescribeVpcsArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
VpcId: d.Id(),
|
||||
}
|
||||
vpc, _, descErr := conn.DescribeVpcs(args)
|
||||
if descErr != nil {
|
||||
return resource.NonRetryableError(err)
|
||||
} else if vpc == nil || len(vpc) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Vpc in use - trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
func buildAliyunVpcArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateVpcArgs, error) {
|
||||
args := &ecs.CreateVpcArgs{
|
||||
RegionId: getRegion(d, meta),
|
||||
CidrBlock: d.Get("cidr_block").(string),
|
||||
}
|
||||
|
||||
if v := d.Get("name").(string); v != "" {
|
||||
args.VpcName = v
|
||||
}
|
||||
|
||||
if v := d.Get("description").(string); v != "" {
|
||||
args.Description = v
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,140 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAlicloudVpc_basic(t *testing.T) {
|
||||
var vpc ecs.VpcSetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_vpc.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckVpcDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccVpcConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckVpcExists("alicloud_vpc.foo", &vpc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_vpc.foo", "cidr_block", "172.16.0.0/12"),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"alicloud_vpc.foo", "router_id"),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"alicloud_vpc.foo", "router_table_id"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestAccAlicloudVpc_update(t *testing.T) {
|
||||
var vpc ecs.VpcSetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckVpcDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccVpcConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckVpcExists("alicloud_vpc.foo", &vpc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_vpc.foo", "cidr_block", "172.16.0.0/12"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccVpcConfigUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckVpcExists("alicloud_vpc.foo", &vpc),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_vpc.foo", "name", "tf_test_bar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckVpcExists(n string, vpc *ecs.VpcSetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No VPC ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.DescribeVpc(rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance == nil {
|
||||
return fmt.Errorf("VPC not found")
|
||||
}
|
||||
|
||||
*vpc = *instance
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckVpcDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_vpc" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the VPC
|
||||
instance, err := client.DescribeVpc(rs.Primary.ID)
|
||||
|
||||
if instance != nil {
|
||||
return fmt.Errorf("VPCs still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
|
||||
if e.ErrorResponse.Code != "InvalidVpcID.NotFound" {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccVpcConfig = `
|
||||
resource "alicloud_vpc" "foo" {
|
||||
name = "tf_test_foo"
|
||||
cidr_block = "172.16.0.0/12"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccVpcConfigUpdate = `
|
||||
resource "alicloud_vpc" "foo" {
|
||||
cidr_block = "172.16.0.0/12"
|
||||
name = "tf_test_bar"
|
||||
}
|
||||
`
|
|
@ -1,145 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func resourceAliyunRouteEntry() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunRouteEntryCreate,
|
||||
Read: resourceAliyunRouteEntryRead,
|
||||
Delete: resourceAliyunRouteEntryDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"router_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"route_table_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"destination_cidrblock": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"nexthop_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateRouteEntryNextHopType,
|
||||
},
|
||||
"nexthop_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunRouteEntryCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
rtId := d.Get("route_table_id").(string)
|
||||
rId := d.Get("router_id").(string)
|
||||
cidr := d.Get("destination_cidrblock").(string)
|
||||
nt := d.Get("nexthop_type").(string)
|
||||
ni := d.Get("nexthop_id").(string)
|
||||
|
||||
args, err := buildAliyunRouteEntryArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = conn.CreateRouteEntry(args)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// route_table_id:router_id:destination_cidrblock:nexthop_type:nexthop_id
|
||||
d.SetId(rtId + ":" + rId + ":" + cidr + ":" + nt + ":" + ni)
|
||||
d.Set("router_id", rId)
|
||||
|
||||
if err := conn.WaitForAllRouteEntriesAvailable(rId, rtId, defaultTimeout); err != nil {
|
||||
return fmt.Errorf("WaitFor route entry got error: %#v", err)
|
||||
}
|
||||
return resourceAliyunRouteEntryRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunRouteEntryRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*AliyunClient)
|
||||
parts := strings.Split(d.Id(), ":")
|
||||
rtId := parts[0]
|
||||
//rId := parts[1]
|
||||
cidr := parts[2]
|
||||
nexthop_type := parts[3]
|
||||
nexthop_id := parts[4]
|
||||
|
||||
en, err := client.QueryRouteEntry(rtId, cidr, nexthop_type, nexthop_id)
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error route entry: %#v", err)
|
||||
}
|
||||
|
||||
d.Set("route_table_id", en.RouteTableId)
|
||||
d.Set("destination_cidrblock", en.DestinationCidrBlock)
|
||||
d.Set("nexthop_type", en.NextHopType)
|
||||
d.Set("nexthop_id", en.InstanceId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunRouteEntryDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
con := meta.(*AliyunClient).ecsconn
|
||||
args, err := buildAliyunRouteEntryDeleteArgs(d, meta)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return con.DeleteRouteEntry(args)
|
||||
}
|
||||
|
||||
func buildAliyunRouteEntryArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateRouteEntryArgs, error) {
|
||||
|
||||
args := &ecs.CreateRouteEntryArgs{
|
||||
RouteTableId: d.Get("route_table_id").(string),
|
||||
DestinationCidrBlock: d.Get("destination_cidrblock").(string),
|
||||
}
|
||||
|
||||
if v := d.Get("nexthop_type").(string); v != "" {
|
||||
args.NextHopType = ecs.NextHopType(v)
|
||||
}
|
||||
|
||||
if v := d.Get("nexthop_id").(string); v != "" {
|
||||
args.NextHopId = v
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
||||
|
||||
func buildAliyunRouteEntryDeleteArgs(d *schema.ResourceData, meta interface{}) (*ecs.DeleteRouteEntryArgs, error) {
|
||||
|
||||
args := &ecs.DeleteRouteEntryArgs{
|
||||
RouteTableId: d.Get("route_table_id").(string),
|
||||
DestinationCidrBlock: d.Get("destination_cidrblock").(string),
|
||||
}
|
||||
|
||||
if v := d.Get("destination_cidrblock").(string); v != "" {
|
||||
args.DestinationCidrBlock = v
|
||||
}
|
||||
|
||||
if v := d.Get("nexthop_id").(string); v != "" {
|
||||
args.NextHopId = v
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,186 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccAlicloudRouteEntry_Basic(t *testing.T) {
|
||||
var rt ecs.RouteTableSetType
|
||||
var rn ecs.RouteEntrySetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_route_entry.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteEntryDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccRouteEntryConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableEntryExists(
|
||||
"alicloud_route_entry.foo", &rt, &rn),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"alicloud_route_entry.foo", "nexthop_id"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckRouteTableExists(rtId string, t *ecs.RouteTableSetType) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
//query route table
|
||||
rt, terr := client.QueryRouteTableById(rtId)
|
||||
|
||||
if terr != nil {
|
||||
return terr
|
||||
}
|
||||
|
||||
if rt == nil {
|
||||
return fmt.Errorf("Route Table not found")
|
||||
}
|
||||
|
||||
*t = *rt
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckRouteEntryExists(routeTableId, cidrBlock, nextHopType, nextHopId string, e *ecs.RouteEntrySetType) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
//query route table entry
|
||||
re, rerr := client.QueryRouteEntry(routeTableId, cidrBlock, nextHopType, nextHopId)
|
||||
|
||||
if rerr != nil {
|
||||
return rerr
|
||||
}
|
||||
|
||||
if re == nil {
|
||||
return fmt.Errorf("Route Table Entry not found")
|
||||
}
|
||||
|
||||
*e = *re
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckRouteTableEntryExists(n string, t *ecs.RouteTableSetType, e *ecs.RouteEntrySetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No Route Entry ID is set")
|
||||
}
|
||||
|
||||
parts := strings.Split(rs.Primary.ID, ":")
|
||||
|
||||
//query route table
|
||||
err := testAccCheckRouteTableExists(parts[0], t)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//query route table entry
|
||||
err = testAccCheckRouteEntryExists(parts[0], parts[2], parts[3], parts[4], e)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckRouteEntryDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_route_entry" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(rs.Primary.ID, ":")
|
||||
re, err := client.QueryRouteEntry(parts[0], parts[2], parts[3], parts[4])
|
||||
|
||||
if re != nil {
|
||||
return fmt.Errorf("Error Route Entry still exist")
|
||||
}
|
||||
|
||||
// Verify the error is what we want
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccRouteEntryConfig is the acceptance-test fixture: a VPC, vswitch,
// security group (with an SSH ingress rule), an instance, and a route entry
// whose nexthop is that instance.
const testAccRouteEntryConfig = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "10.1.0.0/21"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "10.1.1.0/24"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}

resource "alicloud_route_entry" "foo" {
	router_id = "${alicloud_vpc.foo.router_id}"
	route_table_id = "${alicloud_vpc.foo.router_table_id}"
	destination_cidrblock = "172.11.1.1/32"
	nexthop_type = "Instance"
	nexthop_id = "${alicloud_instance.foo.id}"
}

resource "alicloud_security_group" "tf_test_foo" {
	name = "tf_test_foo"
	description = "foo"
	vpc_id = "${alicloud_vpc.foo.id}"
}

resource "alicloud_security_group_rule" "ingress" {
	type = "ingress"
	ip_protocol = "tcp"
	nic_type = "intranet"
	policy = "accept"
	port_range = "22/22"
	priority = 1
	security_group_id = "${alicloud_security_group.tf_test_foo.id}"
	cidr_ip = "0.0.0.0/0"
}

resource "alicloud_instance" "foo" {
	# cn-beijing
	security_groups = ["${alicloud_security_group.tf_test_foo.id}"]

	vswitch_id = "${alicloud_vswitch.foo.id}"
	allocate_public_ip = true

	# series II
	instance_charge_type = "PostPaid"
	instance_type = "ecs.n1.small"
	internet_charge_type = "PayByTraffic"
	internet_max_bandwidth_out = 5
	io_optimized = "optimized"

	system_disk_category = "cloud_efficiency"
	image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
	instance_name = "test_foo"
}

`
|
|
@ -1,232 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func resourceAliyunSubnet() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAliyunSwitchCreate,
|
||||
Read: resourceAliyunSwitchRead,
|
||||
Update: resourceAliyunSwitchUpdate,
|
||||
Delete: resourceAliyunSwitchDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"vpc_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"cidr_block": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateSwitchCIDRNetworkAddress,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAliyunSwitchCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
args, err := buildAliyunSwitchArgs(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var vswitchID string
|
||||
err = resource.Retry(3*time.Minute, func() *resource.RetryError {
|
||||
vswId, err := conn.CreateVSwitch(args)
|
||||
if err != nil {
|
||||
if e, ok := err.(*common.Error); ok && (e.StatusCode == 400 || e.Code == UnknownError) {
|
||||
return resource.RetryableError(fmt.Errorf("Vswitch is still creating result from some unknown error -- try again"))
|
||||
}
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
vswitchID = vswId
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Create subnet got an error :%s", err)
|
||||
}
|
||||
|
||||
d.SetId(vswitchID)
|
||||
|
||||
err = conn.WaitForVSwitchAvailable(args.VpcId, vswitchID, 60)
|
||||
if err != nil {
|
||||
return fmt.Errorf("WaitForVSwitchAvailable got a error: %s", err)
|
||||
}
|
||||
|
||||
return resourceAliyunSwitchUpdate(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSwitchRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
args := &ecs.DescribeVSwitchesArgs{
|
||||
VpcId: d.Get("vpc_id").(string),
|
||||
VSwitchId: d.Id(),
|
||||
}
|
||||
|
||||
vswitches, _, err := conn.DescribeVSwitches(args)
|
||||
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if len(vswitches) == 0 {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
vswitch := vswitches[0]
|
||||
|
||||
d.Set("availability_zone", vswitch.ZoneId)
|
||||
d.Set("vpc_id", vswitch.VpcId)
|
||||
d.Set("cidr_block", vswitch.CidrBlock)
|
||||
d.Set("name", vswitch.VSwitchName)
|
||||
d.Set("description", vswitch.Description)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAliyunSwitchUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
attributeUpdate := false
|
||||
args := &ecs.ModifyVSwitchAttributeArgs{
|
||||
VSwitchId: d.Id(),
|
||||
}
|
||||
|
||||
if d.HasChange("name") {
|
||||
d.SetPartial("name")
|
||||
args.VSwitchName = d.Get("name").(string)
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
d.SetPartial("description")
|
||||
args.Description = d.Get("description").(string)
|
||||
|
||||
attributeUpdate = true
|
||||
}
|
||||
if attributeUpdate {
|
||||
if err := conn.ModifyVSwitchAttribute(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceAliyunSwitchRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAliyunSwitchDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AliyunClient).ecsconn
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
err := conn.DeleteVSwitch(d.Id())
|
||||
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code == VswitcInvalidRegionId {
|
||||
log.Printf("[ERROR] Delete Switch is failed.")
|
||||
return resource.NonRetryableError(err)
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Switch in use. -- trying again while it is deleted."))
|
||||
}
|
||||
|
||||
vsw, _, vswErr := conn.DescribeVSwitches(&ecs.DescribeVSwitchesArgs{
|
||||
VpcId: d.Get("vpc_id").(string),
|
||||
VSwitchId: d.Id(),
|
||||
})
|
||||
|
||||
if vswErr != nil {
|
||||
return resource.NonRetryableError(vswErr)
|
||||
} else if vsw == nil || len(vsw) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.RetryableError(fmt.Errorf("Switch in use. -- trying again while it is deleted."))
|
||||
})
|
||||
}
|
||||
|
||||
func buildAliyunSwitchArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateVSwitchArgs, error) {
|
||||
|
||||
client := meta.(*AliyunClient)
|
||||
|
||||
vpcID := d.Get("vpc_id").(string)
|
||||
|
||||
vpc, err := client.DescribeVpc(vpcID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if vpc == nil {
|
||||
return nil, fmt.Errorf("vpc_id not found")
|
||||
}
|
||||
|
||||
zoneID := d.Get("availability_zone").(string)
|
||||
|
||||
zone, err := client.DescribeZone(zoneID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = client.ResourceAvailable(zone, ecs.ResourceTypeVSwitch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cidrBlock := d.Get("cidr_block").(string)
|
||||
|
||||
args := &ecs.CreateVSwitchArgs{
|
||||
VpcId: vpcID,
|
||||
ZoneId: zoneID,
|
||||
CidrBlock: cidrBlock,
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("name"); ok && v != "" {
|
||||
args.VSwitchName = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok && v != "" {
|
||||
args.Description = v.(string)
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAlicloudVswitch_basic(t *testing.T) {
|
||||
var vsw ecs.VSwitchSetType
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
|
||||
// module name
|
||||
IDRefreshName: "alicloud_vswitch.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckVswitchDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccVswitchConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckVswitchExists("alicloud_vswitch.foo", &vsw),
|
||||
resource.TestCheckResourceAttr(
|
||||
"alicloud_vswitch.foo", "cidr_block", "172.16.0.0/21"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckVswitchExists(n string, vpc *ecs.VSwitchSetType) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No Vswitch ID is set")
|
||||
}
|
||||
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
instance, err := client.QueryVswitchById(rs.Primary.Attributes["vpc_id"], rs.Primary.ID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if instance == nil {
|
||||
return fmt.Errorf("Vswitch not found")
|
||||
}
|
||||
|
||||
*vpc = *instance
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckVswitchDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*AliyunClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "alicloud_vswitch" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find the Vswitch
|
||||
instance, err := client.QueryVswitchById(rs.Primary.Attributes["vpc_id"], rs.Primary.ID)
|
||||
|
||||
if instance != nil {
|
||||
return fmt.Errorf("Vswitch still exist")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
e, _ := err.(*common.Error)
|
||||
|
||||
if e.ErrorResponse.Code != "InvalidVswitchID.NotFound" {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccVswitchConfig is the acceptance-test fixture: a VPC with a single
// vswitch in the first available zone.
const testAccVswitchConfig = `
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}

resource "alicloud_vpc" "foo" {
	name = "tf_test_foo"
	cidr_block = "172.16.0.0/12"
}

resource "alicloud_vswitch" "foo" {
	vpc_id = "${alicloud_vpc.foo.id}"
	cidr_block = "172.16.0.0/21"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}
`
|
|
@ -1,259 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (client *AliyunClient) DescribeImage(imageId string) (*ecs.ImageType, error) {
|
||||
|
||||
pagination := common.Pagination{
|
||||
PageNumber: 1,
|
||||
}
|
||||
args := ecs.DescribeImagesArgs{
|
||||
Pagination: pagination,
|
||||
RegionId: client.Region,
|
||||
Status: ecs.ImageStatusAvailable,
|
||||
}
|
||||
|
||||
var allImages []ecs.ImageType
|
||||
|
||||
for {
|
||||
images, _, err := client.ecsconn.DescribeImages(&args)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if len(images) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
allImages = append(allImages, images...)
|
||||
|
||||
args.Pagination.PageNumber++
|
||||
}
|
||||
|
||||
if len(allImages) == 0 {
|
||||
return nil, common.GetClientErrorFromString("Not found")
|
||||
}
|
||||
|
||||
var image *ecs.ImageType
|
||||
imageIds := []string{}
|
||||
for _, im := range allImages {
|
||||
if im.ImageId == imageId {
|
||||
image = &im
|
||||
}
|
||||
imageIds = append(imageIds, im.ImageId)
|
||||
}
|
||||
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("image_id %s not exists in range %s, all images are %s", imageId, client.Region, imageIds)
|
||||
}
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
// DescribeZone validate zoneId is valid in region
|
||||
func (client *AliyunClient) DescribeZone(zoneID string) (*ecs.ZoneType, error) {
|
||||
zones, err := client.ecsconn.DescribeZones(client.Region)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error to list zones not found")
|
||||
}
|
||||
|
||||
var zone *ecs.ZoneType
|
||||
zoneIds := []string{}
|
||||
for _, z := range zones {
|
||||
if z.ZoneId == zoneID {
|
||||
zone = &ecs.ZoneType{
|
||||
ZoneId: z.ZoneId,
|
||||
LocalName: z.LocalName,
|
||||
AvailableResourceCreation: z.AvailableResourceCreation,
|
||||
AvailableDiskCategories: z.AvailableDiskCategories,
|
||||
}
|
||||
}
|
||||
zoneIds = append(zoneIds, z.ZoneId)
|
||||
}
|
||||
|
||||
if zone == nil {
|
||||
return nil, fmt.Errorf("availability_zone not exists in range %s, all zones are %s", client.Region, zoneIds)
|
||||
}
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// return multiIZ list of current region
|
||||
func (client *AliyunClient) DescribeMultiIZByRegion() (izs []string, err error) {
|
||||
resp, err := client.rdsconn.DescribeRegions()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error to list regions not found")
|
||||
}
|
||||
regions := resp.Regions.RDSRegion
|
||||
|
||||
zoneIds := []string{}
|
||||
for _, r := range regions {
|
||||
if r.RegionId == string(client.Region) && strings.Contains(r.ZoneId, MULTI_IZ_SYMBOL) {
|
||||
zoneIds = append(zoneIds, r.ZoneId)
|
||||
}
|
||||
}
|
||||
|
||||
return zoneIds, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryInstancesByIds(ids []string) (instances []ecs.InstanceAttributesType, err error) {
|
||||
idsStr, jerr := json.Marshal(ids)
|
||||
if jerr != nil {
|
||||
return nil, jerr
|
||||
}
|
||||
|
||||
args := ecs.DescribeInstancesArgs{
|
||||
RegionId: client.Region,
|
||||
InstanceIds: string(idsStr),
|
||||
}
|
||||
|
||||
instances, _, errs := client.ecsconn.DescribeInstances(&args)
|
||||
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryInstancesById(id string) (instance *ecs.InstanceAttributesType, err error) {
|
||||
ids := []string{id}
|
||||
|
||||
instances, errs := client.QueryInstancesByIds(ids)
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
if len(instances) == 0 {
|
||||
return nil, GetNotFoundErrorFromString(InstanceNotfound)
|
||||
}
|
||||
|
||||
return &instances[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryInstanceSystemDisk(id string) (disk *ecs.DiskItemType, err error) {
|
||||
args := ecs.DescribeDisksArgs{
|
||||
RegionId: client.Region,
|
||||
InstanceId: string(id),
|
||||
DiskType: ecs.DiskTypeAllSystem,
|
||||
}
|
||||
disks, _, err := client.ecsconn.DescribeDisks(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(disks) == 0 {
|
||||
return nil, common.GetClientErrorFromString(SystemDiskNotFound)
|
||||
}
|
||||
|
||||
return &disks[0], nil
|
||||
}
|
||||
|
||||
// ResourceAvailable check resource available for zone
|
||||
func (client *AliyunClient) ResourceAvailable(zone *ecs.ZoneType, resourceType ecs.ResourceType) error {
|
||||
available := false
|
||||
for _, res := range zone.AvailableResourceCreation.ResourceTypes {
|
||||
if res == resourceType {
|
||||
available = true
|
||||
}
|
||||
}
|
||||
if !available {
|
||||
return fmt.Errorf("%s is not available in %s zone of %s region", resourceType, zone.ZoneId, client.Region)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DiskAvailable(zone *ecs.ZoneType, diskCategory ecs.DiskCategory) error {
|
||||
available := false
|
||||
for _, dist := range zone.AvailableDiskCategories.DiskCategories {
|
||||
if dist == diskCategory {
|
||||
available = true
|
||||
}
|
||||
}
|
||||
if !available {
|
||||
return fmt.Errorf("%s is not available in %s zone of %s region", diskCategory, zone.ZoneId, client.Region)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// todo: support syc
|
||||
func (client *AliyunClient) JoinSecurityGroups(instanceId string, securityGroupIds []string) error {
|
||||
for _, sid := range securityGroupIds {
|
||||
err := client.ecsconn.JoinSecurityGroup(instanceId, sid)
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code != InvalidInstanceIdAlreadyExists {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) LeaveSecurityGroups(instanceId string, securityGroupIds []string) error {
|
||||
for _, sid := range securityGroupIds {
|
||||
err := client.ecsconn.LeaveSecurityGroup(instanceId, sid)
|
||||
if err != nil {
|
||||
e, _ := err.(*common.Error)
|
||||
if e.ErrorResponse.Code != InvalidSecurityGroupIdNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeSecurity(securityGroupId string) (*ecs.DescribeSecurityGroupAttributeResponse, error) {
|
||||
|
||||
args := &ecs.DescribeSecurityGroupAttributeArgs{
|
||||
RegionId: client.Region,
|
||||
SecurityGroupId: securityGroupId,
|
||||
}
|
||||
|
||||
return client.ecsconn.DescribeSecurityGroupAttribute(args)
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeSecurityByAttr(securityGroupId, direction, nicType string) (*ecs.DescribeSecurityGroupAttributeResponse, error) {
|
||||
|
||||
args := &ecs.DescribeSecurityGroupAttributeArgs{
|
||||
RegionId: client.Region,
|
||||
SecurityGroupId: securityGroupId,
|
||||
Direction: direction,
|
||||
NicType: ecs.NicType(nicType),
|
||||
}
|
||||
|
||||
return client.ecsconn.DescribeSecurityGroupAttribute(args)
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeSecurityGroupRule(securityGroupId, direction, nicType, ipProtocol, portRange string) (*ecs.PermissionType, error) {
|
||||
sg, err := client.DescribeSecurityByAttr(securityGroupId, direction, nicType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, p := range sg.Permissions.Permission {
|
||||
if strings.ToLower(string(p.IpProtocol)) == ipProtocol && p.PortRange == portRange {
|
||||
return &p, nil
|
||||
}
|
||||
}
|
||||
return nil, GetNotFoundErrorFromString("Security group rule not found")
|
||||
|
||||
}
|
||||
|
||||
func (client *AliyunClient) RevokeSecurityGroup(args *ecs.RevokeSecurityGroupArgs) error {
|
||||
//when the rule is not exist, api will return success(200)
|
||||
return client.ecsconn.RevokeSecurityGroup(args)
|
||||
}
|
||||
|
||||
func (client *AliyunClient) RevokeSecurityGroupEgress(args *ecs.RevokeSecurityGroupEgressArgs) error {
|
||||
//when the rule is not exist, api will return success(200)
|
||||
return client.ecsconn.RevokeSecurityGroupEgress(args)
|
||||
}
|
|
@ -1,167 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/ess"
|
||||
)
|
||||
|
||||
func (client *AliyunClient) DescribeScalingGroupById(sgId string) (*ess.ScalingGroupItemType, error) {
|
||||
args := ess.DescribeScalingGroupsArgs{
|
||||
RegionId: client.Region,
|
||||
ScalingGroupId: []string{sgId},
|
||||
}
|
||||
|
||||
sgs, _, err := client.essconn.DescribeScalingGroups(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(sgs) == 0 {
|
||||
return nil, GetNotFoundErrorFromString("Scaling group not found")
|
||||
}
|
||||
|
||||
return &sgs[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DeleteScalingGroupById(sgId string) error {
|
||||
args := ess.DeleteScalingGroupArgs{
|
||||
ScalingGroupId: sgId,
|
||||
ForceDelete: true,
|
||||
}
|
||||
|
||||
_, err := client.essconn.DeleteScalingGroup(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeScalingConfigurationById(sgId, configId string) (*ess.ScalingConfigurationItemType, error) {
|
||||
args := ess.DescribeScalingConfigurationsArgs{
|
||||
RegionId: client.Region,
|
||||
ScalingGroupId: sgId,
|
||||
ScalingConfigurationId: []string{configId},
|
||||
}
|
||||
|
||||
cs, _, err := client.essconn.DescribeScalingConfigurations(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil, GetNotFoundErrorFromString("Scaling configuration not found")
|
||||
}
|
||||
|
||||
return &cs[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) ActiveScalingConfigurationById(sgId, configId string) error {
|
||||
args := ess.ModifyScalingGroupArgs{
|
||||
ScalingGroupId: sgId,
|
||||
ActiveScalingConfigurationId: configId,
|
||||
}
|
||||
|
||||
_, err := client.essconn.ModifyScalingGroup(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) EnableScalingConfigurationById(sgId, configId string, ids []string) error {
|
||||
args := ess.EnableScalingGroupArgs{
|
||||
ScalingGroupId: sgId,
|
||||
ActiveScalingConfigurationId: configId,
|
||||
}
|
||||
|
||||
if len(ids) > 0 {
|
||||
args.InstanceId = ids
|
||||
}
|
||||
|
||||
_, err := client.essconn.EnableScalingGroup(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DisableScalingConfigurationById(sgId string) error {
|
||||
args := ess.DisableScalingGroupArgs{
|
||||
ScalingGroupId: sgId,
|
||||
}
|
||||
|
||||
_, err := client.essconn.DisableScalingGroup(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DeleteScalingConfigurationById(sgId, configId string) error {
|
||||
args := ess.DeleteScalingConfigurationArgs{
|
||||
ScalingGroupId: sgId,
|
||||
ScalingConfigurationId: configId,
|
||||
}
|
||||
|
||||
_, err := client.essconn.DeleteScalingConfiguration(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
// Flattens an array of datadisk into a []map[string]interface{}
|
||||
func flattenDataDiskMappings(list []ess.DataDiskItemType) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(list))
|
||||
for _, i := range list {
|
||||
l := map[string]interface{}{
|
||||
"size": i.Size,
|
||||
"category": i.Category,
|
||||
"snapshot_id": i.SnapshotId,
|
||||
"device": i.Device,
|
||||
}
|
||||
result = append(result, l)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeScalingRuleById(sgId, ruleId string) (*ess.ScalingRuleItemType, error) {
|
||||
args := ess.DescribeScalingRulesArgs{
|
||||
RegionId: client.Region,
|
||||
ScalingGroupId: sgId,
|
||||
ScalingRuleId: []string{ruleId},
|
||||
}
|
||||
|
||||
cs, _, err := client.essconn.DescribeScalingRules(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil, GetNotFoundErrorFromString("Scaling rule not found")
|
||||
}
|
||||
|
||||
return &cs[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DeleteScalingRuleById(ruleId string) error {
|
||||
args := ess.DeleteScalingRuleArgs{
|
||||
RegionId: client.Region,
|
||||
ScalingRuleId: ruleId,
|
||||
}
|
||||
|
||||
_, err := client.essconn.DeleteScalingRule(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeScheduleById(scheduleId string) (*ess.ScheduledTaskItemType, error) {
|
||||
args := ess.DescribeScheduledTasksArgs{
|
||||
RegionId: client.Region,
|
||||
ScheduledTaskId: []string{scheduleId},
|
||||
}
|
||||
|
||||
cs, _, err := client.essconn.DescribeScheduledTasks(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(cs) == 0 {
|
||||
return nil, GetNotFoundErrorFromString("Schedule not found")
|
||||
}
|
||||
|
||||
return &cs[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DeleteScheduleById(scheduleId string) error {
|
||||
args := ess.DeleteScheduledTaskArgs{
|
||||
RegionId: client.Region,
|
||||
ScheduledTaskId: scheduleId,
|
||||
}
|
||||
|
||||
_, err := client.essconn.DeleteScheduledTask(&args)
|
||||
return err
|
||||
}
|
|
@ -1,288 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/rds"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//
|
||||
// _______________ _______________ _______________
|
||||
// | | ______param______\ | | _____request_____\ | |
|
||||
// | Business | | Service | | SDK/API |
|
||||
// | | __________________ | | __________________ | |
|
||||
// |______________| \ (obj, err) |______________| \ (status, cont) |______________|
|
||||
// | |
|
||||
// |A. {instance, nil} |a. {200, content}
|
||||
// |B. {nil, error} |b. {200, nil}
|
||||
// |c. {4xx, nil}
|
||||
//
|
||||
// The API return 200 for resource not found.
|
||||
// When getInstance is empty, then throw InstanceNotfound error.
|
||||
// That the business layer only need to check error.
|
||||
func (client *AliyunClient) DescribeDBInstanceById(id string) (instance *rds.DBInstanceAttribute, err error) {
|
||||
arrtArgs := rds.DescribeDBInstancesArgs{
|
||||
DBInstanceId: id,
|
||||
}
|
||||
resp, err := client.rdsconn.DescribeDBInstanceAttribute(&arrtArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attr := resp.Items.DBInstanceAttribute
|
||||
|
||||
if len(attr) <= 0 {
|
||||
return nil, GetNotFoundErrorFromString("DB instance not found")
|
||||
}
|
||||
|
||||
return &attr[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) CreateAccountByInfo(instanceId, username, pwd string) error {
|
||||
conn := client.rdsconn
|
||||
args := rds.CreateAccountArgs{
|
||||
DBInstanceId: instanceId,
|
||||
AccountName: username,
|
||||
AccountPassword: pwd,
|
||||
}
|
||||
|
||||
if _, err := conn.CreateAccount(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := conn.WaitForAccount(instanceId, username, rds.Available, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) CreateDatabaseByInfo(instanceId, dbName, charset, desp string) error {
|
||||
conn := client.rdsconn
|
||||
args := rds.CreateDatabaseArgs{
|
||||
DBInstanceId: instanceId,
|
||||
DBName: dbName,
|
||||
CharacterSetName: charset,
|
||||
DBDescription: desp,
|
||||
}
|
||||
_, err := conn.CreateDatabase(&args)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeDatabaseByName(instanceId, dbName string) (ds []rds.Database, err error) {
|
||||
conn := client.rdsconn
|
||||
args := rds.DescribeDatabasesArgs{
|
||||
DBInstanceId: instanceId,
|
||||
DBName: dbName,
|
||||
}
|
||||
|
||||
resp, err := conn.DescribeDatabases(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.Databases.Database, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) GrantDBPrivilege2Account(instanceId, username, dbName string) error {
|
||||
conn := client.rdsconn
|
||||
pargs := rds.GrantAccountPrivilegeArgs{
|
||||
DBInstanceId: instanceId,
|
||||
AccountName: username,
|
||||
DBName: dbName,
|
||||
AccountPrivilege: rds.ReadWrite,
|
||||
}
|
||||
if _, err := conn.GrantAccountPrivilege(&pargs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := conn.WaitForAccountPrivilege(instanceId, username, dbName, rds.ReadWrite, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) AllocateDBPublicConnection(instanceId, port string) error {
|
||||
conn := client.rdsconn
|
||||
args := rds.AllocateInstancePublicConnectionArgs{
|
||||
DBInstanceId: instanceId,
|
||||
ConnectionStringPrefix: instanceId + "o",
|
||||
Port: port,
|
||||
}
|
||||
|
||||
if _, err := conn.AllocateInstancePublicConnection(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := conn.WaitForPublicConnection(instanceId, 600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) ConfigDBBackup(instanceId, backupTime, backupPeriod string, retentionPeriod int) error {
|
||||
bargs := rds.BackupPolicy{
|
||||
PreferredBackupTime: backupTime,
|
||||
PreferredBackupPeriod: backupPeriod,
|
||||
BackupRetentionPeriod: retentionPeriod,
|
||||
}
|
||||
args := rds.ModifyBackupPolicyArgs{
|
||||
DBInstanceId: instanceId,
|
||||
BackupPolicy: bargs,
|
||||
}
|
||||
|
||||
if _, err := client.rdsconn.ModifyBackupPolicy(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.rdsconn.WaitForInstance(instanceId, rds.Running, 600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) ModifyDBSecurityIps(instanceId, ips string) error {
|
||||
sargs := rds.DBInstanceIPArray{
|
||||
SecurityIps: ips,
|
||||
}
|
||||
|
||||
args := rds.ModifySecurityIpsArgs{
|
||||
DBInstanceId: instanceId,
|
||||
DBInstanceIPArray: sargs,
|
||||
}
|
||||
|
||||
if _, err := client.rdsconn.ModifySecurityIps(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.rdsconn.WaitForInstance(instanceId, rds.Running, 600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeDBSecurityIps(instanceId string) (ips []rds.DBInstanceIPList, err error) {
|
||||
args := rds.DescribeDBInstanceIPsArgs{
|
||||
DBInstanceId: instanceId,
|
||||
}
|
||||
|
||||
resp, err := client.rdsconn.DescribeDBInstanceIPs(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Items.DBInstanceIPArray, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) GetSecurityIps(instanceId string) ([]string, error) {
|
||||
arr, err := client.DescribeDBSecurityIps(instanceId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ips, separator string
|
||||
for _, ip := range arr {
|
||||
ips += separator + ip.SecurityIPList
|
||||
separator = COMMA_SEPARATED
|
||||
}
|
||||
return strings.Split(ips, COMMA_SEPARATED), nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) ModifyDBClassStorage(instanceId, class, storage string) error {
|
||||
conn := client.rdsconn
|
||||
args := rds.ModifyDBInstanceSpecArgs{
|
||||
DBInstanceId: instanceId,
|
||||
PayType: rds.Postpaid,
|
||||
DBInstanceClass: class,
|
||||
DBInstanceStorage: storage,
|
||||
}
|
||||
|
||||
if _, err := conn.ModifyDBInstanceSpec(&args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := conn.WaitForInstance(instanceId, rds.Running, 600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// turn period to TimeType
|
||||
func TransformPeriod2Time(period int, chargeType string) (ut int, tt common.TimeType) {
|
||||
if chargeType == string(rds.Postpaid) {
|
||||
return 1, common.Day
|
||||
}
|
||||
|
||||
if period >= 1 && period <= 9 {
|
||||
return period, common.Month
|
||||
}
|
||||
|
||||
if period == 12 {
|
||||
return 1, common.Year
|
||||
}
|
||||
|
||||
if period == 24 {
|
||||
return 2, common.Year
|
||||
}
|
||||
return 0, common.Day
|
||||
|
||||
}
|
||||
|
||||
// turn TimeType to Period
|
||||
func TransformTime2Period(ut int, tt common.TimeType) (period int) {
|
||||
if tt == common.Year {
|
||||
return 12 * ut
|
||||
}
|
||||
|
||||
return ut
|
||||
|
||||
}
|
||||
|
||||
// Flattens an array of databases into a []map[string]interface{}
|
||||
func flattenDatabaseMappings(list []rds.Database) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(list))
|
||||
for _, i := range list {
|
||||
l := map[string]interface{}{
|
||||
"db_name": i.DBName,
|
||||
"character_set_name": i.CharacterSetName,
|
||||
"db_description": i.DBDescription,
|
||||
}
|
||||
result = append(result, l)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func flattenDBBackup(list []rds.BackupPolicy) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(list))
|
||||
for _, i := range list {
|
||||
l := map[string]interface{}{
|
||||
"preferred_backup_period": i.PreferredBackupPeriod,
|
||||
"preferred_backup_time": i.PreferredBackupTime,
|
||||
"backup_retention_period": i.LogBackupRetentionPeriod,
|
||||
}
|
||||
result = append(result, l)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func flattenDBSecurityIPs(list []rds.DBInstanceIPList) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(list))
|
||||
for _, i := range list {
|
||||
l := map[string]interface{}{
|
||||
"security_ips": i.SecurityIPList,
|
||||
}
|
||||
result = append(result, l)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Flattens an array of databases connection into a []map[string]interface{}
|
||||
func flattenDBConnections(list []rds.DBInstanceNetInfo) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(list))
|
||||
for _, i := range list {
|
||||
l := map[string]interface{}{
|
||||
"connection_string": i.ConnectionString,
|
||||
"ip_type": i.IPType,
|
||||
"ip_address": i.IPAddress,
|
||||
}
|
||||
result = append(result, l)
|
||||
}
|
||||
return result
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
)
|
||||
|
||||
func (client *AliyunClient) DescribeLoadBalancerAttribute(slbId string) (*slb.LoadBalancerType, error) {
|
||||
loadBalancer, err := client.slbconn.DescribeLoadBalancerAttribute(slbId)
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if loadBalancer != nil {
|
||||
return loadBalancer, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
|
@ -1,227 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (client *AliyunClient) DescribeEipAddress(allocationId string) (*ecs.EipAddressSetType, error) {
|
||||
|
||||
args := ecs.DescribeEipAddressesArgs{
|
||||
RegionId: client.Region,
|
||||
AllocationId: allocationId,
|
||||
}
|
||||
|
||||
eips, _, err := client.ecsconn.DescribeEipAddresses(&args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(eips) == 0 {
|
||||
return nil, common.GetClientErrorFromString("Not found")
|
||||
}
|
||||
|
||||
return &eips[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeNatGateway(natGatewayId string) (*ecs.NatGatewaySetType, error) {
|
||||
|
||||
args := &ecs.DescribeNatGatewaysArgs{
|
||||
RegionId: client.Region,
|
||||
NatGatewayId: natGatewayId,
|
||||
}
|
||||
|
||||
natGateways, _, err := client.vpcconn.DescribeNatGateways(args)
|
||||
//fmt.Println("natGateways %#v", natGateways)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(natGateways) == 0 {
|
||||
return nil, common.GetClientErrorFromString("Not found")
|
||||
}
|
||||
|
||||
return &natGateways[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeVpc(vpcId string) (*ecs.VpcSetType, error) {
|
||||
args := ecs.DescribeVpcsArgs{
|
||||
RegionId: client.Region,
|
||||
VpcId: vpcId,
|
||||
}
|
||||
|
||||
vpcs, _, err := client.ecsconn.DescribeVpcs(&args)
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(vpcs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &vpcs[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeSnatEntry(snatTableId string, snatEntryId string) (ecs.SnatEntrySetType, error) {
|
||||
|
||||
var resultSnat ecs.SnatEntrySetType
|
||||
|
||||
args := &ecs.DescribeSnatTableEntriesArgs{
|
||||
RegionId: client.Region,
|
||||
SnatTableId: snatTableId,
|
||||
}
|
||||
|
||||
snatEntries, _, err := client.vpcconn.DescribeSnatTableEntries(args)
|
||||
|
||||
//this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error"
|
||||
//so judge the snatEntries length priority
|
||||
if len(snatEntries) == 0 {
|
||||
return resultSnat, common.GetClientErrorFromString(InstanceNotfound)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return resultSnat, err
|
||||
}
|
||||
|
||||
findSnat := false
|
||||
|
||||
for _, snat := range snatEntries {
|
||||
if snat.SnatEntryId == snatEntryId {
|
||||
resultSnat = snat
|
||||
findSnat = true
|
||||
}
|
||||
}
|
||||
if !findSnat {
|
||||
return resultSnat, common.GetClientErrorFromString(NotFindSnatEntryBySnatId)
|
||||
}
|
||||
|
||||
return resultSnat, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) DescribeForwardEntry(forwardTableId string, forwardEntryId string) (ecs.ForwardTableEntrySetType, error) {
|
||||
|
||||
var resultFoward ecs.ForwardTableEntrySetType
|
||||
|
||||
args := &ecs.DescribeForwardTableEntriesArgs{
|
||||
RegionId: client.Region,
|
||||
ForwardTableId: forwardTableId,
|
||||
}
|
||||
|
||||
forwardEntries, _, err := client.vpcconn.DescribeForwardTableEntries(args)
|
||||
|
||||
//this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error"
|
||||
//so judge the snatEntries length priority
|
||||
if len(forwardEntries) == 0 {
|
||||
return resultFoward, common.GetClientErrorFromString(InstanceNotfound)
|
||||
}
|
||||
|
||||
findForward := false
|
||||
|
||||
for _, forward := range forwardEntries {
|
||||
if forward.ForwardEntryId == forwardEntryId {
|
||||
resultFoward = forward
|
||||
findForward = true
|
||||
}
|
||||
}
|
||||
if !findForward {
|
||||
return resultFoward, common.GetClientErrorFromString(NotFindForwardEntryByForwardId)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return resultFoward, err
|
||||
}
|
||||
|
||||
return resultFoward, nil
|
||||
}
|
||||
|
||||
// describe vswitch by param filters
|
||||
func (client *AliyunClient) QueryVswitches(args *ecs.DescribeVSwitchesArgs) (vswitches []ecs.VSwitchSetType, err error) {
|
||||
vsws, _, err := client.ecsconn.DescribeVSwitches(args)
|
||||
if err != nil {
|
||||
if notFoundError(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vsws, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryVswitchById(vpcId, vswitchId string) (vsw *ecs.VSwitchSetType, err error) {
|
||||
args := &ecs.DescribeVSwitchesArgs{
|
||||
VpcId: vpcId,
|
||||
VSwitchId: vswitchId,
|
||||
}
|
||||
vsws, err := client.QueryVswitches(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(vsws) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &vsws[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryRouteTables(args *ecs.DescribeRouteTablesArgs) (routeTables []ecs.RouteTableSetType, err error) {
|
||||
rts, _, err := client.ecsconn.DescribeRouteTables(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rts, nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryRouteTableById(routeTableId string) (rt *ecs.RouteTableSetType, err error) {
|
||||
args := &ecs.DescribeRouteTablesArgs{
|
||||
RouteTableId: routeTableId,
|
||||
}
|
||||
rts, err := client.QueryRouteTables(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(rts) == 0 {
|
||||
return nil, &common.Error{ErrorResponse: common.ErrorResponse{Message: Notfound}}
|
||||
}
|
||||
|
||||
return &rts[0], nil
|
||||
}
|
||||
|
||||
func (client *AliyunClient) QueryRouteEntry(routeTableId, cidrBlock, nextHopType, nextHopId string) (rn *ecs.RouteEntrySetType, err error) {
|
||||
rt, errs := client.QueryRouteTableById(routeTableId)
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
for _, e := range rt.RouteEntrys.RouteEntry {
|
||||
if strings.ToLower(string(e.DestinationCidrBlock)) == cidrBlock {
|
||||
return &e, nil
|
||||
}
|
||||
}
|
||||
return nil, GetNotFoundErrorFromString("Vpc router entry not found")
|
||||
}
|
||||
|
||||
func (client *AliyunClient) GetVpcIdByVSwitchId(vswitchId string) (vpcId string, err error) {
|
||||
|
||||
vs, _, err := client.ecsconn.DescribeVpcs(&ecs.DescribeVpcsArgs{
|
||||
RegionId: client.Region,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, v := range vs {
|
||||
for _, sw := range v.VSwitchIds.VSwitchId {
|
||||
if sw == vswitchId {
|
||||
return v.VpcId, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", &common.Error{ErrorResponse: common.ErrorResponse{Message: Notfound}}
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// String returns a pointer to the given string value.
func String(v string) *string {
	s := v
	return &s
}
|
||||
|
||||
// tagsSchema returns the schema to use for tags.
|
||||
//
|
||||
func tagsSchema() *schema.Schema {
|
||||
return &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
//Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Optional: true,
|
||||
}
|
||||
}
|
||||
|
||||
// setTags is a helper to set the tags for a resource. It expects the
|
||||
// tags field to be named "tags"
|
||||
func setTags(client *AliyunClient, resourceType ecs.TagResourceType, d *schema.ResourceData) error {
|
||||
|
||||
conn := client.ecsconn
|
||||
|
||||
if d.HasChange("tags") {
|
||||
oraw, nraw := d.GetChange("tags")
|
||||
o := oraw.(map[string]interface{})
|
||||
n := nraw.(map[string]interface{})
|
||||
create, remove := diffTags(tagsFromMap(o), tagsFromMap(n))
|
||||
|
||||
// Set tags
|
||||
if len(remove) > 0 {
|
||||
log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id())
|
||||
err := RemoveTags(conn, &RemoveTagsArgs{
|
||||
RegionId: client.Region,
|
||||
ResourceId: d.Id(),
|
||||
ResourceType: resourceType,
|
||||
Tag: remove,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Remove tags got error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(create) > 0 {
|
||||
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
|
||||
err := AddTags(conn, &AddTagsArgs{
|
||||
RegionId: client.Region,
|
||||
ResourceId: d.Id(),
|
||||
ResourceType: resourceType,
|
||||
Tag: create,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Creating tags got error: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// diffTags takes our tags locally and the ones remotely and returns
|
||||
// the set of tags that must be created, and the set of tags that must
|
||||
// be destroyed.
|
||||
func diffTags(oldTags, newTags []Tag) ([]Tag, []Tag) {
|
||||
// First, we're creating everything we have
|
||||
create := make(map[string]interface{})
|
||||
for _, t := range newTags {
|
||||
create[t.Key] = t.Value
|
||||
}
|
||||
|
||||
// Build the list of what to remove
|
||||
var remove []Tag
|
||||
for _, t := range oldTags {
|
||||
old, ok := create[t.Key]
|
||||
if !ok || old != t.Value {
|
||||
// Delete it!
|
||||
remove = append(remove, t)
|
||||
}
|
||||
}
|
||||
|
||||
return tagsFromMap(create), remove
|
||||
}
|
||||
|
||||
// tagsFromMap returns the tags for the given map of data.
|
||||
func tagsFromMap(m map[string]interface{}) []Tag {
|
||||
result := make([]Tag, 0, len(m))
|
||||
for k, v := range m {
|
||||
result = append(result, Tag{
|
||||
Key: k,
|
||||
Value: v.(string),
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func tagsToMap(tags []ecs.TagItemType) map[string]string {
|
||||
result := make(map[string]string)
|
||||
for _, t := range tags {
|
||||
result[t.TagKey] = t.TagValue
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func tagsToString(tags []ecs.TagItemType) string {
|
||||
result := make([]string, 0, len(tags))
|
||||
|
||||
for _, tag := range tags {
|
||||
ecsTags := ecs.TagItemType{
|
||||
TagKey: tag.TagKey,
|
||||
TagValue: tag.TagValue,
|
||||
}
|
||||
result = append(result, ecsTags.TagKey+":"+ecsTags.TagValue)
|
||||
}
|
||||
|
||||
return strings.Join(result, ",")
|
||||
}
|
|
@ -1,578 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// common
|
||||
// validateInstancePort ensures the value is a TCP/UDP port in [1, 65535].
func validateInstancePort(v interface{}, k string) (ws []string, errors []error) {
	port := v.(int)
	if port >= 1 && port <= 65535 {
		return
	}
	errors = append(errors, fmt.Errorf(
		"%q must be a valid port between 1 and 65535",
		k))
	return
}
|
||||
|
||||
func validateInstanceProtocol(v interface{}, k string) (ws []string, errors []error) {
|
||||
protocol := v.(string)
|
||||
if !isProtocolValid(protocol) {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q is an invalid value. Valid values are either http, https, tcp or udp",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ecs
|
||||
func validateDiskCategory(v interface{}, k string) (ws []string, errors []error) {
|
||||
category := ecs.DiskCategory(v.(string))
|
||||
if category != ecs.DiskCategoryCloud && category != ecs.DiskCategoryCloudEfficiency && category != ecs.DiskCategoryCloudSSD {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s %s", k, ecs.DiskCategoryCloud, ecs.DiskCategoryCloudEfficiency, ecs.DiskCategoryCloudSSD))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// validateInstanceName checks that an instance name is 2-128 characters long
// and does not start with "http://" or "https://".
//
// Fix: the length error previously only mentioned the upper bound even
// though names shorter than 2 characters are rejected too, and read
// "cannot starts with".
func validateInstanceName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) < 2 || len(value) > 128 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 128 characters long", k))
	}

	if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
		errors = append(errors, fmt.Errorf("%s cannot start with http:// or https://", k))
	}

	return
}
|
||||
|
||||
// validateInstanceDescription checks that a description is 2-256 characters.
//
// Fix: the error previously only mentioned the upper bound even though
// descriptions shorter than 2 characters are rejected too.
func validateInstanceDescription(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) < 2 || len(value) > 256 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 256 characters long", k))
	}
	return
}
|
||||
|
||||
// validateDiskName checks that a disk name is either empty (unset) or 2-128
// characters long and does not start with "http://" or "https://".
//
// Fix: the length error previously only mentioned the upper bound even
// though names shorter than 2 characters are rejected too, and read
// "cannot starts with".
func validateDiskName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	// An unset name is allowed; the provider generates one.
	if value == "" {
		return
	}

	if len(value) < 2 || len(value) > 128 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 128 characters long", k))
	}

	if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
		errors = append(errors, fmt.Errorf("%s cannot start with http:// or https://", k))
	}

	return
}
|
||||
|
||||
// validateDiskDescription checks that a disk description is 2-256 characters.
//
// Fix: the error previously only mentioned the upper bound even though
// descriptions shorter than 2 characters are rejected too.
func validateDiskDescription(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) < 2 || len(value) > 256 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 256 characters long", k))
	}
	return
}
|
||||
|
||||
//security group
|
||||
// validateSecurityGroupName checks that a security group name is 2-128
// characters long and does not start with "http://" or "https://".
//
// Fix: the length error previously only mentioned the upper bound even
// though names shorter than 2 characters are rejected too, and read
// "cannot starts with".
func validateSecurityGroupName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) < 2 || len(value) > 128 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 128 characters long", k))
	}

	if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
		errors = append(errors, fmt.Errorf("%s cannot start with http:// or https://", k))
	}

	return
}
|
||||
|
||||
// validateSecurityGroupDescription checks that a security group description
// is 2-256 characters.
//
// Fix: the error previously only mentioned the upper bound even though
// descriptions shorter than 2 characters are rejected too.
func validateSecurityGroupDescription(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) < 2 || len(value) > 256 {
		errors = append(errors, fmt.Errorf("%q must be between 2 and 256 characters long", k))
	}
	return
}
|
||||
|
||||
func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []error) {
|
||||
rt := GroupRuleDirection(v.(string))
|
||||
if rt != GroupRuleIngress && rt != GroupRuleEgress {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleIngress, GroupRuleEgress))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSecurityRuleIpProtocol(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRuleIpProtocol(v.(string))
|
||||
if pt != GroupRuleTcp && pt != GroupRuleUdp && pt != GroupRuleIcmp && pt != GroupRuleGre && pt != GroupRuleAll {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s %s %s %s", k,
|
||||
GroupRuleTcp, GroupRuleUdp, GroupRuleIcmp, GroupRuleGre, GroupRuleAll))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSecurityRuleNicType(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRuleNicType(v.(string))
|
||||
if pt != GroupRuleInternet && pt != GroupRuleIntranet {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleInternet, GroupRuleIntranet))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSecurityRulePolicy(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRulePolicy(v.(string))
|
||||
if pt != GroupRulePolicyAccept && pt != GroupRulePolicyDrop {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRulePolicyAccept, GroupRulePolicyDrop))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSecurityPriority(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 100 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid authorization policy priority between 1 and 100",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// validateCIDRNetworkAddress ensures that the string value is a valid CIDR that
|
||||
// represents a network address - it adds an error otherwise
|
||||
// validateCIDRNetworkAddress ensures the string value is a valid CIDR that
// represents a network address (no host bits set); it adds an error
// otherwise.
func validateCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	_, network, parseErr := net.ParseCIDR(value)
	if parseErr != nil {
		errors = append(errors, fmt.Errorf(
			"%q must contain a valid CIDR, got error parsing: %s", k, parseErr))
		return
	}

	if network == nil || value != network.String() {
		errors = append(errors, fmt.Errorf(
			"%q must contain a valid network CIDR, expected %q, got %q",
			k, network, value))
	}

	return
}
|
||||
|
||||
func validateRouteEntryNextHopType(v interface{}, k string) (ws []string, errors []error) {
|
||||
nht := ecs.NextHopType(v.(string))
|
||||
if nht != ecs.NextHopIntance && nht != ecs.NextHopTunnel {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k,
|
||||
ecs.NextHopIntance, ecs.NextHopTunnel))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// validateSwitchCIDRNetworkAddress ensures the value is a valid network CIDR
// whose prefix length is between 16 and 29, as required for VSwitches.
//
// Fix: the prefix length is now read with IPNet.Mask.Size instead of
// re-parsing the already-parsed CIDR string with strings.Split + strconv.
func validateSwitchCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	_, ipnet, err := net.ParseCIDR(value)
	if err != nil {
		errors = append(errors, fmt.Errorf(
			"%q must contain a valid CIDR, got error parsing: %s", k, err))
		return
	}

	if ipnet == nil || value != ipnet.String() {
		errors = append(errors, fmt.Errorf(
			"%q must contain a valid network CIDR, expected %q, got %q",
			k, ipnet, value))
		return
	}

	ones, _ := ipnet.Mask.Size()
	if ones < 16 || ones > 29 {
		errors = append(errors, fmt.Errorf(
			"%q must contain a network CIDR which mark between 16 and 29",
			k))
	}

	return
}
|
||||
|
||||
// validateIoOptimized ensures that the string value is a valid IoOptimized that
|
||||
// represents a IoOptimized - it adds an error otherwise
|
||||
func validateIoOptimized(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
ioOptimized := ecs.IoOptimized(value)
|
||||
if ioOptimized != ecs.IoOptimizedNone &&
|
||||
ioOptimized != ecs.IoOptimizedOptimized {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid IoOptimized, expected %s or %s, got %q",
|
||||
k, ecs.IoOptimizedNone, ecs.IoOptimizedOptimized, ioOptimized))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// validateInstanceNetworkType ensures that the string value is a classic or vpc
|
||||
func validateInstanceNetworkType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
network := InstanceNetWork(value)
|
||||
if network != ClassicNet &&
|
||||
network != VpcNet {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceNetworkType, expected %s or %s, go %q",
|
||||
k, ClassicNet, VpcNet, network))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateInstanceChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InstanceChargeType(value)
|
||||
if chargeType != common.PrePaid &&
|
||||
chargeType != common.PostPaid {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, common.PrePaid, common.PostPaid, chargeType))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateInternetChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InternetChargeType(value)
|
||||
if chargeType != common.PayByBandwidth &&
|
||||
chargeType != common.PayByTraffic {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, common.PayByBandwidth, common.PayByTraffic, chargeType))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateInternetMaxBandWidthOut(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 0 || value > 100 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid internet bandwidth out between 0 and 100",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SLB
|
||||
func validateSlbName(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
if len(value) < 1 || len(value) > 80 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer name characters between 1 and 80",
|
||||
k))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbInternetChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InternetChargeType(value)
|
||||
|
||||
if chargeType != "paybybandwidth" &&
|
||||
chargeType != "paybytraffic" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, "paybybandwidth", "paybytraffic", value))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbBandwidth(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 1000 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer bandwidth between 1 and 1000",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerBandwidth(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if (value < 1 || value > 1000) && value != -1 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer bandwidth between 1 and 1000 or -1",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerScheduler(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
scheduler := slb.SchedulerType(value)
|
||||
|
||||
if scheduler != "wrr" && scheduler != "wlc" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid SchedulerType, expected %s or %s, got %q",
|
||||
k, "wrr", "wlc", value))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerCookie(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
if len(value) < 1 || len(value) > 200 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 200 characters", k))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerCookieTimeout(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 0 || value > 86400 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer cookie timeout between 0 and 86400",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// validateSlbListenerPersistenceTimeout ensures the value is a session
// persistence timeout in [0, 3600] seconds.
//
// Fix: the error message previously claimed the range was 0-86400 while the
// check rejects anything above 3600.
func validateSlbListenerPersistenceTimeout(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)
	if value < 0 || value > 3600 {
		errors = append(errors, fmt.Errorf(
			"%q must be a valid load balancer persistence timeout between 0 and 3600",
			k))
		return
	}
	return
}
|
||||
|
||||
func validateSlbListenerHealthCheckDomain(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
//the len add "$_ip",so to max is 84
|
||||
if len(value) < 1 || len(value) > 84 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 84 characters", k))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerHealthCheckUri(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
if len(value) < 1 || len(value) > 80 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateSlbListenerHealthCheckConnectPort(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 65535 {
|
||||
if value != -520 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer health check connect port between 1 and 65535 or -520",
|
||||
k))
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// validateDBBackupPeriod ensures the value is an English weekday name, the
// format the RDS backup API expects.
func validateDBBackupPeriod(v interface{}, k string) (ws []string, errors []error) {
	days := []string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}
	value := v.(string)
	for _, d := range days {
		if value == d {
			return
		}
	}
	errors = append(errors, fmt.Errorf(
		"%q must contain a valid backup period value should in array %#v, got %q",
		k, days, value))
	return
}
|
||||
|
||||
func validateAllowedStringValue(ss []string) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
existed := false
|
||||
for _, s := range ss {
|
||||
if s == value {
|
||||
existed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !existed {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid string value should in array %#v, got %q",
|
||||
k, ss, value))
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func validateAllowedSplitStringValue(ss []string, splitStr string) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
existed := false
|
||||
tsList := strings.Split(value, splitStr)
|
||||
|
||||
for _, ts := range tsList {
|
||||
existed = false
|
||||
for _, s := range ss {
|
||||
if ts == s {
|
||||
existed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !existed {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid string value should in %#v, got %q",
|
||||
k, ss, value))
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func validateAllowedIntValue(is []int) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
existed := false
|
||||
for _, i := range is {
|
||||
if i == value {
|
||||
existed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !existed {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid int value should in array %#v, got %q",
|
||||
k, is, value))
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func validateIntegerInRange(min, max int) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < min {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be lower than %d: %d", k, min, value))
|
||||
}
|
||||
if value > max {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be higher than %d: %d", k, max, value))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//data source validate func
|
||||
//data_source_alicloud_image
|
||||
func validateNameRegex(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
|
||||
if _, err := regexp.Compile(value); err != nil {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q contains an invalid regular expression: %s",
|
||||
k, err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateImageOwners(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
owners := ecs.ImageOwnerAlias(value)
|
||||
if owners != ecs.ImageOwnerSystem &&
|
||||
owners != ecs.ImageOwnerSelf &&
|
||||
owners != ecs.ImageOwnerOthers &&
|
||||
owners != ecs.ImageOwnerMarketplace &&
|
||||
owners != ecs.ImageOwnerDefault {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid Image owner , expected %s, %s, %s, %s or %s, got %q",
|
||||
k, ecs.ImageOwnerSystem, ecs.ImageOwnerSelf, ecs.ImageOwnerOthers, ecs.ImageOwnerMarketplace, ecs.ImageOwnerDefault, owners))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateRegion(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
region := common.Region(value)
|
||||
var valid string
|
||||
for _, re := range common.ValidRegions {
|
||||
if region == re {
|
||||
return
|
||||
}
|
||||
valid = valid + ", " + string(re)
|
||||
}
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid Region ID , expected %#v, got %q",
|
||||
k, valid, value))
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateForwardPort(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if value != "any" {
|
||||
valueConv, err := strconv.Atoi(value)
|
||||
if err != nil || valueConv < 1 || valueConv > 65535 {
|
||||
errors = append(errors, fmt.Errorf("%q must be a valid port between 1 and 65535 or any ", k))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,502 +0,0 @@
|
|||
package alicloud
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestValidateInstancePort(t *testing.T) {
|
||||
validPorts := []int{1, 22, 80, 100, 8088, 65535}
|
||||
for _, v := range validPorts {
|
||||
_, errors := validateInstancePort(v, "instance_port")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance port number between 1 and 65535: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidPorts := []int{-10, -1, 0}
|
||||
for _, v := range invalidPorts {
|
||||
_, errors := validateInstancePort(v, "instance_port")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance port number", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceProtocol(t *testing.T) {
|
||||
validProtocols := []string{"http", "tcp", "https", "udp"}
|
||||
for _, v := range validProtocols {
|
||||
_, errors := validateInstanceProtocol(v, "instance_protocol")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance protocol: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidProtocols := []string{"HTTP", "abc", "ecmp", "dubbo"}
|
||||
for _, v := range invalidProtocols {
|
||||
_, errors := validateInstanceProtocol(v, "instance_protocol")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance protocol", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceDiskCategory(t *testing.T) {
|
||||
validDiskCategory := []string{"cloud", "cloud_efficiency", "cloud_ssd"}
|
||||
for _, v := range validDiskCategory {
|
||||
_, errors := validateDiskCategory(v, "instance_disk_category")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance disk category: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidDiskCategory := []string{"all", "ephemeral", "ephemeral_ssd", "ALL", "efficiency"}
|
||||
for _, v := range invalidDiskCategory {
|
||||
_, errors := validateDiskCategory(v, "instance_disk_category")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance disk category", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceName(t *testing.T) {
|
||||
validInstanceName := []string{"hi", "hi http://", "some word + any word &", "http", "中文"}
|
||||
for _, v := range validInstanceName {
|
||||
_, errors := validateInstanceName(v, "instance_name")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance name: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidInstanceName := []string{"y", "http://", "https://", "+"}
|
||||
for _, v := range invalidInstanceName {
|
||||
_, errors := validateInstanceName(v, "instance_name")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance name", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceDescription(t *testing.T) {
|
||||
validInstanceDescription := []string{"hi", "hi http://", "some word + any word &", "http://", "中文"}
|
||||
for _, v := range validInstanceDescription {
|
||||
_, errors := validateInstanceDescription(v, "instance_description")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance description: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidvalidInstanceDescription := []string{"y", ""}
|
||||
for _, v := range invalidvalidInstanceDescription {
|
||||
_, errors := validateInstanceName(v, "instance_description")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance description", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityGroupName(t *testing.T) {
|
||||
validSecurityGroupName := []string{"hi", "hi http://", "some word + any word &", "http", "中文", "12345"}
|
||||
for _, v := range validSecurityGroupName {
|
||||
_, errors := validateSecurityGroupName(v, "security_group_name")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid security group name: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSecurityGroupName := []string{"y", "http://", "https://", "+"}
|
||||
for _, v := range invalidSecurityGroupName {
|
||||
_, errors := validateSecurityGroupName(v, "security_group_name")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid security group name", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityGroupDescription(t *testing.T) {
|
||||
validSecurityGroupDescription := []string{"hi", "hi http://", "some word + any word &", "http://", "中文"}
|
||||
for _, v := range validSecurityGroupDescription {
|
||||
_, errors := validateSecurityGroupDescription(v, "security_group_description")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid security group description: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSecurityGroupDescription := []string{"y", ""}
|
||||
for _, v := range invalidSecurityGroupDescription {
|
||||
_, errors := validateSecurityGroupDescription(v, "security_group_description")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid security group description", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityRuleType(t *testing.T) {
|
||||
validSecurityRuleType := []string{"ingress", "egress"}
|
||||
for _, v := range validSecurityRuleType {
|
||||
_, errors := validateSecurityRuleType(v, "security_rule_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid security rule type: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSecurityRuleType := []string{"y", "gress", "in", "out"}
|
||||
for _, v := range invalidSecurityRuleType {
|
||||
_, errors := validateSecurityRuleType(v, "security_rule_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid security rule type", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityRuleIpProtocol(t *testing.T) {
|
||||
validIpProtocol := []string{"tcp", "udp", "icmp", "gre", "all"}
|
||||
for _, v := range validIpProtocol {
|
||||
_, errors := validateSecurityRuleIpProtocol(v, "security_rule_ip_protocol")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid ip protocol: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidIpProtocol := []string{"y", "ecmp", "http", "https"}
|
||||
for _, v := range invalidIpProtocol {
|
||||
_, errors := validateSecurityRuleIpProtocol(v, "security_rule_ip_protocol")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid ip protocol", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityRuleNicType(t *testing.T) {
|
||||
validRuleNicType := []string{"intranet", "internet"}
|
||||
for _, v := range validRuleNicType {
|
||||
_, errors := validateSecurityRuleNicType(v, "security_rule_nic_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid nic type: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidRuleNicType := []string{"inter", "ecmp", "http", "https"}
|
||||
for _, v := range invalidRuleNicType {
|
||||
_, errors := validateSecurityRuleNicType(v, "security_rule_nic_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid nic type", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityRulePolicy(t *testing.T) {
|
||||
validRulePolicy := []string{"accept", "drop"}
|
||||
for _, v := range validRulePolicy {
|
||||
_, errors := validateSecurityRulePolicy(v, "security_rule_policy")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid security rule policy: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidRulePolicy := []string{"inter", "ecmp", "http", "https"}
|
||||
for _, v := range invalidRulePolicy {
|
||||
_, errors := validateSecurityRulePolicy(v, "security_rule_policy")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid security rule policy", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSecurityRulePriority(t *testing.T) {
|
||||
validPriority := []int{1, 50, 100}
|
||||
for _, v := range validPriority {
|
||||
_, errors := validateSecurityPriority(v, "security_rule_priority")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid security rule priority: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidPriority := []int{-1, 0, 101}
|
||||
for _, v := range invalidPriority {
|
||||
_, errors := validateSecurityPriority(v, "security_rule_priority")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid security rule priority", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCIDRNetworkAddress(t *testing.T) {
|
||||
validCIDRNetworkAddress := []string{"192.168.10.0/24", "0.0.0.0/0", "10.121.10.0/24"}
|
||||
for _, v := range validCIDRNetworkAddress {
|
||||
_, errors := validateCIDRNetworkAddress(v, "cidr_network_address")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid cidr network address: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidCIDRNetworkAddress := []string{"1.2.3.4", "0x38732/21"}
|
||||
for _, v := range invalidCIDRNetworkAddress {
|
||||
_, errors := validateCIDRNetworkAddress(v, "cidr_network_address")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid cidr network address", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateRouteEntryNextHopType(t *testing.T) {
|
||||
validNexthopType := []string{"Instance", "Tunnel"}
|
||||
for _, v := range validNexthopType {
|
||||
_, errors := validateRouteEntryNextHopType(v, "route_entry_nexthop_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid route entry nexthop type: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidNexthopType := []string{"ri", "vpc"}
|
||||
for _, v := range invalidNexthopType {
|
||||
_, errors := validateRouteEntryNextHopType(v, "route_entry_nexthop_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid route entry nexthop type", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSwitchCIDRNetworkAddress(t *testing.T) {
|
||||
validSwitchCIDRNetworkAddress := []string{"192.168.10.0/24", "0.0.0.0/16", "127.0.0.0/29", "10.121.10.0/24"}
|
||||
for _, v := range validSwitchCIDRNetworkAddress {
|
||||
_, errors := validateSwitchCIDRNetworkAddress(v, "switch_cidr_network_address")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid switch cidr network address: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSwitchCIDRNetworkAddress := []string{"1.2.3.4", "0x38732/21", "10.121.10.0/15", "10.121.10.0/30", "256.121.10.0/22"}
|
||||
for _, v := range invalidSwitchCIDRNetworkAddress {
|
||||
_, errors := validateSwitchCIDRNetworkAddress(v, "switch_cidr_network_address")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid switch cidr network address", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateIoOptimized(t *testing.T) {
|
||||
validIoOptimized := []string{"", "none", "optimized"}
|
||||
for _, v := range validIoOptimized {
|
||||
_, errors := validateIoOptimized(v, "ioOptimized")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid IoOptimized value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidIoOptimized := []string{"true", "ioOptimized"}
|
||||
for _, v := range invalidIoOptimized {
|
||||
_, errors := validateIoOptimized(v, "ioOptimized")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid IoOptimized value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceNetworkType(t *testing.T) {
|
||||
validInstanceNetworkType := []string{"", "classic", "vpc"}
|
||||
for _, v := range validInstanceNetworkType {
|
||||
_, errors := validateInstanceNetworkType(v, "instance_network_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance network type value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidInstanceNetworkType := []string{"Classic", "vswitch", "123"}
|
||||
for _, v := range invalidInstanceNetworkType {
|
||||
_, errors := validateInstanceNetworkType(v, "instance_network_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance network type value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInstanceChargeType(t *testing.T) {
|
||||
validInstanceChargeType := []string{"", "PrePaid", "PostPaid"}
|
||||
for _, v := range validInstanceChargeType {
|
||||
_, errors := validateInstanceChargeType(v, "instance_charge_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid instance charge type value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidInstanceChargeType := []string{"prepay", "yearly", "123"}
|
||||
for _, v := range invalidInstanceChargeType {
|
||||
_, errors := validateInstanceChargeType(v, "instance_charge_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid instance charge type value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInternetChargeType(t *testing.T) {
|
||||
validInternetChargeType := []string{"", "PayByBandwidth", "PayByTraffic"}
|
||||
for _, v := range validInternetChargeType {
|
||||
_, errors := validateInternetChargeType(v, "internet_charge_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid internet charge type value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidInternetChargeType := []string{"paybybandwidth", "paybytraffic", "123"}
|
||||
for _, v := range invalidInternetChargeType {
|
||||
_, errors := validateInternetChargeType(v, "internet_charge_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid internet charge type value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInternetMaxBandWidthOut(t *testing.T) {
|
||||
validInternetMaxBandWidthOut := []int{1, 22, 100}
|
||||
for _, v := range validInternetMaxBandWidthOut {
|
||||
_, errors := validateInternetMaxBandWidthOut(v, "internet_max_bandwidth_out")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid internet max bandwidth out value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidInternetMaxBandWidthOut := []int{-2, 101, 123}
|
||||
for _, v := range invalidInternetMaxBandWidthOut {
|
||||
_, errors := validateInternetMaxBandWidthOut(v, "internet_max_bandwidth_out")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid internet max bandwidth out value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSlbName(t *testing.T) {
|
||||
validSlbName := []string{"h", "http://", "123", "hello, aliyun! "}
|
||||
for _, v := range validSlbName {
|
||||
_, errors := validateSlbName(v, "slb_name")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid slb name: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
// todo: add invalid case
|
||||
}
|
||||
|
||||
func TestValidateSlbInternetChargeType(t *testing.T) {
|
||||
validSlbInternetChargeType := []string{"paybybandwidth", "paybytraffic"}
|
||||
for _, v := range validSlbInternetChargeType {
|
||||
_, errors := validateSlbInternetChargeType(v, "slb_internet_charge_type")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid slb internet charge type value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSlbInternetChargeType := []string{"PayByBandwidth", "PayByTraffic"}
|
||||
for _, v := range invalidSlbInternetChargeType {
|
||||
_, errors := validateSlbInternetChargeType(v, "slb_internet_charge_type")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid slb internet charge type value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSlbBandwidth(t *testing.T) {
|
||||
validSlbBandwidth := []int{1, 22, 1000}
|
||||
for _, v := range validSlbBandwidth {
|
||||
_, errors := validateSlbBandwidth(v, "slb_bandwidth")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid slb bandwidth value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSlbBandwidth := []int{-2, 0, 1001}
|
||||
for _, v := range invalidSlbBandwidth {
|
||||
_, errors := validateSlbBandwidth(v, "slb_bandwidth")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid slb bandwidth value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSlbListenerBandwidth(t *testing.T) {
|
||||
validSlbListenerBandwidth := []int{-1, 1, 22, 1000}
|
||||
for _, v := range validSlbListenerBandwidth {
|
||||
_, errors := validateSlbListenerBandwidth(v, "slb_bandwidth")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid slb listener bandwidth value: %q", v, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidSlbListenerBandwidth := []int{-2, 0, -10, 1001}
|
||||
for _, v := range invalidSlbListenerBandwidth {
|
||||
_, errors := validateSlbListenerBandwidth(v, "slb_bandwidth")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid slb listener bandwidth value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAllowedStringValue(t *testing.T) {
|
||||
exceptValues := []string{"aliyun", "alicloud", "alibaba"}
|
||||
validValues := []string{"aliyun"}
|
||||
for _, v := range validValues {
|
||||
_, errors := validateAllowedStringValue(exceptValues)(v, "allowvalue")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid value in %#v: %q", v, exceptValues, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidValues := []string{"ali", "alidata", "terraform"}
|
||||
for _, v := range invalidValues {
|
||||
_, errors := validateAllowedStringValue(exceptValues)(v, "allowvalue")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAllowedStringSplitValue(t *testing.T) {
|
||||
exceptValues := []string{"aliyun", "alicloud", "alibaba"}
|
||||
validValues := "aliyun,alicloud"
|
||||
_, errors := validateAllowedSplitStringValue(exceptValues, ",")(validValues, "allowvalue")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid value in %#v: %q", validValues, exceptValues, errors)
|
||||
}
|
||||
|
||||
invalidValues := "ali,alidata"
|
||||
_, invalidErr := validateAllowedSplitStringValue(exceptValues, ",")(invalidValues, "allowvalue")
|
||||
if len(invalidErr) == 0 {
|
||||
t.Fatalf("%q should be an invalid value", invalidValues)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAllowedIntValue(t *testing.T) {
|
||||
exceptValues := []int{1, 3, 5, 6}
|
||||
validValues := []int{1, 3, 5, 6}
|
||||
for _, v := range validValues {
|
||||
_, errors := validateAllowedIntValue(exceptValues)(v, "allowvalue")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be a valid value in %#v: %q", v, exceptValues, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidValues := []int{0, 7, 10}
|
||||
for _, v := range invalidValues {
|
||||
_, errors := validateAllowedIntValue(exceptValues)(v, "allowvalue")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an invalid value", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateIntegerInRange(t *testing.T) {
|
||||
validIntegers := []int{-259, 0, 1, 5, 999}
|
||||
min := -259
|
||||
max := 999
|
||||
for _, v := range validIntegers {
|
||||
_, errors := validateIntegerInRange(min, max)(v, "name")
|
||||
if len(errors) != 0 {
|
||||
t.Fatalf("%q should be an integer in range (%d, %d): %q", v, min, max, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidIntegers := []int{-260, -99999, 1000, 25678}
|
||||
for _, v := range invalidIntegers {
|
||||
_, errors := validateIntegerInRange(min, max)(v, "name")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should be an integer outside range (%d, %d)", v, min, max)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
*.zip
|
|
@ -1,48 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
type Archiver interface {
|
||||
ArchiveContent(content []byte, infilename string) error
|
||||
ArchiveFile(infilename string) error
|
||||
ArchiveDir(indirname string) error
|
||||
ArchiveMultiple(content map[string][]byte) error
|
||||
}
|
||||
|
||||
type ArchiverBuilder func(filepath string) Archiver
|
||||
|
||||
var archiverBuilders = map[string]ArchiverBuilder{
|
||||
"zip": NewZipArchiver,
|
||||
}
|
||||
|
||||
func getArchiver(archiveType string, filepath string) Archiver {
|
||||
if builder, ok := archiverBuilders[archiveType]; ok {
|
||||
return builder(filepath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func assertValidFile(infilename string) (os.FileInfo, error) {
|
||||
fi, err := os.Stat(infilename)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return fi, fmt.Errorf("could not archive missing file: %s", infilename)
|
||||
}
|
||||
return fi, err
|
||||
}
|
||||
|
||||
func assertValidDir(indirname string) (os.FileInfo, error) {
|
||||
fi, err := os.Stat(indirname)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return fi, fmt.Errorf("could not archive missing directory: %s", indirname)
|
||||
}
|
||||
return fi, err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return fi, fmt.Errorf("could not archive directory that is a file: %s", indirname)
|
||||
}
|
||||
return fi, nil
|
||||
}
|
|
@ -1,205 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func dataSourceFile() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: dataSourceFileRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"source": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"content": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"filename": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
ConflictsWith: []string{"source_file", "source_dir", "source_content", "source_content_filename"},
|
||||
Set: func(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["filename"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["content"].(string)))
|
||||
return hashcode.String(buf.String())
|
||||
},
|
||||
},
|
||||
"source_content": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"source_file", "source_dir"},
|
||||
},
|
||||
"source_content_filename": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"source_file", "source_dir"},
|
||||
},
|
||||
"source_file": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"source_content", "source_content_filename", "source_dir"},
|
||||
},
|
||||
"source_dir": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"source_content", "source_content_filename", "source_file"},
|
||||
},
|
||||
"output_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"output_size": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"output_sha": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Description: "SHA1 checksum of output file",
|
||||
},
|
||||
"output_base64sha256": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Description: "Base64 Encoded SHA256 checksum of output file",
|
||||
},
|
||||
"output_md5": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Description: "MD5 of output file",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func dataSourceFileRead(d *schema.ResourceData, meta interface{}) error {
|
||||
outputPath := d.Get("output_path").(string)
|
||||
|
||||
outputDirectory := path.Dir(outputPath)
|
||||
if outputDirectory != "" {
|
||||
if _, err := os.Stat(outputDirectory); err != nil {
|
||||
if err := os.MkdirAll(outputDirectory, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := archive(d); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate archived file stats
|
||||
fi, err := os.Stat(outputPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sha1, base64sha256, md5, err := genFileShas(outputPath)
|
||||
if err != nil {
|
||||
|
||||
return fmt.Errorf("could not generate file checksum sha256: %s", err)
|
||||
}
|
||||
d.Set("output_sha", sha1)
|
||||
d.Set("output_base64sha256", base64sha256)
|
||||
d.Set("output_md5", md5)
|
||||
|
||||
d.Set("output_size", fi.Size())
|
||||
d.SetId(d.Get("output_sha").(string))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func archive(d *schema.ResourceData) error {
|
||||
archiveType := d.Get("type").(string)
|
||||
outputPath := d.Get("output_path").(string)
|
||||
|
||||
archiver := getArchiver(archiveType, outputPath)
|
||||
if archiver == nil {
|
||||
return fmt.Errorf("archive type not supported: %s", archiveType)
|
||||
}
|
||||
|
||||
if dir, ok := d.GetOk("source_dir"); ok {
|
||||
if err := archiver.ArchiveDir(dir.(string)); err != nil {
|
||||
return fmt.Errorf("error archiving directory: %s", err)
|
||||
}
|
||||
} else if file, ok := d.GetOk("source_file"); ok {
|
||||
if err := archiver.ArchiveFile(file.(string)); err != nil {
|
||||
return fmt.Errorf("error archiving file: %s", err)
|
||||
}
|
||||
} else if filename, ok := d.GetOk("source_content_filename"); ok {
|
||||
content := d.Get("source_content").(string)
|
||||
if err := archiver.ArchiveContent([]byte(content), filename.(string)); err != nil {
|
||||
return fmt.Errorf("error archiving content: %s", err)
|
||||
}
|
||||
} else if v, ok := d.GetOk("source"); ok {
|
||||
vL := v.(*schema.Set).List()
|
||||
content := make(map[string][]byte)
|
||||
for _, v := range vL {
|
||||
src := v.(map[string]interface{})
|
||||
content[src["filename"].(string)] = []byte(src["content"].(string))
|
||||
}
|
||||
if err := archiver.ArchiveMultiple(content); err != nil {
|
||||
return fmt.Errorf("error archiving content: %s", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("one of 'source_dir', 'source_file', 'source_content_filename' must be specified")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func genFileShas(filename string) (string, string, string, error) {
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return "", "", "", fmt.Errorf("could not compute file '%s' checksum: %s", filename, err)
|
||||
}
|
||||
h := sha1.New()
|
||||
h.Write([]byte(data))
|
||||
sha1 := hex.EncodeToString(h.Sum(nil))
|
||||
|
||||
h256 := sha256.New()
|
||||
h256.Write([]byte(data))
|
||||
shaSum := h256.Sum(nil)
|
||||
sha256base64 := base64.StdEncoding.EncodeToString(shaSum[:])
|
||||
|
||||
md5 := md5.New()
|
||||
md5.Write([]byte(data))
|
||||
md5Sum := hex.EncodeToString(md5.Sum(nil))
|
||||
|
||||
return sha1, sha256base64, md5Sum, nil
|
||||
}
|
|
@ -1,127 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
r "github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccArchiveFile_Basic(t *testing.T) {
|
||||
var fileSize string
|
||||
r.Test(t, r.TestCase{
|
||||
Providers: testProviders,
|
||||
Steps: []r.TestStep{
|
||||
r.TestStep{
|
||||
Config: testAccArchiveFileContentConfig,
|
||||
Check: r.ComposeTestCheckFunc(
|
||||
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
|
||||
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
|
||||
|
||||
// We just check the hashes for syntax rather than exact
|
||||
// content since we don't want to break if the archive
|
||||
// library starts generating different bytes that are
|
||||
// functionally equivalent.
|
||||
r.TestMatchResourceAttr(
|
||||
"data.archive_file.foo", "output_base64sha256",
|
||||
regexp.MustCompile(`^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$`),
|
||||
),
|
||||
r.TestMatchResourceAttr(
|
||||
"data.archive_file.foo", "output_md5", regexp.MustCompile(`^[0-9a-f]{32}$`),
|
||||
),
|
||||
r.TestMatchResourceAttr(
|
||||
"data.archive_file.foo", "output_sha", regexp.MustCompile(`^[0-9a-f]{40}$`),
|
||||
),
|
||||
),
|
||||
},
|
||||
r.TestStep{
|
||||
Config: testAccArchiveFileFileConfig,
|
||||
Check: r.ComposeTestCheckFunc(
|
||||
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
|
||||
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
|
||||
),
|
||||
},
|
||||
r.TestStep{
|
||||
Config: testAccArchiveFileDirConfig,
|
||||
Check: r.ComposeTestCheckFunc(
|
||||
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
|
||||
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
|
||||
),
|
||||
},
|
||||
r.TestStep{
|
||||
Config: testAccArchiveFileMultiConfig,
|
||||
Check: r.ComposeTestCheckFunc(
|
||||
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
|
||||
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
|
||||
),
|
||||
},
|
||||
r.TestStep{
|
||||
Config: testAccArchiveFileOutputPath,
|
||||
Check: r.ComposeTestCheckFunc(
|
||||
testAccArchiveFileExists(fmt.Sprintf("%s/test.zip", tmpDir), &fileSize),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccArchiveFileExists(filename string, fileSize *string) r.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
*fileSize = ""
|
||||
fi, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*fileSize = fmt.Sprintf("%d", fi.Size())
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Minimal configurations covering each input mode of the archive_file
// data source.

var testAccArchiveFileContentConfig = `
data "archive_file" "foo" {
  type                    = "zip"
  source_content          = "This is some content"
  source_content_filename = "content.txt"
  output_path             = "zip_file_acc_test.zip"
}
`

// tmpDir is a scratch directory used by the output_path step.
var tmpDir = os.TempDir() + "/test"

var testAccArchiveFileOutputPath = fmt.Sprintf(`
data "archive_file" "foo" {
  type                    = "zip"
  source_content          = "This is some content"
  source_content_filename = "content.txt"
  output_path             = "%s/test.zip"
}
`, tmpDir)

var testAccArchiveFileFileConfig = `
data "archive_file" "foo" {
  type        = "zip"
  source_file = "test-fixtures/test-file.txt"
  output_path = "zip_file_acc_test.zip"
}
`

var testAccArchiveFileDirConfig = `
data "archive_file" "foo" {
  type        = "zip"
  source_dir  = "test-fixtures/test-dir"
  output_path = "zip_file_acc_test.zip"
}
`

var testAccArchiveFileMultiConfig = `
data "archive_file" "foo" {
  type = "zip"
  source {
    filename = "content.txt"
    content  = "This is some content"
  }
  output_path = "zip_file_acc_test.zip"
}
`
|
|
@ -1,20 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func Provider() terraform.ResourceProvider {
|
||||
return &schema.Provider{
|
||||
DataSourcesMap: map[string]*schema.Resource{
|
||||
"archive_file": dataSourceFile(),
|
||||
},
|
||||
ResourcesMap: map[string]*schema.Resource{
|
||||
"archive_file": schema.DataSourceResourceShim(
|
||||
"archive_file",
|
||||
dataSourceFile(),
|
||||
),
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
var testProviders = map[string]terraform.ResourceProvider{
|
||||
"archive": Provider(),
|
||||
}
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
This is file 1
|
|
@ -1 +0,0 @@
|
|||
This is file 2
|
|
@ -1 +0,0 @@
|
|||
This is file 3
|
|
@ -1 +0,0 @@
|
|||
This is test content
|
|
@ -1,136 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type ZipArchiver struct {
|
||||
filepath string
|
||||
filewriter *os.File
|
||||
writer *zip.Writer
|
||||
}
|
||||
|
||||
func NewZipArchiver(filepath string) Archiver {
|
||||
return &ZipArchiver{
|
||||
filepath: filepath,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) ArchiveContent(content []byte, infilename string) error {
|
||||
if err := a.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.close()
|
||||
|
||||
f, err := a.writer.Create(infilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write(content)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) ArchiveFile(infilename string) error {
|
||||
fi, err := assertValidFile(infilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(infilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.ArchiveContent(content, fi.Name())
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) ArchiveDir(indirname string) error {
|
||||
_, err := assertValidDir(indirname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := a.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.close()
|
||||
|
||||
return filepath.Walk(indirname, func(path string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relname, err := filepath.Rel(indirname, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error relativizing file for archival: %s", err)
|
||||
}
|
||||
f, err := a.writer.Create(relname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating file inside archive: %s", err)
|
||||
}
|
||||
content, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading file for archival: %s", err)
|
||||
}
|
||||
_, err = f.Write(content)
|
||||
return err
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) ArchiveMultiple(content map[string][]byte) error {
|
||||
if err := a.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.close()
|
||||
|
||||
// Ensure files are processed in the same order so hashes don't change
|
||||
keys := make([]string, len(content))
|
||||
i := 0
|
||||
for k := range content {
|
||||
keys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, filename := range keys {
|
||||
f, err := a.writer.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.Write(content[filename])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) open() error {
|
||||
f, err := os.Create(a.filepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.filewriter = f
|
||||
a.writer = zip.NewWriter(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ZipArchiver) close() {
|
||||
if a.writer != nil {
|
||||
a.writer.Close()
|
||||
a.writer = nil
|
||||
}
|
||||
if a.filewriter != nil {
|
||||
a.filewriter.Close()
|
||||
a.filewriter = nil
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestZipArchiver_Content(t *testing.T) {
|
||||
zipfilepath := "archive-content.zip"
|
||||
archiver := NewZipArchiver(zipfilepath)
|
||||
if err := archiver.ArchiveContent([]byte("This is some content"), "content.txt"); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
ensureContents(t, zipfilepath, map[string][]byte{
|
||||
"content.txt": []byte("This is some content"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestZipArchiver_File(t *testing.T) {
|
||||
zipfilepath := "archive-file.zip"
|
||||
archiver := NewZipArchiver(zipfilepath)
|
||||
if err := archiver.ArchiveFile("./test-fixtures/test-file.txt"); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
ensureContents(t, zipfilepath, map[string][]byte{
|
||||
"test-file.txt": []byte("This is test content"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestZipArchiver_Dir(t *testing.T) {
|
||||
zipfilepath := "archive-dir.zip"
|
||||
archiver := NewZipArchiver(zipfilepath)
|
||||
if err := archiver.ArchiveDir("./test-fixtures/test-dir"); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
ensureContents(t, zipfilepath, map[string][]byte{
|
||||
"file1.txt": []byte("This is file 1"),
|
||||
"file2.txt": []byte("This is file 2"),
|
||||
"file3.txt": []byte("This is file 3"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestZipArchiver_Multiple(t *testing.T) {
|
||||
zipfilepath := "archive-content.zip"
|
||||
content := map[string][]byte{
|
||||
"file1.txt": []byte("This is file 1"),
|
||||
"file2.txt": []byte("This is file 2"),
|
||||
"file3.txt": []byte("This is file 3"),
|
||||
}
|
||||
|
||||
archiver := NewZipArchiver(zipfilepath)
|
||||
if err := archiver.ArchiveMultiple(content); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
ensureContents(t, zipfilepath, content)
|
||||
|
||||
}
|
||||
|
||||
func ensureContents(t *testing.T, zipfilepath string, wants map[string][]byte) {
|
||||
r, err := zip.OpenReader(zipfilepath)
|
||||
if err != nil {
|
||||
t.Fatalf("could not open zip file: %s", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if len(r.File) != len(wants) {
|
||||
t.Errorf("mismatched file count, got %d, want %d", len(r.File), len(wants))
|
||||
}
|
||||
for _, cf := range r.File {
|
||||
ensureContent(t, wants, cf)
|
||||
}
|
||||
}
|
||||
|
||||
// ensureContent asserts that a single zip entry matches its expected bytes.
// Unexpected entries and read failures are reported via t.Errorf.
func ensureContent(t *testing.T, wants map[string][]byte, got *zip.File) {
	want, ok := wants[got.Name]
	if !ok {
		t.Errorf("additional file in zip: %s", got.Name)
		return
	}

	r, err := got.Open()
	if err != nil {
		// Must bail out here: continuing would defer Close on a nil
		// ReadCloser and pass it to ReadAll, panicking instead of failing.
		t.Errorf("could not open file: %s", err)
		return
	}
	defer r.Close()

	gotContentBytes, err := ioutil.ReadAll(r)
	if err != nil {
		// Likewise, don't compare garbage after a failed read.
		t.Errorf("could not read file: %s", err)
		return
	}

	wantContent := string(want)
	gotContent := string(gotContentBytes)
	if gotContent != wantContent {
		t.Errorf("mismatched content\ngot\n%s\nwant\n%s", gotContent, wantContent)
	}
}
|
|
@ -1,52 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
API "github.com/arukasio/cli"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
JSONTokenParamName = "ARUKAS_JSON_API_TOKEN"
|
||||
JSONSecretParamName = "ARUKAS_JSON_API_SECRET"
|
||||
JSONUrlParamName = "ARUKAS_JSON_API_URL"
|
||||
JSONDebugParamName = "ARUKAS_DEBUG"
|
||||
JSONTimeoutParamName = "ARUKAS_TIMEOUT"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Token string
|
||||
Secret string
|
||||
URL string
|
||||
Trace string
|
||||
Timeout int
|
||||
}
|
||||
|
||||
func (c *Config) NewClient() (*ArukasClient, error) {
|
||||
|
||||
os.Setenv(JSONTokenParamName, c.Token)
|
||||
os.Setenv(JSONSecretParamName, c.Secret)
|
||||
os.Setenv(JSONUrlParamName, c.URL)
|
||||
os.Setenv(JSONDebugParamName, c.Trace)
|
||||
|
||||
client, err := API.NewClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.UserAgent = "Terraform for Arukas"
|
||||
|
||||
timeout := time.Duration(0)
|
||||
if c.Timeout > 0 {
|
||||
timeout = time.Duration(c.Timeout) * time.Second
|
||||
}
|
||||
|
||||
return &ArukasClient{
|
||||
Client: client,
|
||||
Timeout: timeout,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type ArukasClient struct {
|
||||
*API.Client
|
||||
Timeout time.Duration
|
||||
}
|
|
@ -1,59 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// Provider returns a terraform.ResourceProvider.
|
||||
func Provider() terraform.ResourceProvider {
|
||||
return &schema.Provider{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"token": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONTokenParamName, nil),
|
||||
Description: "your Arukas APIKey(token)",
|
||||
},
|
||||
"secret": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONSecretParamName, nil),
|
||||
Description: "your Arukas APIKey(secret)",
|
||||
},
|
||||
"api_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONUrlParamName, "https://app.arukas.io/api/"),
|
||||
Description: "default Arukas API url",
|
||||
},
|
||||
"trace": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONDebugParamName, ""),
|
||||
},
|
||||
"timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONTimeoutParamName, "900"),
|
||||
},
|
||||
},
|
||||
ResourcesMap: map[string]*schema.Resource{
|
||||
"arukas_container": resourceArukasContainer(),
|
||||
},
|
||||
ConfigureFunc: providerConfigure,
|
||||
}
|
||||
}
|
||||
|
||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||
|
||||
config := Config{
|
||||
Token: d.Get("token").(string),
|
||||
Secret: d.Get("secret").(string),
|
||||
URL: d.Get("api_url").(string),
|
||||
Trace: d.Get("trace").(string),
|
||||
Timeout: d.Get("timeout").(int),
|
||||
}
|
||||
|
||||
return config.NewClient()
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
var testAccProviders map[string]terraform.ResourceProvider
|
||||
var testAccProvider *schema.Provider
|
||||
|
||||
func init() {
|
||||
testAccProvider = Provider().(*schema.Provider)
|
||||
testAccProviders = map[string]terraform.ResourceProvider{
|
||||
"arukas": testAccProvider,
|
||||
}
|
||||
}
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProvider_impl(t *testing.T) {
|
||||
var _ terraform.ResourceProvider = Provider()
|
||||
}
|
||||
|
||||
// testAccPreCheck aborts the acceptance test when the required Arukas
// credentials are missing from the environment.
func testAccPreCheck(t *testing.T) {
	for _, key := range []string{"ARUKAS_JSON_API_TOKEN", "ARUKAS_JSON_API_SECRET"} {
		if os.Getenv(key) == "" {
			t.Fatalf("%s must be set for acceptance tests", key)
		}
	}
}
|
|
@ -1,289 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceArukasContainer() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceArukasContainerCreate,
|
||||
Read: resourceArukasContainerRead,
|
||||
Update: resourceArukasContainerUpdate,
|
||||
Delete: resourceArukasContainerDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"image": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"instances": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 1,
|
||||
ValidateFunc: validateIntegerInRange(1, 10),
|
||||
},
|
||||
"memory": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 256,
|
||||
ValidateFunc: validateIntInWord([]string{"256", "512"}),
|
||||
},
|
||||
"endpoint": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"ports": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Required: true,
|
||||
MaxItems: 20,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"protocol": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "tcp",
|
||||
ValidateFunc: validateStringInWord([]string{"tcp", "udp"}),
|
||||
},
|
||||
"number": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: "80",
|
||||
ValidateFunc: validateIntegerInRange(1, 65535),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"environments": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
MaxItems: 20,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"value": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cmd": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"port_mappings": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"host": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"ipaddress": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"container_port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"service_port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"endpoint_full_hostname": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"endpoint_full_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"app_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceArukasContainerCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*ArukasClient)
|
||||
|
||||
var appSet API.AppSet
|
||||
|
||||
// create an app
|
||||
newApp := API.App{Name: d.Get("name").(string)}
|
||||
|
||||
var parsedEnvs API.Envs
|
||||
var parsedPorts API.Ports
|
||||
|
||||
if rawEnvs, ok := d.GetOk("environments"); ok {
|
||||
parsedEnvs = expandEnvs(rawEnvs)
|
||||
}
|
||||
if rawPorts, ok := d.GetOk("ports"); ok {
|
||||
parsedPorts = expandPorts(rawPorts)
|
||||
}
|
||||
|
||||
newContainer := API.Container{
|
||||
Envs: parsedEnvs,
|
||||
Ports: parsedPorts,
|
||||
ImageName: d.Get("image").(string),
|
||||
Mem: d.Get("memory").(int),
|
||||
Instances: d.Get("instances").(int),
|
||||
Cmd: d.Get("cmd").(string),
|
||||
|
||||
Name: d.Get("endpoint").(string),
|
||||
}
|
||||
newAppSet := API.AppSet{
|
||||
App: newApp,
|
||||
Container: newContainer,
|
||||
}
|
||||
|
||||
// create
|
||||
if err := client.Post(&appSet, "/app-sets", newAppSet); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// start container
|
||||
if err := client.Post(nil, fmt.Sprintf("/containers/%s/power", appSet.Container.ID), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(appSet.Container.ID)
|
||||
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Target: []string{"running"},
|
||||
Pending: []string{"stopped", "booting"},
|
||||
Timeout: client.Timeout,
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
var container API.Container
|
||||
err := client.Get(&container, fmt.Sprintf("/containers/%s", appSet.Container.ID))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return container, container.StatusText, nil
|
||||
},
|
||||
}
|
||||
_, err := stateConf.WaitForState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceArukasContainerRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceArukasContainerRead(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*ArukasClient)
|
||||
|
||||
var container API.Container
|
||||
var app API.App
|
||||
|
||||
if err := client.Get(&container, fmt.Sprintf("/containers/%s", d.Id())); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := client.Get(&app, fmt.Sprintf("/apps/%s", container.AppID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("app_id", container.AppID)
|
||||
d.Set("name", app.Name)
|
||||
d.Set("image", container.ImageName)
|
||||
d.Set("instances", container.Instances)
|
||||
d.Set("memory", container.Mem)
|
||||
endpoint := container.Endpoint
|
||||
if strings.HasSuffix(endpoint, ".arukascloud.io") {
|
||||
endpoint = strings.Replace(endpoint, ".arukascloud.io", "", -1)
|
||||
}
|
||||
|
||||
d.Set("endpoint", endpoint)
|
||||
d.Set("endpoint_full_hostname", container.Endpoint)
|
||||
d.Set("endpoint_full_url", fmt.Sprintf("https://%s", container.Endpoint))
|
||||
|
||||
d.Set("cmd", container.Cmd)
|
||||
|
||||
//ports
|
||||
d.Set("ports", flattenPorts(container.Ports))
|
||||
|
||||
//port mappings
|
||||
d.Set("port_mappings", flattenPortMappings(container.PortMappings))
|
||||
|
||||
//envs
|
||||
d.Set("environments", flattenEnvs(container.Envs))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceArukasContainerUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
client := meta.(*ArukasClient)
|
||||
var container API.Container
|
||||
|
||||
if err := client.Get(&container, fmt.Sprintf("/containers/%s", d.Id())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var parsedEnvs API.Envs
|
||||
var parsedPorts API.Ports
|
||||
|
||||
if rawEnvs, ok := d.GetOk("environments"); ok {
|
||||
parsedEnvs = expandEnvs(rawEnvs)
|
||||
}
|
||||
if rawPorts, ok := d.GetOk("ports"); ok {
|
||||
parsedPorts = expandPorts(rawPorts)
|
||||
}
|
||||
|
||||
newContainer := API.Container{
|
||||
Envs: parsedEnvs,
|
||||
Ports: parsedPorts,
|
||||
ImageName: d.Get("image").(string),
|
||||
Mem: d.Get("memory").(int),
|
||||
Instances: d.Get("instances").(int),
|
||||
Cmd: d.Get("cmd").(string),
|
||||
Name: d.Get("endpoint").(string),
|
||||
}
|
||||
|
||||
// update
|
||||
if err := client.Patch(nil, fmt.Sprintf("/containers/%s", d.Id()), newContainer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceArukasContainerRead(d, meta)
|
||||
|
||||
}
|
||||
|
||||
func resourceArukasContainerDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
client := meta.(*ArukasClient)
|
||||
var container API.Container
|
||||
|
||||
if err := client.Get(&container, fmt.Sprintf("/containers/%s", d.Id())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.Delete(fmt.Sprintf("/apps/%s", container.AppID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,301 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccArukasContainer_Basic(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_%s", randString)
|
||||
endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "instances", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "256"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", endpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.number", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.key", "key"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.value", "value"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "port_mappings.#", "1"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArukasContainer_Update(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_%s", randString)
|
||||
updatedName := fmt.Sprintf("terraform_acc_test_update_%s", randString)
|
||||
endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString)
|
||||
updatedEndpoint := fmt.Sprintf("terraform-acc-test-endpoint-update-%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "instances", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "256"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", endpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.number", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.key", "key"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.value", "value"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "port_mappings.#", "1"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_update(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", updatedName),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "instances", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "512"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", updatedEndpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.number", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.1.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.1.number", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.key", "key"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.0.value", "value"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.1.key", "key_upd"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "environments.1.value", "value_upd"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "port_mappings.#", "4"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArukasContainer_Minimum(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_minimum_%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_minimum(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "instances", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "256"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.0.number", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "port_mappings.#", "1"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArukasContainer_Import(t *testing.T) {
|
||||
resourceName := "arukas_container.foobar"
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckArukasContainerExists(n string, container *API.Container) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No Container ID is set")
|
||||
}
|
||||
client := testAccProvider.Meta().(*ArukasClient)
|
||||
var foundContainer API.Container
|
||||
err := client.Get(&foundContainer, fmt.Sprintf("/containers/%s", rs.Primary.ID))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if foundContainer.ID != rs.Primary.ID {
|
||||
return fmt.Errorf("Container not found")
|
||||
}
|
||||
|
||||
*container = foundContainer
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckArukasContainerDestroy(s *terraform.State) error {
|
||||
client := testAccProvider.Meta().(*ArukasClient)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "arukas_container" {
|
||||
continue
|
||||
}
|
||||
|
||||
err := client.Get(nil, fmt.Sprintf("/containers/%s", rs.Primary.ID))
|
||||
|
||||
if err == nil {
|
||||
return fmt.Errorf("Note still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckArukasContainerConfig_basic(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_acc_test_%s"
|
||||
image = "nginx:latest"
|
||||
instances = 1
|
||||
memory = 256
|
||||
endpoint = "terraform-acc-test-endpoint-%s"
|
||||
ports = {
|
||||
protocol = "tcp"
|
||||
number = "80"
|
||||
}
|
||||
environments {
|
||||
key = "key"
|
||||
value = "value"
|
||||
}
|
||||
}`, randString, randString)
|
||||
}
|
||||
|
||||
func testAccCheckArukasContainerConfig_update(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_acc_test_update_%s"
|
||||
image = "nginx:latest"
|
||||
instances = 2
|
||||
memory = 512
|
||||
endpoint = "terraform-acc-test-endpoint-update-%s"
|
||||
ports = {
|
||||
protocol = "tcp"
|
||||
number = "80"
|
||||
}
|
||||
ports = {
|
||||
protocol = "tcp"
|
||||
number = "443"
|
||||
}
|
||||
environments {
|
||||
key = "key"
|
||||
value = "value"
|
||||
}
|
||||
environments {
|
||||
key = "key_upd"
|
||||
value = "value_upd"
|
||||
}
|
||||
}`, randString, randString)
|
||||
}
|
||||
|
||||
func testAccCheckArukasContainerConfig_minimum(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_acc_test_minimum_%s"
|
||||
image = "nginx:latest"
|
||||
ports = {
|
||||
number = "80"
|
||||
}
|
||||
}`, randString)
|
||||
}
|
|
@ -1,110 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Takes the result of flatmap.Expand for an array of strings
|
||||
// and returns a []string
|
||||
// expandStringList converts the result of flatmap.Expand for an array of
// strings ([]interface{} whose elements are all strings) into a []string.
// Panics if an element is not a string, matching the original contract.
func expandStringList(configured []interface{}) []string {
	vs := make([]string, 0, len(configured))
	for _, v := range configured {
		// v.(string) is already a string; the former string(...) wrapper
		// around the assertion was a redundant conversion.
		vs = append(vs, v.(string))
	}
	return vs
}
|
||||
|
||||
// Takes the result of schema.Set of strings and returns a []string
|
||||
// expandStringSet converts a schema.Set of strings into a []string by
// delegating to expandStringList on the set's underlying list.
// Note: set iteration order is not guaranteed to be stable.
func expandStringSet(configured *schema.Set) []string {
	return expandStringList(configured.List())
}
|
||||
|
||||
// Takes list of pointers to strings. Expand to an array
|
||||
// of raw strings and returns a []interface{}
|
||||
// to keep compatibility w/ schema.NewSetschema.NewSet
|
||||
// flattenStringList converts a []string into a []interface{} of raw strings,
// for compatibility with schema.NewSet and TypeList state values.
func flattenStringList(list []string) []interface{} {
	out := make([]interface{}, len(list))
	for i, s := range list {
		out[i] = s
	}
	return out
}
|
||||
|
||||
func expandEnvs(configured interface{}) API.Envs {
|
||||
var envs API.Envs
|
||||
if configured == nil {
|
||||
return envs
|
||||
}
|
||||
rawEnvs := configured.([]interface{})
|
||||
for _, raw := range rawEnvs {
|
||||
env := raw.(map[string]interface{})
|
||||
envs = append(envs, API.Env{Key: env["key"].(string), Value: env["value"].(string)})
|
||||
}
|
||||
return envs
|
||||
}
|
||||
|
||||
func flattenEnvs(envs API.Envs) []interface{} {
|
||||
var ret []interface{}
|
||||
for _, env := range envs {
|
||||
r := map[string]interface{}{}
|
||||
r["key"] = env.Key
|
||||
r["value"] = env.Value
|
||||
ret = append(ret, r)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func expandPorts(configured interface{}) API.Ports {
|
||||
var ports API.Ports
|
||||
if configured == nil {
|
||||
return ports
|
||||
}
|
||||
rawPorts := configured.([]interface{})
|
||||
for _, raw := range rawPorts {
|
||||
port := raw.(map[string]interface{})
|
||||
ports = append(ports, API.Port{Protocol: port["protocol"].(string), Number: port["number"].(int)})
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
func flattenPorts(ports API.Ports) []interface{} {
|
||||
var ret []interface{}
|
||||
for _, port := range ports {
|
||||
r := map[string]interface{}{}
|
||||
r["protocol"] = port.Protocol
|
||||
r["number"] = port.Number
|
||||
ret = append(ret, r)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
// flattenPortMappings converts API.PortMappings (a per-task list of port
// mappings) into a flat []interface{} of state maps with "host",
// "ipaddress", "container_port", and "service_port" keys.
//
// NOTE(review): this performs a live DNS lookup per mapping
// (net.LookupHost), so it does network I/O and its "ipaddress" output can
// vary between runs; lookup failures are deliberately ignored and leave
// ipaddress empty (best-effort).
func flattenPortMappings(ports API.PortMappings) []interface{} {
	var ret []interface{}
	for _, tasks := range ports {
		for _, port := range tasks {
			r := map[string]interface{}{}
			ip := ""

			// Best-effort resolution of the host name; empty on failure.
			addrs, err := net.LookupHost(port.Host)
			if err == nil && len(addrs) > 0 {
				ip = addrs[0]
			}

			r["host"] = port.Host
			r["ipaddress"] = ip
			r["container_port"] = port.ContainerPort
			r["service_port"] = port.ServicePort
			ret = append(ret, r)
		}
	}
	return ret
}
|
||||
|
||||
// forceString coerces an arbitrary interface value to a string. nil — and
// now any non-string value — yields "" instead of panicking; the previous
// unchecked target.(string) assertion panicked on non-string input.
func forceString(target interface{}) string {
	s, _ := target.(string) // comma-ok: zero value on nil or type mismatch
	return s
}
|
|
@ -1,92 +0,0 @@
|
|||
package arukas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func validateMaxLength(minLength, maxLength int) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < minLength {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be shorter than %d characters: %q", k, minLength, value))
|
||||
}
|
||||
if len(value) > maxLength {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be longer than %d characters: %q", k, maxLength, value))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func validateIntegerInRange(min, max int) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < min {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be lower than %d: %d", k, min, value))
|
||||
}
|
||||
if value > max {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be higher than %d: %d", k, max, value))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func validateStringInWord(allowWords []string) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
var found bool
|
||||
for _, t := range allowWords {
|
||||
if v.(string) == t {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
errors = append(errors, fmt.Errorf("%q must be one of [%s]", k, strings.Join(allowWords, "/")))
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func validateIntInWord(allowWords []string) schema.SchemaValidateFunc {
|
||||
return func(v interface{}, k string) (ws []string, errors []error) {
|
||||
var found bool
|
||||
for _, t := range allowWords {
|
||||
if fmt.Sprintf("%d", v.(int)) == t {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
errors = append(errors, fmt.Errorf("%q must be one of [%s]", k, strings.Join(allowWords, "/")))
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// validateDNSRecordValue returns a schema validation function for a DNS
// record described as a map with "type" and "value" keys. For MX, NS, and
// CNAME records the value must end with a trailing period; MX records
// additionally require a "priority" key. Other record types pass unchecked.
func validateDNSRecordValue() schema.SchemaValidateFunc {
	return func(v interface{}, k string) (ws []string, errors []error) {
		var rtype, value string

		values := v.(map[string]interface{})
		rtype = values["type"].(string)
		value = values["value"].(string)
		switch rtype {
		case "MX", "NS", "CNAME":
			if rtype == "MX" {
				// MX records need a priority alongside the target host.
				if values["priority"] == nil {
					errors = append(errors, fmt.Errorf("%q required when TYPE was MX", k))
				}
			}
			// Hostname-valued records must be fully qualified (trailing dot).
			if !strings.HasSuffix(value, ".") {
				errors = append(errors, fmt.Errorf("%q must be period at the end [%s]", k, value))
			}
		}
		return
	}

}
|
|
@ -1,149 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/atlas-go/v1"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func dataSourceAtlasArtifact() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: dataSourceArtifactRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"build": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"version": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"metadata_keys": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"metadata": &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"file_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"metadata_full": &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"slug": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"version_real": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// dataSourceArtifactRead looks up exactly one artifact version in Atlas
// matching the configured name/type plus optional build/version/metadata
// filters, and mirrors the result into state (id, version_real,
// metadata_full, slug, file_url). Zero or multiple matches are errors.
//
// NOTE(review): this is nearly identical to resourceArtifactRead; the two
// should share a helper.
func dataSourceArtifactRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*atlas.Client)

	// Parse the slug from the name given of the artifact since the API
	// expects these to be split.
	user, name, err := atlas.ParseSlug(d.Get("name").(string))
	if err != nil {
		return err
	}

	// Filter by version or build if given; version takes precedence.
	var build, version string
	if v, ok := d.GetOk("version"); ok {
		version = v.(string)
	} else if b, ok := d.GetOk("build"); ok {
		build = b.(string)
	}

	// If we have neither, default to latest version
	if build == "" && version == "" {
		version = "latest"
	}

	// Compile the metadata search params: metadata_keys entries match on
	// key presence with any value; explicit metadata entries pin a value
	// (and override the any-value entry for the same key).
	md := make(map[string]string)
	for _, v := range d.Get("metadata_keys").(*schema.Set).List() {
		md[v.(string)] = atlas.MetadataAnyValue
	}
	for k, v := range d.Get("metadata").(map[string]interface{}) {
		md[k] = v.(string)
	}

	// Do the search!
	vs, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{
		User:     user,
		Name:     name,
		Type:     d.Get("type").(string),
		Build:    build,
		Version:  version,
		Metadata: md,
	})
	if err != nil {
		return fmt.Errorf(
			"Error searching for artifact '%s/%s': %s",
			user, name, err)
	}

	// The data source needs an unambiguous result.
	if len(vs) == 0 {
		return fmt.Errorf("No matching artifact for '%s/%s'", user, name)
	} else if len(vs) > 1 {
		return fmt.Errorf(
			"Got %d results for '%s/%s', only one is allowed",
			len(vs), user, name)
	}
	v := vs[0]

	d.SetId(v.ID)
	if v.ID == "" {
		// Fall back to a tag/version composite when the API returns no ID.
		d.SetId(fmt.Sprintf("%s %d", v.Tag, v.Version))
	}
	d.Set("version_real", v.Version)
	d.Set("metadata_full", cleanMetadata(v.Metadata))
	d.Set("slug", v.Slug)

	// file_url is best-effort: cleared first, then set only when the API
	// provides a URL.
	d.Set("file_url", "")
	if u, err := client.ArtifactFileURL(v); err != nil {
		return fmt.Errorf(
			"Error reading file URL: %s", err)
	} else if u != nil {
		d.Set("file_url", u.String())
	}

	return nil
}
|
|
@ -1,150 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccDataSourceArtifact_basic(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataArtifact_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccDataSourceArtifact_metadata(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataArtifact_metadata,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
testAccCheckDataArtifactState("id", "x86"),
|
||||
testAccCheckDataArtifactState("metadata_full.arch", "x86"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccDataSourceArtifact_metadataSet(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataArtifact_metadataSet,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
testAccCheckDataArtifactState("id", "x64"),
|
||||
testAccCheckDataArtifactState("metadata_full.arch", "x64"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccDataSourceArtifact_buildLatest(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataArtifact_buildLatest,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccDataSourceArtifact_versionAny(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataArtifact_versionAny,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckDataArtifactState(key, value string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources["data.atlas_artifact.foobar"]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", "data.atlas_artifact.foobar")
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
p := rs.Primary
|
||||
if p.Attributes[key] != value {
|
||||
return fmt.Errorf(
|
||||
"%s != %s (actual: %s)", key, value, p.Attributes[key])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Acceptance-test configurations for the atlas_artifact data source. Each
// fetches the shared hashicorp/tf-provider-test artifact with a different
// filter combination (basic, exact metadata, metadata key set, latest
// build, any version).
const testAccDataArtifact_basic = `
data "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
}`

const testAccDataArtifact_metadata = `
data "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	metadata {
		arch = "x86"
	}
	version = "any"
}`

const testAccDataArtifact_metadataSet = `
data "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	metadata_keys = ["arch"]
	version = "any"
}`

const testAccDataArtifact_buildLatest = `
data "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	build = "latest"
	metadata {
		arch = "x86"
	}
}`

const testAccDataArtifact_versionAny = `
data "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	version = "any"
}`
|
|
@ -1,71 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/atlas-go/v1"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
const (
	// defaultAtlasServer is the default endpoint for Atlas if
	// none is specified; it can be overridden via the provider's
	// "address" argument or the ATLAS_ADDRESS environment variable.
	defaultAtlasServer = "https://atlas.hashicorp.com"
)
|
||||
|
||||
// Provider returns a terraform.ResourceProvider.
|
||||
func Provider() terraform.ResourceProvider {
|
||||
return &schema.Provider{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"token": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("ATLAS_TOKEN", nil),
|
||||
Description: descriptions["token"],
|
||||
},
|
||||
|
||||
"address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer),
|
||||
Description: descriptions["address"],
|
||||
},
|
||||
},
|
||||
|
||||
DataSourcesMap: map[string]*schema.Resource{
|
||||
"atlas_artifact": dataSourceAtlasArtifact(),
|
||||
},
|
||||
|
||||
ResourcesMap: map[string]*schema.Resource{
|
||||
"atlas_artifact": resourceArtifact(),
|
||||
},
|
||||
|
||||
ConfigureFunc: providerConfigure,
|
||||
}
|
||||
}
|
||||
|
||||
// providerConfigure builds the atlas.Client shared by all resources: the
// default client, or one pointed at the configured "address", with the
// Terraform version header and the API token attached.
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
	var err error
	client := atlas.DefaultClient()
	if v := d.Get("address").(string); v != "" {
		// A custom server address was configured; replace the default client.
		client, err = atlas.NewClient(v)
		if err != nil {
			return nil, err
		}
	}
	// Identify this Terraform version to the server, then authenticate.
	client.DefaultHeader.Set(terraform.VersionHeader, terraform.VersionString())
	client.Token = d.Get("token").(string)

	return client, nil
}
|
||||
|
||||
// descriptions holds the user-facing help strings for the provider's
// configuration arguments, keyed by argument name. Initialized directly
// instead of via init(): the map is constant data, so a side-effecting
// init function was unnecessary.
var descriptions = map[string]string{
	"address": "The address of the Atlas server. If blank, the public\n" +
		"server at atlas.hashicorp.com will be used.",

	"token": "The access token for reading artifacts. This is required\n" +
		"if reading private artifacts.",
}
|
|
@ -1,35 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// testAccProviders maps provider names to the provider instances exercised
// by the acceptance tests.
var testAccProviders map[string]terraform.ResourceProvider

// testAccProvider is the single atlas provider instance under test.
var testAccProvider *schema.Provider

func init() {
	// Register the provider under the name used in the test configurations.
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"atlas": testAccProvider,
	}
}
|
||||
|
||||
// TestProvider validates the provider's schema definition via
// InternalValidate.
func TestProvider(t *testing.T) {
	if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}
|
||||
|
||||
// TestProvider_impl is a compile-time assertion that Provider satisfies the
// terraform.ResourceProvider interface.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
|
||||
|
||||
func testAccPreCheck(t *testing.T) {
|
||||
if v := os.Getenv("ATLAS_TOKEN"); v == "" {
|
||||
t.Fatal("ATLAS_TOKEN must be set for acceptance tests")
|
||||
}
|
||||
}
|
|
@ -1,176 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/atlas-go/v1"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
var (
	// saneMetaKey is used to sanitize the metadata keys so that
	// they can be accessed as a variable interpolation from TF:
	// any character outside [a-zA-Z0-9-_] is replaced with "-"
	// (see cleanMetadata).
	saneMetaKey = regexp.MustCompile("[^a-zA-Z0-9-_]")
)
|
||||
|
||||
func resourceArtifact() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceArtifactRead,
|
||||
Read: resourceArtifactRead,
|
||||
Delete: resourceArtifactDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Deprecated: `atlas_artifact is now deprecated. Use the Atlas Artifact Data Source instead. See https://www.terraform.io/docs/providers/terraform-enterprise/d/artifact.html`,
|
||||
},
|
||||
|
||||
"type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"build": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"version": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"metadata_keys": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"metadata": &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"file_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"metadata_full": &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"slug": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"version_real": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resourceArtifactRead serves as both Create and Read for the deprecated
// atlas_artifact resource: it searches Atlas for exactly one artifact
// version matching the configured filters and mirrors it into state.
//
// NOTE(review): this duplicates dataSourceArtifactRead almost verbatim;
// the two should share a helper.
func resourceArtifactRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*atlas.Client)

	// Parse the slug from the name given of the artifact since the API
	// expects these to be split.
	user, name, err := atlas.ParseSlug(d.Get("name").(string))
	if err != nil {
		return err
	}

	// Filter by version or build if given; version takes precedence.
	var build, version string
	if v, ok := d.GetOk("version"); ok {
		version = v.(string)
	} else if b, ok := d.GetOk("build"); ok {
		build = b.(string)
	}

	// If we have neither, default to latest version
	if build == "" && version == "" {
		version = "latest"
	}

	// Compile the metadata search params: metadata_keys entries match on
	// key presence with any value; explicit metadata entries pin a value.
	md := make(map[string]string)
	for _, v := range d.Get("metadata_keys").(*schema.Set).List() {
		md[v.(string)] = atlas.MetadataAnyValue
	}
	for k, v := range d.Get("metadata").(map[string]interface{}) {
		md[k] = v.(string)
	}

	// Do the search!
	vs, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{
		User:     user,
		Name:     name,
		Type:     d.Get("type").(string),
		Build:    build,
		Version:  version,
		Metadata: md,
	})
	if err != nil {
		return fmt.Errorf(
			"Error searching for artifact '%s/%s': %s",
			user, name, err)
	}

	// The resource needs an unambiguous result.
	if len(vs) == 0 {
		return fmt.Errorf("No matching artifact for '%s/%s'", user, name)
	} else if len(vs) > 1 {
		return fmt.Errorf(
			"Got %d results for '%s/%s', only one is allowed",
			len(vs), user, name)
	}
	v := vs[0]

	d.SetId(v.ID)
	if v.ID == "" {
		// Fall back to a tag/version composite when the API returns no ID.
		d.SetId(fmt.Sprintf("%s %d", v.Tag, v.Version))
	}
	d.Set("version_real", v.Version)
	d.Set("metadata_full", cleanMetadata(v.Metadata))
	d.Set("slug", v.Slug)

	// file_url is best-effort: cleared first, then set only when the API
	// provides a URL.
	d.Set("file_url", "")
	if u, err := client.ArtifactFileURL(v); err != nil {
		return fmt.Errorf(
			"Error reading file URL: %s", err)
	} else if u != nil {
		d.Set("file_url", u.String())
	}

	return nil
}
||||
|
||||
// resourceArtifactDelete is a no-op delete for the read-only artifact
// resource: removing it from state is all that "delete" can mean here.
func resourceArtifactDelete(d *schema.ResourceData, meta interface{}) error {
	// This just always succeeds since this is a readonly element.
	d.SetId("")
	return nil
}
|
||||
|
||||
// cleanMetadata is used to ensure the metadata is accessible as
|
||||
// a variable by doing a simple re-write.
|
||||
func cleanMetadata(in map[string]string) map[string]string {
|
||||
out := make(map[string]string, len(in))
|
||||
for k, v := range in {
|
||||
sane := saneMetaKey.ReplaceAllString(k, "-")
|
||||
out[sane] = v
|
||||
}
|
||||
return out
|
||||
}
|
|
@ -1,150 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccArtifact_basic(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccArtifact_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArtifact_metadata(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccArtifact_metadata,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
testAccCheckArtifactState("id", "x86"),
|
||||
testAccCheckArtifactState("metadata_full.arch", "x86"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArtifact_metadataSet(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccArtifact_metadataSet,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
testAccCheckArtifactState("id", "x64"),
|
||||
testAccCheckArtifactState("metadata_full.arch", "x64"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArtifact_buildLatest(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccArtifact_buildLatest,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccArtifact_versionAny(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccArtifact_versionAny,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArtifactState("name", "hashicorp/tf-provider-test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckArtifactState(key, value string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources["atlas_artifact.foobar"]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", "atlas_artifact.foobar")
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
p := rs.Primary
|
||||
if p.Attributes[key] != value {
|
||||
return fmt.Errorf(
|
||||
"%s != %s (actual: %s)", key, value, p.Attributes[key])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Acceptance-test configurations for the deprecated atlas_artifact
// resource, mirroring the data-source test configurations: basic, exact
// metadata, metadata key set, latest build, and any version.
const testAccArtifact_basic = `
resource "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
}`

const testAccArtifact_metadata = `
resource "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	metadata {
		arch = "x86"
	}
	version = "any"
}`

const testAccArtifact_metadataSet = `
resource "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	metadata_keys = ["arch"]
	version = "any"
}`

const testAccArtifact_buildLatest = `
resource "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	build = "latest"
	metadata {
		arch = "x86"
	}
}`

const testAccArtifact_versionAny = `
resource "atlas_artifact" "foobar" {
	name = "hashicorp/tf-provider-test"
	type = "foo"
	version = "any"
}`
|
|
@ -1,217 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
// GetAccountInfo determines the AWS partition and account ID for the
// current credentials, returning (partition, accountID, error). It probes
// several sources in a fixed order, falling through on expected failures:
//
//  1. EC2 instance metadata (only when the credentials came from the
//     instance-profile provider) — the instance profile ARN is parsed.
//  2. iam:GetUser — works for plain IAM users; AccessDenied /
//     ValidationError / InvalidClientTokenId are tolerated (federated or
//     role credentials) and fall through, any other failure is fatal.
//  3. sts:GetCallerIdentity — failures here fall through silently.
//  4. iam:ListRoles — last resort; the first role's ARN is parsed.
func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
	// If we have creds from instance profile, we can use metadata API
	if authProviderName == ec2rolecreds.ProviderName {
		log.Println("[DEBUG] Trying to get account ID via AWS Metadata API")

		cfg := &aws.Config{}
		setOptionalEndpoint(cfg)
		sess, err := session.NewSession(cfg)
		if err != nil {
			return "", "", errwrap.Wrapf("Error creating AWS session: {{err}}", err)
		}

		metadataClient := ec2metadata.New(sess)
		info, err := metadataClient.IAMInfo()
		if err != nil {
			// This can be triggered when no IAM Role is assigned
			// or AWS just happens to return invalid response
			return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
		}

		return parseAccountInfoFromArn(info.InstanceProfileArn)
	}

	// Then try IAM GetUser
	log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
	outUser, err := iamconn.GetUser(nil)
	if err == nil {
		return parseAccountInfoFromArn(*outUser.User.Arn)
	}

	awsErr, ok := err.(awserr.Error)
	// AccessDenied and ValidationError can be raised
	// if credentials belong to federated profile, so we ignore these
	if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
		return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
	}
	log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)

	// Then try STS GetCallerIdentity
	log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity")
	outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err == nil {
		return parseAccountInfoFromArn(*outCallerIdentity.Arn)
	}
	log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err)

	// Then try IAM ListRoles
	log.Println("[DEBUG] Trying to get account ID via iam:ListRoles")
	outRoles, err := iamconn.ListRoles(&iam.ListRolesInput{
		// One role is enough: we only need its ARN for the account ID.
		MaxItems: aws.Int64(int64(1)),
	})
	if err != nil {
		return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
	}

	if len(outRoles.Roles) < 1 {
		return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
	}

	return parseAccountInfoFromArn(*outRoles.Roles[0].Arn)
}
|
||||
|
||||
// parseAccountInfoFromArn extracts the partition and account ID from an ARN
// of the form arn:<partition>:<service>:<region>:<account>:<resource>.
// It returns an error when the string does not contain enough fields.
func parseAccountInfoFromArn(arn string) (string, string, error) {
	const minFields = 5
	fields := strings.SplitN(arn, ":", minFields+1)
	if len(fields) < minFields {
		return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn)
	}
	// fields[1] is the partition ("aws", "aws-cn", ...), fields[4] the account.
	return fields[1], fields[4], nil
}
|
||||
|
||||
// This function is responsible for reading credentials from the
|
||||
// environment in the case that they're not explicitly specified
|
||||
// in the Terraform configuration.
|
||||
func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
|
||||
// build a chain provider, lazy-evaulated by aws-sdk
|
||||
providers := []awsCredentials.Provider{
|
||||
&awsCredentials.StaticProvider{Value: awsCredentials.Value{
|
||||
AccessKeyID: c.AccessKey,
|
||||
SecretAccessKey: c.SecretKey,
|
||||
SessionToken: c.Token,
|
||||
}},
|
||||
&awsCredentials.EnvProvider{},
|
||||
&awsCredentials.SharedCredentialsProvider{
|
||||
Filename: c.CredsFilename,
|
||||
Profile: c.Profile,
|
||||
},
|
||||
}
|
||||
|
||||
// Build isolated HTTP client to avoid issues with globally-shared settings
|
||||
client := cleanhttp.DefaultClient()
|
||||
|
||||
// Keep the timeout low as we don't want to wait in non-EC2 environments
|
||||
client.Timeout = 100 * time.Millisecond
|
||||
cfg := &aws.Config{
|
||||
HTTPClient: client,
|
||||
}
|
||||
usedEndpoint := setOptionalEndpoint(cfg)
|
||||
|
||||
if !c.SkipMetadataApiCheck {
|
||||
// Real AWS should reply to a simple metadata request.
|
||||
// We check it actually does to ensure something else didn't just
|
||||
// happen to be listening on the same IP:Port
|
||||
metadataClient := ec2metadata.New(session.New(cfg))
|
||||
if metadataClient.Available() {
|
||||
providers = append(providers, &ec2rolecreds.EC2RoleProvider{
|
||||
Client: metadataClient,
|
||||
})
|
||||
log.Print("[INFO] AWS EC2 instance detected via default metadata" +
|
||||
" API endpoint, EC2RoleProvider added to the auth chain")
|
||||
} else {
|
||||
if usedEndpoint == "" {
|
||||
usedEndpoint = "default location"
|
||||
}
|
||||
log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
|
||||
"as it doesn't return any instance-id", usedEndpoint)
|
||||
}
|
||||
}
|
||||
|
||||
// This is the "normal" flow (i.e. not assuming a role)
|
||||
if c.AssumeRoleARN == "" {
|
||||
return awsCredentials.NewChainCredentials(providers), nil
|
||||
}
|
||||
|
||||
// Otherwise we need to construct and STS client with the main credentials, and verify
|
||||
// that we can assume the defined role.
|
||||
log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
|
||||
c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
|
||||
|
||||
creds := awsCredentials.NewChainCredentials(providers)
|
||||
cp, err := creds.Get()
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||
return nil, errors.New(`No valid credential sources found for AWS Provider.
|
||||
Please see https://terraform.io/docs/providers/aws/index.html for more information on
|
||||
providing credentials for the AWS Provider`)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
|
||||
|
||||
awsConfig := &aws.Config{
|
||||
Credentials: creds,
|
||||
Region: aws.String(c.Region),
|
||||
MaxRetries: aws.Int(c.MaxRetries),
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
|
||||
}
|
||||
|
||||
stsclient := sts.New(session.New(awsConfig))
|
||||
assumeRoleProvider := &stscreds.AssumeRoleProvider{
|
||||
Client: stsclient,
|
||||
RoleARN: c.AssumeRoleARN,
|
||||
}
|
||||
if c.AssumeRoleSessionName != "" {
|
||||
assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName
|
||||
}
|
||||
if c.AssumeRoleExternalID != "" {
|
||||
assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID)
|
||||
}
|
||||
if c.AssumeRolePolicy != "" {
|
||||
assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy)
|
||||
}
|
||||
|
||||
providers = []awsCredentials.Provider{assumeRoleProvider}
|
||||
|
||||
assumeRoleCreds := awsCredentials.NewChainCredentials(providers)
|
||||
_, err = assumeRoleCreds.Get()
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||
return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+
|
||||
" There are a number of possible causes of this - the most common are:\n"+
|
||||
" * The credentials used in order to assume the role are invalid\n"+
|
||||
" * The credentials do not have appropriate permission to assume the role\n"+
|
||||
" * The role ARN is not valid",
|
||||
c.AssumeRoleARN)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
|
||||
}
|
||||
|
||||
return assumeRoleCreds, nil
|
||||
}
|
||||
|
||||
func setOptionalEndpoint(cfg *aws.Config) string {
|
||||
endpoint := os.Getenv("AWS_METADATA_URL")
|
||||
if endpoint != "" {
|
||||
log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint)
|
||||
cfg.Endpoint = aws.String(endpoint)
|
||||
return endpoint
|
||||
}
|
||||
return ""
|
||||
}
|
|
@ -1,902 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_fromEC2Role verifies that account info
// is read from the mocked EC2 metadata API when the credentials came from the
// EC2 role provider. The IAM/STS mocks get no endpoints, so any fallback call
// would fail loudly.
func TestAWSGetAccountInfo_shouldBeValid_fromEC2Role(t *testing.T) {
	resetEnv := unsetEnv(t)
	defer resetEnv()
	// capture the test server's close method, to call after the test returns
	awsTs := awsEnv(t)
	defer awsTs()

	closeEmpty, emptySess, err := getMockedAwsApiSession("zero", []*awsMockEndpoint{})
	defer closeEmpty()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(emptySess)
	stsConn := sts.New(emptySess)

	part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
	if err != nil {
		t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	// ...9013 is the account baked into the metadata fixture (vs ...9012 in
	// the IAM fixtures), so a wrong lookup path would be detected here.
	expectedAccountId := "123456789013"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority checks that when the
// EC2 role provider supplied the credentials, the metadata API wins even
// though iam:GetUser would also succeed: the expected account (...9013) is the
// one from the metadata fixture, not the GetUser fixture (...9012).
func TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
	resetEnv := unsetEnv(t)
	defer resetEnv()
	// capture the test server's close method, to call after the test returns
	awsTs := awsEnv(t)
	defer awsTs()

	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"},
		},
	}
	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}
	iamConn := iam.New(iamSess)
	closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}
	stsConn := sts.New(stsSess)

	part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
	if err != nil {
		t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	expectedAccountId := "123456789013"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_fromIamUser verifies the iam:GetUser
// path: with no EC2-role provider name, the first fallback (GetUser) succeeds
// and supplies the account info.
func TestAWSGetAccountInfo_shouldBeValid_fromIamUser(t *testing.T) {
	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"},
		},
	}

	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}
	closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(iamSess)
	stsConn := sts.New(stsSess)

	// Empty provider name: the metadata branch is skipped entirely.
	part, id, err := GetAccountInfo(iamConn, stsConn, "")
	if err != nil {
		t.Fatalf("Getting account ID via GetUser failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	expectedAccountId := "123456789012"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity verifies the
// second fallback: iam:GetUser is denied (403), so the lookup proceeds to
// sts:GetCallerIdentity, which succeeds.
func TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
		},
	}
	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}

	stsEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
			Response: &awsMockResponse{200, stsResponse_GetCallerIdentity_valid, "text/xml"},
		},
	}
	closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints)
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(iamSess)
	stsConn := sts.New(stsSess)

	part, id, err := GetAccountInfo(iamConn, stsConn, "")
	if err != nil {
		t.Fatalf("Getting account ID via GetUser failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	expectedAccountId := "123456789012"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles verifies the last
// fallback: both iam:GetUser and sts:GetCallerIdentity are denied (403), so
// the account is parsed from the first role returned by iam:ListRoles.
func TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles(t *testing.T) {
	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
		},
		{
			Request:  &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
			Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"},
		},
	}
	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}

	stsEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
			Response: &awsMockResponse{403, stsResponse_GetCallerIdentity_unauthorized, "text/xml"},
		},
	}
	closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints)
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(iamSess)
	stsConn := sts.New(stsSess)

	part, id, err := GetAccountInfo(iamConn, stsConn, "")
	if err != nil {
		t.Fatalf("Getting account ID via ListRoles failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	expectedAccountId := "123456789012"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldBeValid_federatedRole simulates a federated
// principal: iam:GetUser fails with a 400 ValidationError (which GetAccountInfo
// tolerates) and the lookup falls through to iam:ListRoles.
func TestAWSGetAccountInfo_shouldBeValid_federatedRole(t *testing.T) {
	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{400, iamResponse_GetUser_federatedFailure, "text/xml"},
		},
		{
			Request:  &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
			Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"},
		},
	}
	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}

	closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(iamSess)
	stsConn := sts.New(stsSess)

	part, id, err := GetAccountInfo(iamConn, stsConn, "")
	if err != nil {
		t.Fatalf("Getting account ID via ListRoles failed: %s", err)
	}

	expectedPart := "aws"
	if part != expectedPart {
		t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
	}

	expectedAccountId := "123456789012"
	if id != expectedAccountId {
		t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
	}
}
|
||||
|
||||
// TestAWSGetAccountInfo_shouldError_unauthorizedFromIam checks the failure
// path: every IAM lookup is denied (403), so GetAccountInfo must return an
// error and empty partition/account values.
func TestAWSGetAccountInfo_shouldError_unauthorizedFromIam(t *testing.T) {
	iamEndpoints := []*awsMockEndpoint{
		{
			Request:  &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
			Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
		},
		{
			Request:  &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
			Response: &awsMockResponse{403, iamResponse_ListRoles_unauthorized, "text/xml"},
		},
	}
	closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
	defer closeIam()
	if err != nil {
		t.Fatal(err)
	}

	closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
	defer closeSts()
	if err != nil {
		t.Fatal(err)
	}

	iamConn := iam.New(iamSess)
	stsConn := sts.New(stsSess)

	part, id, err := GetAccountInfo(iamConn, stsConn, "")
	if err == nil {
		t.Fatal("Expected error when getting account ID")
	}

	if part != "" {
		t.Fatalf("Expected no partition, given: %s", part)
	}

	if id != "" {
		t.Fatalf("Expected no account ID, given: %s", id)
	}
}
|
||||
|
||||
// TestAWSParseAccountInfoFromArn exercises parseAccountInfoFromArn with one
// well-formed ARN (expecting partition + account) and one malformed string
// (expecting an error).
func TestAWSParseAccountInfoFromArn(t *testing.T) {
	validArn := "arn:aws:iam::101636750127:instance-profile/aws-elasticbeanstalk-ec2-role"
	expectedPart := "aws"
	expectedId := "101636750127"
	part, id, err := parseAccountInfoFromArn(validArn)
	if err != nil {
		t.Fatalf("Expected no error when parsing valid ARN: %s", err)
	}
	if part != expectedPart {
		t.Fatalf("Parsed part doesn't match with expected (%q != %q)", part, expectedPart)
	}
	if id != expectedId {
		t.Fatalf("Parsed id doesn't match with expected (%q != %q)", id, expectedId)
	}

	invalidArn := "blablah"
	part, id, err = parseAccountInfoFromArn(invalidArn)
	if err == nil {
		t.Fatalf("Expected error when parsing invalid ARN (%q)", invalidArn)
	}
}
|
||||
|
||||
func TestAWSGetCredentials_shouldError(t *testing.T) {
|
||||
resetEnv := unsetEnv(t)
|
||||
defer resetEnv()
|
||||
cfg := Config{}
|
||||
|
||||
c, err := GetCredentials(&cfg)
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
if awsErr.Code() != "NoCredentialProviders" {
|
||||
t.Fatal("Expected NoCredentialProviders error")
|
||||
}
|
||||
}
|
||||
_, err = c.Get()
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
if awsErr.Code() != "NoCredentialProviders" {
|
||||
t.Fatal("Expected NoCredentialProviders error")
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error with empty env, keys, and IAM in AWS Config")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAWSGetCredentials_shouldBeStatic verifies that keys supplied directly in
// the provider Config are returned verbatim by the credential chain, with and
// without a session token.
func TestAWSGetCredentials_shouldBeStatic(t *testing.T) {
	simple := []struct {
		Key, Secret, Token string
	}{
		{
			Key:    "test",
			Secret: "secret",
		}, {
			Key:    "test",
			Secret: "test",
			Token:  "test",
		},
	}

	for _, c := range simple {
		cfg := Config{
			AccessKey: c.Key,
			SecretKey: c.Secret,
			Token:     c.Token,
		}

		creds, err := GetCredentials(&cfg)
		if err != nil {
			t.Fatalf("Error gettings creds: %s", err)
		}
		if creds == nil {
			t.Fatal("Expected a static creds provider to be returned")
		}

		v, err := creds.Get()
		if err != nil {
			t.Fatalf("Error gettings creds: %s", err)
		}

		if v.AccessKeyID != c.Key {
			t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
		}
		if v.SecretAccessKey != c.Secret {
			t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
		}
		if v.SessionToken != c.Token {
			t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
		}
	}
}
|
||||
|
||||
// TestAWSGetCredentials_shouldIAM is designed to test the scenario of running Terraform
// from an EC2 instance, without environment variables or manually supplied
// credentials. The mocked metadata server supplies the "somekey"/"somesecret"/
// "sometoken" fixture values, which must come back from the chain.
func TestAWSGetCredentials_shouldIAM(t *testing.T) {
	// clear AWS_* environment variables
	resetEnv := unsetEnv(t)
	defer resetEnv()

	// capture the test server's close method, to call after the test returns
	ts := awsEnv(t)
	defer ts()

	// An empty config, no key supplied
	cfg := Config{}

	creds, err := GetCredentials(&cfg)
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if creds == nil {
		t.Fatal("Expected a static creds provider to be returned")
	}

	v, err := creds.Get()
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if v.AccessKeyID != "somekey" {
		t.Fatalf("AccessKeyID mismatch, expected: (somekey), got (%s)", v.AccessKeyID)
	}
	if v.SecretAccessKey != "somesecret" {
		t.Fatalf("SecretAccessKey mismatch, expected: (somesecret), got (%s)", v.SecretAccessKey)
	}
	if v.SessionToken != "sometoken" {
		t.Fatalf("SessionToken mismatch, expected: (sometoken), got (%s)", v.SessionToken)
	}
}
|
||||
|
||||
// TestAWSGetCredentials_shouldIgnoreIAM checks that explicitly supplied static
// credentials take priority over the EC2 instance profile: the mock metadata
// server is running, yet the static keys must be the ones returned.
// (The original header comment here was a stale copy-paste naming shouldIAM.)
func TestAWSGetCredentials_shouldIgnoreIAM(t *testing.T) {
	resetEnv := unsetEnv(t)
	defer resetEnv()
	// capture the test server's close method, to call after the test returns
	ts := awsEnv(t)
	defer ts()
	simple := []struct {
		Key, Secret, Token string
	}{
		{
			Key:    "test",
			Secret: "secret",
		}, {
			Key:    "test",
			Secret: "test",
			Token:  "test",
		},
	}

	for _, c := range simple {
		cfg := Config{
			AccessKey: c.Key,
			SecretKey: c.Secret,
			Token:     c.Token,
		}

		creds, err := GetCredentials(&cfg)
		if err != nil {
			t.Fatalf("Error gettings creds: %s", err)
		}
		if creds == nil {
			t.Fatal("Expected a static creds provider to be returned")
		}

		v, err := creds.Get()
		if err != nil {
			t.Fatalf("Error gettings creds: %s", err)
		}
		if v.AccessKeyID != c.Key {
			t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
		}
		if v.SecretAccessKey != c.Secret {
			t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
		}
		if v.SessionToken != c.Token {
			t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
		}
	}
}
|
||||
|
||||
// TestAWSGetCredentials_shouldErrorWithInvalidEndpoint checks that with no
// credentials anywhere and a metadata endpoint that always answers 400,
// building the chain succeeds but resolving credentials from it fails.
func TestAWSGetCredentials_shouldErrorWithInvalidEndpoint(t *testing.T) {
	resetEnv := unsetEnv(t)
	defer resetEnv()
	// capture the test server's close method, to call after the test returns
	ts := invalidAwsEnv(t)
	defer ts()

	creds, err := GetCredentials(&Config{})
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if creds == nil {
		t.Fatal("Expected a static creds provider to be returned")
	}

	v, err := creds.Get()
	if err == nil {
		t.Fatal("Expected error returned when getting creds w/ invalid EC2 endpoint")
	}

	// On failure the zero Value should carry no provider name.
	if v.ProviderName != "" {
		t.Fatalf("Expected provider name to be empty, %q given", v.ProviderName)
	}
}
|
||||
|
||||
func TestAWSGetCredentials_shouldIgnoreInvalidEndpoint(t *testing.T) {
|
||||
resetEnv := unsetEnv(t)
|
||||
defer resetEnv()
|
||||
// capture the test server's close method, to call after the test returns
|
||||
ts := invalidAwsEnv(t)
|
||||
defer ts()
|
||||
|
||||
creds, err := GetCredentials(&Config{AccessKey: "accessKey", SecretKey: "secretKey"})
|
||||
if err != nil {
|
||||
t.Fatalf("Error gettings creds: %s", err)
|
||||
}
|
||||
v, err := creds.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("Getting static credentials w/ invalid EC2 endpoint failed: %s", err)
|
||||
}
|
||||
if creds == nil {
|
||||
t.Fatal("Expected a static creds provider to be returned")
|
||||
}
|
||||
|
||||
if v.ProviderName != "StaticProvider" {
|
||||
t.Fatalf("Expected provider name to be %q, %q given", "StaticProvider", v.ProviderName)
|
||||
}
|
||||
|
||||
if v.AccessKeyID != "accessKey" {
|
||||
t.Fatalf("Static Access Key %q doesn't match: %s", "accessKey", v.AccessKeyID)
|
||||
}
|
||||
|
||||
if v.SecretAccessKey != "secretKey" {
|
||||
t.Fatalf("Static Secret Key %q doesn't match: %s", "secretKey", v.SecretAccessKey)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAWSGetCredentials_shouldCatchEC2RoleProvider checks that with a working
// mock metadata server and no other credentials, the chain resolves through
// the EC2RoleProvider specifically.
func TestAWSGetCredentials_shouldCatchEC2RoleProvider(t *testing.T) {
	resetEnv := unsetEnv(t)
	defer resetEnv()
	// capture the test server's close method, to call after the test returns
	ts := awsEnv(t)
	defer ts()

	creds, err := GetCredentials(&Config{})
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if creds == nil {
		t.Fatal("Expected an EC2Role creds provider to be returned")
	}

	v, err := creds.Get()
	if err != nil {
		t.Fatalf("Expected no error when getting creds: %s", err)
	}
	expectedProvider := "EC2RoleProvider"
	if v.ProviderName != expectedProvider {
		t.Fatalf("Expected provider name to be %q, %q given",
			expectedProvider, v.ProviderName)
	}
}
|
||||
|
||||
// credentialsFileContents is a minimal AWS shared-credentials file body used
// by TestAWSGetCredentials_shouldBeShared.
var credentialsFileContents = `[myprofile]
aws_access_key_id = accesskey
aws_secret_access_key = secretkey
`
|
||||
|
||||
func TestAWSGetCredentials_shouldBeShared(t *testing.T) {
|
||||
file, err := ioutil.TempFile(os.TempDir(), "terraform_aws_cred")
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing temporary credentials file: %s", err)
|
||||
}
|
||||
_, err = file.WriteString(credentialsFileContents)
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing temporary credentials to file: %s", err)
|
||||
}
|
||||
err = file.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error closing temporary credentials file: %s", err)
|
||||
}
|
||||
|
||||
defer os.Remove(file.Name())
|
||||
|
||||
resetEnv := unsetEnv(t)
|
||||
defer resetEnv()
|
||||
|
||||
if err := os.Setenv("AWS_PROFILE", "myprofile"); err != nil {
|
||||
t.Fatalf("Error resetting env var AWS_PROFILE: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", file.Name()); err != nil {
|
||||
t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
|
||||
}
|
||||
|
||||
creds, err := GetCredentials(&Config{Profile: "myprofile", CredsFilename: file.Name()})
|
||||
if err != nil {
|
||||
t.Fatalf("Error gettings creds: %s", err)
|
||||
}
|
||||
if creds == nil {
|
||||
t.Fatal("Expected a provider chain to be returned")
|
||||
}
|
||||
|
||||
v, err := creds.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("Error gettings creds: %s", err)
|
||||
}
|
||||
|
||||
if v.AccessKeyID != "accesskey" {
|
||||
t.Fatalf("AccessKeyID mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID)
|
||||
}
|
||||
|
||||
if v.SecretAccessKey != "secretkey" {
|
||||
t.Fatalf("SecretAccessKey mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAWSGetCredentials_shouldBeENV verifies that when the Config is empty,
// the chain falls through to the AWS_* environment variables, all of which
// are set to the same dummy value by setEnv.
func TestAWSGetCredentials_shouldBeENV(t *testing.T) {
	// need to set the environment variables to a dummy string, as we don't know
	// what they may be at runtime without hardcoding here
	s := "some_env"
	resetEnv := setEnv(s, t)

	defer resetEnv()

	cfg := Config{}
	creds, err := GetCredentials(&cfg)
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if creds == nil {
		t.Fatalf("Expected a static creds provider to be returned")
	}

	v, err := creds.Get()
	if err != nil {
		t.Fatalf("Error gettings creds: %s", err)
	}
	if v.AccessKeyID != s {
		t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", s, v.AccessKeyID)
	}
	if v.SecretAccessKey != s {
		t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", s, v.SecretAccessKey)
	}
	if v.SessionToken != s {
		t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", s, v.SessionToken)
	}
}
|
||||
|
||||
// unsetEnv unsets environment variables for testing a "clean slate" with no
// credentials in the environment. It returns a function that restores the
// previously captured values; callers defer it.
func unsetEnv(t *testing.T) func() {
	// Grab any existing AWS keys and preserve. In some tests we'll unset these, so
	// we need to have them and restore them after
	e := getEnv()
	if err := os.Unsetenv("AWS_ACCESS_KEY_ID"); err != nil {
		t.Fatalf("Error unsetting env var AWS_ACCESS_KEY_ID: %s", err)
	}
	if err := os.Unsetenv("AWS_SECRET_ACCESS_KEY"); err != nil {
		t.Fatalf("Error unsetting env var AWS_SECRET_ACCESS_KEY: %s", err)
	}
	if err := os.Unsetenv("AWS_SESSION_TOKEN"); err != nil {
		t.Fatalf("Error unsetting env var AWS_SESSION_TOKEN: %s", err)
	}
	if err := os.Unsetenv("AWS_PROFILE"); err != nil {
		t.Fatalf("Error unsetting env var AWS_PROFILE: %s", err)
	}
	if err := os.Unsetenv("AWS_SHARED_CREDENTIALS_FILE"); err != nil {
		t.Fatalf("Error unsetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
	}

	return func() {
		// re-set all the envs we unset above
		if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
			t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
		}
		if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
			t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
		}
		if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
			t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
		}
		if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil {
			t.Fatalf("Error resetting env var AWS_PROFILE: %s", err)
		}
		if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", e.CredsFilename); err != nil {
			t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
		}
	}
}
|
||||
|
||||
func setEnv(s string, t *testing.T) func() {
|
||||
e := getEnv()
|
||||
// Set all the envs to a dummy value
|
||||
if err := os.Setenv("AWS_ACCESS_KEY_ID", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SECRET_ACCESS_KEY", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_SECRET_ACCESS_KEY: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SESSION_TOKEN", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_SESSION_TOKEN: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_PROFILE", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_PROFILE: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_SHARED_CREDENTIALS_FLE: %s", err)
|
||||
}
|
||||
|
||||
return func() {
|
||||
// re-set all the envs we unset above
|
||||
if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
|
||||
t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
|
||||
t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
|
||||
t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_PROFILE: %s", err)
|
||||
}
|
||||
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", s); err != nil {
|
||||
t.Fatalf("Error setting env var AWS_SHARED_CREDENTIALS_FLE: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// awsEnv establishes a httptest server to mock out the internal AWS Metadata
|
||||
// service. IAM Credentials are retrieved by the EC2RoleProvider, which makes
|
||||
// API calls to this internal URL. By replacing the server with a test server,
|
||||
// we can simulate an AWS environment
|
||||
func awsEnv(t *testing.T) func() {
|
||||
routes := routes{}
|
||||
if err := json.Unmarshal([]byte(metadataApiRoutes), &routes); err != nil {
|
||||
t.Fatalf("Failed to unmarshal JSON in AWS ENV test: %s", err)
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Header().Add("Server", "MockEC2")
|
||||
log.Printf("[DEBUG] Mocker server received request to %q", r.RequestURI)
|
||||
for _, e := range routes.Endpoints {
|
||||
if r.RequestURI == e.Uri {
|
||||
fmt.Fprintln(w, e.Body)
|
||||
w.WriteHeader(200)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.WriteHeader(400)
|
||||
}))
|
||||
|
||||
os.Setenv("AWS_METADATA_URL", ts.URL+"/latest")
|
||||
return ts.Close
|
||||
}
|
||||
|
||||
// invalidAwsEnv establishes a httptest server to simulate behaviour
// when endpoint doesn't respond as expected
func invalidAwsEnv(t *testing.T) func() {
	alwaysBadRequest := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(400)
	}
	ts := httptest.NewServer(http.HandlerFunc(alwaysBadRequest))
	os.Setenv("AWS_METADATA_URL", ts.URL+"/latest")
	return ts.Close
}
|
||||
|
||||
func getEnv() *currentEnv {
|
||||
// Grab any existing AWS keys and preserve. In some tests we'll unset these, so
|
||||
// we need to have them and restore them after
|
||||
return ¤tEnv{
|
||||
Key: os.Getenv("AWS_ACCESS_KEY_ID"),
|
||||
Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
|
||||
Token: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
Profile: os.Getenv("AWS_PROFILE"),
|
||||
CredsFilename: os.Getenv("AWS_SHARED_CREDENTIALS_FILE"),
|
||||
}
|
||||
}
|
||||
|
||||
// struct to preserve the current environment
type currentEnv struct {
	// Saved values of AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
	// AWS_SESSION_TOKEN, AWS_PROFILE and AWS_SHARED_CREDENTIALS_FILE,
	// in that order.
	Key, Secret, Token, Profile, CredsFilename string
}
|
||||
|
||||
// routes models the JSON route table (metadataApiRoutes) served by the mock
// metadata server in awsEnv.
type routes struct {
	Endpoints []*endpoint `json:"endpoints"`
}

// endpoint is a single mocked URI and the literal body returned for it.
type endpoint struct {
	Uri  string `json:"uri"`
	Body string `json:"body"`
}
|
||||
|
||||
// metadataApiRoutes lists the mock EC2 metadata endpoints (instance-id, IAM
// info, and role credentials) that awsEnv's test server responds to.
const metadataApiRoutes = `
{
"endpoints": [
{
"uri": "/latest/meta-data/instance-id",
"body": "mock-instance-id"
},
{
"uri": "/latest/meta-data/iam/info",
"body": "{\"Code\": \"Success\",\"LastUpdated\": \"2016-03-17T12:27:32Z\",\"InstanceProfileArn\": \"arn:aws:iam::123456789013:instance-profile/my-instance-profile\",\"InstanceProfileId\": \"AIPAABCDEFGHIJKLMN123\"}"
},
{
"uri": "/latest/meta-data/iam/security-credentials",
"body": "test_role"
},
{
"uri": "/latest/meta-data/iam/security-credentials/test_role",
"body": "{\"Code\":\"Success\",\"LastUpdated\":\"2015-12-11T17:17:25Z\",\"Type\":\"AWS-HMAC\",\"AccessKeyId\":\"somekey\",\"SecretAccessKey\":\"somesecret\",\"Token\":\"sometoken\"}"
}
]
}
`
|
||||
|
||||
// Canned IAM/STS XML responses used as test fixtures for the account-identity
// lookup code paths.

// iamResponse_GetUser_valid: successful iam:GetUser reply.
const iamResponse_GetUser_valid = `<GetUserResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetUserResult>
<User>
<UserId>AIDACKCEVSQ6C2EXAMPLE</UserId>
<Path>/division_abc/subdivision_xyz/</Path>
<UserName>Bob</UserName>
<Arn>arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob</Arn>
<CreateDate>2013-10-02T17:01:44Z</CreateDate>
<PasswordLastUsed>2014-10-10T14:37:51Z</PasswordLastUsed>
</User>
</GetUserResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</GetUserResponse>`

// iamResponse_GetUser_unauthorized: AccessDenied error for iam:GetUser.
const iamResponse_GetUser_unauthorized = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<Error>
<Type>Sender</Type>
<Code>AccessDenied</Code>
<Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:GetUser on resource: arn:aws:iam::123456789012:user/Bob</Message>
</Error>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ErrorResponse>`

// stsResponse_GetCallerIdentity_valid: successful sts:GetCallerIdentity reply.
const stsResponse_GetCallerIdentity_valid = `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetCallerIdentityResult>
<Arn>arn:aws:iam::123456789012:user/Alice</Arn>
<UserId>AKIAI44QH8DHBEXAMPLE</UserId>
<Account>123456789012</Account>
</GetCallerIdentityResult>
<ResponseMetadata>
<RequestId>01234567-89ab-cdef-0123-456789abcdef</RequestId>
</ResponseMetadata>
</GetCallerIdentityResponse>`

// stsResponse_GetCallerIdentity_unauthorized: AccessDenied for sts:GetCallerIdentity.
const stsResponse_GetCallerIdentity_unauthorized = `<ErrorResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<Error>
<Type>Sender</Type>
<Code>AccessDenied</Code>
<Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: sts:GetCallerIdentity</Message>
</Error>
<RequestId>01234567-89ab-cdef-0123-456789abcdef</RequestId>
</ErrorResponse>`

// iamResponse_GetUser_federatedFailure: ValidationError returned when GetUser
// is called with non-User (federated) credentials.
const iamResponse_GetUser_federatedFailure = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<Error>
<Type>Sender</Type>
<Code>ValidationError</Code>
<Message>Must specify userName when calling with non-User credentials</Message>
</Error>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ErrorResponse>`

// iamResponse_ListRoles_valid: successful (truncated) iam:ListRoles reply.
const iamResponse_ListRoles_valid = `<ListRolesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRolesResult>
<IsTruncated>true</IsTruncated>
<Marker>AWceSSsKsazQ4IEplT9o4hURCzBs00iavlEvEXAMPLE</Marker>
<Roles>
<member>
<Path>/</Path>
<AssumeRolePolicyDocument>%7B%22Version%22%3A%222008-10-17%22%2C%22Statement%22%3A%5B%7B%22Sid%22%3A%22%22%2C%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D</AssumeRolePolicyDocument>
<RoleId>AROACKCEVSQ6C2EXAMPLE</RoleId>
<RoleName>elasticbeanstalk-role</RoleName>
<Arn>arn:aws:iam::123456789012:role/elasticbeanstalk-role</Arn>
<CreateDate>2013-10-02T17:01:44Z</CreateDate>
</member>
</Roles>
</ListRolesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListRolesResponse>`

// iamResponse_ListRoles_unauthorized: AccessDenied error for iam:ListRoles.
const iamResponse_ListRoles_unauthorized = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<Error>
<Type>Sender</Type>
<Code>AccessDenied</Code>
<Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:ListRoles on resource: arn:aws:iam::123456789012:role/</Message>
</Error>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ErrorResponse>`
|
|
@ -1,317 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// autoscalingTagSchema returns the schema to use for the tag element.
//
// Each element is a set member with a required key, value, and a
// propagate_at_launch flag; set identity is computed by autoscalingTagToHash
// over all three fields.
func autoscalingTagSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeSet,
		Optional: true,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"key": &schema.Schema{
					Type:     schema.TypeString,
					Required: true,
				},

				"value": &schema.Schema{
					Type:     schema.TypeString,
					Required: true,
				},

				"propagate_at_launch": &schema.Schema{
					Type:     schema.TypeBool,
					Required: true,
				},
			},
		},
		Set: autoscalingTagToHash,
	}
}
|
||||
|
||||
func autoscalingTagToHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["key"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["value"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool)))
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
// setTags is a helper to set the tags for a resource. It expects the
// tags field to be named "tag"
//
// Both the "tag" set and the "tags" list attributes are diffed against their
// prior state; removed/changed tags are deleted and new ones created through
// the autoscaling API. Removals are issued before creations so a changed tag
// is effectively replaced.
func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error {
	resourceID := d.Get("name").(string)
	var createTags, removeTags []*autoscaling.Tag

	if d.HasChange("tag") || d.HasChange("tags") {
		// Diff the "tag" set attribute, keyed by tag key.
		oraw, nraw := d.GetChange("tag")
		o := setToMapByKey(oraw.(*schema.Set), "key")
		n := setToMapByKey(nraw.(*schema.Set), "key")

		old, err := autoscalingTagsFromMap(o, resourceID)
		if err != nil {
			return err
		}

		new, err := autoscalingTagsFromMap(n, resourceID)
		if err != nil {
			return err
		}

		c, r, err := diffAutoscalingTags(old, new, resourceID)
		if err != nil {
			return err
		}

		createTags = append(createTags, c...)
		removeTags = append(removeTags, r...)

		// Repeat the diff for the "tags" list attribute.
		oraw, nraw = d.GetChange("tags")
		old, err = autoscalingTagsFromList(oraw.([]interface{}), resourceID)
		if err != nil {
			return err
		}

		new, err = autoscalingTagsFromList(nraw.([]interface{}), resourceID)
		if err != nil {
			return err
		}

		c, r, err = diffAutoscalingTags(old, new, resourceID)
		if err != nil {
			return err
		}

		createTags = append(createTags, c...)
		removeTags = append(removeTags, r...)
	}

	// Set tags
	if len(removeTags) > 0 {
		log.Printf("[DEBUG] Removing autoscaling tags: %#v", removeTags)

		remove := autoscaling.DeleteTagsInput{
			Tags: removeTags,
		}

		if _, err := conn.DeleteTags(&remove); err != nil {
			return err
		}
	}

	if len(createTags) > 0 {
		log.Printf("[DEBUG] Creating autoscaling tags: %#v", createTags)

		create := autoscaling.CreateOrUpdateTagsInput{
			Tags: createTags,
		}

		if _, err := conn.CreateOrUpdateTags(&create); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// diffTags takes our tags locally and the ones remotely and returns
// the set of tags that must be created, and the set of tags that must
// be destroyed.
func diffAutoscalingTags(oldTags, newTags []*autoscaling.Tag, resourceID string) ([]*autoscaling.Tag, []*autoscaling.Tag, error) {
	// First, we're creating everything we have
	create := make(map[string]interface{})
	for _, t := range newTags {
		tag := map[string]interface{}{
			"key":                 *t.Key,
			"value":               *t.Value,
			"propagate_at_launch": *t.PropagateAtLaunch,
		}
		create[*t.Key] = tag
	}

	// Build the list of what to remove
	var remove []*autoscaling.Tag
	for _, t := range oldTags {
		old, ok := create[*t.Key].(map[string]interface{})

		// An old tag is removed when its key is absent from the desired set,
		// or when its value/propagation differs (the create list then holds
		// the replacement with the new settings).
		if !ok || old["value"] != *t.Value || old["propagate_at_launch"] != *t.PropagateAtLaunch {
			// Delete it!
			remove = append(remove, t)
		}
	}

	createTags, err := autoscalingTagsFromMap(create, resourceID)
	if err != nil {
		return nil, nil, err
	}

	return createTags, remove, nil
}
|
||||
|
||||
func autoscalingTagsFromList(vs []interface{}, resourceID string) ([]*autoscaling.Tag, error) {
|
||||
result := make([]*autoscaling.Tag, 0, len(vs))
|
||||
for _, tag := range vs {
|
||||
attr, ok := tag.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
t, err := autoscalingTagFromMap(attr, resourceID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t != nil {
|
||||
result = append(result, t)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// tagsFromMap returns the tags for the given map of data.
|
||||
func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) ([]*autoscaling.Tag, error) {
|
||||
result := make([]*autoscaling.Tag, 0, len(m))
|
||||
for _, v := range m {
|
||||
attr, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
t, err := autoscalingTagFromMap(attr, resourceID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t != nil {
|
||||
result = append(result, t)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// autoscalingTagFromMap builds a single autoscaling.Tag for resourceID from a
// raw attribute map with "key", "value" and "propagate_at_launch" entries.
// It returns (nil, nil) for AWS-internal tags that should be ignored.
func autoscalingTagFromMap(attr map[string]interface{}, resourceID string) (*autoscaling.Tag, error) {
	if _, ok := attr["key"]; !ok {
		return nil, fmt.Errorf("%s: invalid tag attributes: key missing", resourceID)
	}

	if _, ok := attr["value"]; !ok {
		return nil, fmt.Errorf("%s: invalid tag attributes: value missing", resourceID)
	}

	if _, ok := attr["propagate_at_launch"]; !ok {
		return nil, fmt.Errorf("%s: invalid tag attributes: propagate_at_launch missing", resourceID)
	}

	var propagateAtLaunch bool
	var err error

	// propagate_at_launch may arrive as a native bool or as a string.
	if v, ok := attr["propagate_at_launch"].(bool); ok {
		propagateAtLaunch = v
	}

	// NOTE(review): if the value is neither bool nor string it silently stays
	// false — confirm callers never pass other types.
	if v, ok := attr["propagate_at_launch"].(string); ok {
		if propagateAtLaunch, err = strconv.ParseBool(v); err != nil {
			return nil, fmt.Errorf(
				"%s: invalid tag attribute: invalid value for propagate_at_launch: %s",
				resourceID,
				v,
			)
		}
	}

	t := &autoscaling.Tag{
		Key:               aws.String(attr["key"].(string)),
		Value:             aws.String(attr["value"].(string)),
		PropagateAtLaunch: aws.Bool(propagateAtLaunch),
		ResourceId:        aws.String(resourceID),
		ResourceType:      aws.String("auto-scaling-group"),
	}

	// Skip AWS-managed tags (key matching "^aws:").
	if tagIgnoredAutoscaling(t) {
		return nil, nil
	}

	return t, nil
}
|
||||
|
||||
// autoscalingTagsToMap turns the list of tags into a map.
|
||||
func autoscalingTagsToMap(ts []*autoscaling.Tag) map[string]interface{} {
|
||||
tags := make(map[string]interface{})
|
||||
for _, t := range ts {
|
||||
tag := map[string]interface{}{
|
||||
"key": *t.Key,
|
||||
"value": *t.Value,
|
||||
"propagate_at_launch": *t.PropagateAtLaunch,
|
||||
}
|
||||
tags[*t.Key] = tag
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
// autoscalingTagDescriptionsToMap turns the list of tags into a map.
|
||||
func autoscalingTagDescriptionsToMap(ts *[]*autoscaling.TagDescription) map[string]map[string]interface{} {
|
||||
tags := make(map[string]map[string]interface{})
|
||||
for _, t := range *ts {
|
||||
tag := map[string]interface{}{
|
||||
"key": *t.Key,
|
||||
"value": *t.Value,
|
||||
"propagate_at_launch": *t.PropagateAtLaunch,
|
||||
}
|
||||
tags[*t.Key] = tag
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
// autoscalingTagDescriptionsToSlice turns the list of tags into a slice.
|
||||
func autoscalingTagDescriptionsToSlice(ts []*autoscaling.TagDescription) []map[string]interface{} {
|
||||
tags := make([]map[string]interface{}, 0, len(ts))
|
||||
for _, t := range ts {
|
||||
tags = append(tags, map[string]interface{}{
|
||||
"key": *t.Key,
|
||||
"value": *t.Value,
|
||||
"propagate_at_launch": *t.PropagateAtLaunch,
|
||||
})
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func setToMapByKey(s *schema.Set, key string) map[string]interface{} {
|
||||
result := make(map[string]interface{})
|
||||
for _, rawData := range s.List() {
|
||||
data := rawData.(map[string]interface{})
|
||||
result[data[key].(string)] = data
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// compare a tag against a list of strings and checks if it should
|
||||
// be ignored or not
|
||||
func tagIgnoredAutoscaling(t *autoscaling.Tag) bool {
|
||||
filter := []string{"^aws:"}
|
||||
for _, v := range filter {
|
||||
log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
|
||||
if r, _ := regexp.MatchString(v, *t.Key); r == true {
|
||||
log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,158 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestDiffAutoscalingTags(t *testing.T) {
|
||||
cases := []struct {
|
||||
Old, New map[string]interface{}
|
||||
Create, Remove map[string]interface{}
|
||||
}{
|
||||
// Basic add/remove
|
||||
{
|
||||
Old: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "bar",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
New: map[string]interface{}{
|
||||
"DifferentTag": map[string]interface{}{
|
||||
"key": "DifferentTag",
|
||||
"value": "baz",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
Create: map[string]interface{}{
|
||||
"DifferentTag": map[string]interface{}{
|
||||
"key": "DifferentTag",
|
||||
"value": "baz",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
Remove: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "bar",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Modify
|
||||
{
|
||||
Old: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "bar",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
New: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "baz",
|
||||
"propagate_at_launch": false,
|
||||
},
|
||||
},
|
||||
Create: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "baz",
|
||||
"propagate_at_launch": false,
|
||||
},
|
||||
},
|
||||
Remove: map[string]interface{}{
|
||||
"Name": map[string]interface{}{
|
||||
"key": "Name",
|
||||
"value": "bar",
|
||||
"propagate_at_launch": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var resourceID = "sample"
|
||||
|
||||
for i, tc := range cases {
|
||||
awsTagsOld, err := autoscalingTagsFromMap(tc.Old, resourceID)
|
||||
if err != nil {
|
||||
t.Fatalf("%d: unexpected error convertig old tags: %v", i, err)
|
||||
}
|
||||
|
||||
awsTagsNew, err := autoscalingTagsFromMap(tc.New, resourceID)
|
||||
if err != nil {
|
||||
t.Fatalf("%d: unexpected error convertig new tags: %v", i, err)
|
||||
}
|
||||
|
||||
c, r, err := diffAutoscalingTags(awsTagsOld, awsTagsNew, resourceID)
|
||||
if err != nil {
|
||||
t.Fatalf("%d: unexpected error diff'ing tags: %v", i, err)
|
||||
}
|
||||
|
||||
cm := autoscalingTagsToMap(c)
|
||||
rm := autoscalingTagsToMap(r)
|
||||
if !reflect.DeepEqual(cm, tc.Create) {
|
||||
t.Fatalf("%d: bad create: \n%#v\n%#v", i, cm, tc.Create)
|
||||
}
|
||||
if !reflect.DeepEqual(rm, tc.Remove) {
|
||||
t.Fatalf("%d: bad remove: \n%#v\n%#v", i, rm, tc.Remove)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// testAccCheckTags can be used to check the tags on a resource.
//
// Returns a TestCheckFunc that fails when key is absent from the tag
// descriptions or when its value/propagation does not match expected.
func testAccCheckAutoscalingTags(
	ts *[]*autoscaling.TagDescription, key string, expected map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		m := autoscalingTagDescriptionsToMap(ts)
		v, ok := m[key]
		if !ok {
			return fmt.Errorf("Missing tag: %s", key)
		}

		// Compare both the tag value and the propagation flag.
		if v["value"] != expected["value"].(string) ||
			v["propagate_at_launch"] != expected["propagate_at_launch"].(bool) {
			return fmt.Errorf("%s: bad value: %s", key, v)
		}

		return nil
	}
}
|
||||
|
||||
func testAccCheckAutoscalingTagNotExists(ts *[]*autoscaling.TagDescription, key string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
m := autoscalingTagDescriptionsToMap(ts)
|
||||
if _, ok := m[key]; ok {
|
||||
return fmt.Errorf("Tag exists when it should not: %s", key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnoringTagsAutoscaling(t *testing.T) {
|
||||
var ignoredTags []*autoscaling.Tag
|
||||
ignoredTags = append(ignoredTags, &autoscaling.Tag{
|
||||
Key: aws.String("aws:cloudformation:logical-id"),
|
||||
Value: aws.String("foo"),
|
||||
})
|
||||
ignoredTags = append(ignoredTags, &autoscaling.Tag{
|
||||
Key: aws.String("aws:foo:bar"),
|
||||
Value: aws.String("baz"),
|
||||
})
|
||||
for _, tag := range ignoredTags {
|
||||
if !tagIgnoredAutoscaling(tag) {
|
||||
t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
// TestMain delegates to resource.TestMain so registered sweepers can run
// alongside the package's acceptance tests.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}
|
||||
|
||||
// sharedClientForRegion returns a common AWSClient setup needed for the sweeper
|
||||
// functions for a given region
|
||||
func sharedClientForRegion(region string) (interface{}, error) {
|
||||
if os.Getenv("AWS_ACCESS_KEY_ID") == "" {
|
||||
return nil, fmt.Errorf("empty AWS_ACCESS_KEY_ID")
|
||||
}
|
||||
|
||||
if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
|
||||
return nil, fmt.Errorf("empty AWS_SECRET_ACCESS_KEY")
|
||||
}
|
||||
|
||||
conf := &Config{
|
||||
Region: region,
|
||||
}
|
||||
|
||||
// configures a default client for the region, using the above env vars
|
||||
client, err := conf.Client()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting AWS client")
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
func isAWSErr(err error, code string, message string) bool {
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
return err.Code() == code && strings.Contains(err.Message(), message)
|
||||
}
|
||||
return false
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -1,510 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/acm"
|
||||
"github.com/aws/aws-sdk-go/service/apigateway"
|
||||
"github.com/aws/aws-sdk-go/service/applicationautoscaling"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/aws/aws-sdk-go/service/cloudformation"
|
||||
"github.com/aws/aws-sdk-go/service/cloudfront"
|
||||
"github.com/aws/aws-sdk-go/service/cloudtrail"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatchevents"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
||||
"github.com/aws/aws-sdk-go/service/codebuild"
|
||||
"github.com/aws/aws-sdk-go/service/codecommit"
|
||||
"github.com/aws/aws-sdk-go/service/codedeploy"
|
||||
"github.com/aws/aws-sdk-go/service/codepipeline"
|
||||
"github.com/aws/aws-sdk-go/service/cognitoidentity"
|
||||
"github.com/aws/aws-sdk-go/service/configservice"
|
||||
"github.com/aws/aws-sdk-go/service/databasemigrationservice"
|
||||
"github.com/aws/aws-sdk-go/service/devicefarm"
|
||||
"github.com/aws/aws-sdk-go/service/directoryservice"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ecr"
|
||||
"github.com/aws/aws-sdk-go/service/ecs"
|
||||
"github.com/aws/aws-sdk-go/service/efs"
|
||||
"github.com/aws/aws-sdk-go/service/elasticache"
|
||||
"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
|
||||
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
|
||||
"github.com/aws/aws-sdk-go/service/elastictranscoder"
|
||||
"github.com/aws/aws-sdk-go/service/elb"
|
||||
"github.com/aws/aws-sdk-go/service/elbv2"
|
||||
"github.com/aws/aws-sdk-go/service/emr"
|
||||
"github.com/aws/aws-sdk-go/service/firehose"
|
||||
"github.com/aws/aws-sdk-go/service/glacier"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/inspector"
|
||||
"github.com/aws/aws-sdk-go/service/kinesis"
|
||||
"github.com/aws/aws-sdk-go/service/kms"
|
||||
"github.com/aws/aws-sdk-go/service/lambda"
|
||||
"github.com/aws/aws-sdk-go/service/lightsail"
|
||||
"github.com/aws/aws-sdk-go/service/opsworks"
|
||||
"github.com/aws/aws-sdk-go/service/rds"
|
||||
"github.com/aws/aws-sdk-go/service/redshift"
|
||||
"github.com/aws/aws-sdk-go/service/route53"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/ses"
|
||||
"github.com/aws/aws-sdk-go/service/sfn"
|
||||
"github.com/aws/aws-sdk-go/service/simpledb"
|
||||
"github.com/aws/aws-sdk-go/service/sns"
|
||||
"github.com/aws/aws-sdk-go/service/sqs"
|
||||
"github.com/aws/aws-sdk-go/service/ssm"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/aws/aws-sdk-go/service/waf"
|
||||
"github.com/aws/aws-sdk-go/service/wafregional"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/helper/logging"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// Config holds the provider-level configuration used to build an AWSClient:
// credentials, region/retries, assume-role settings, account allow/deny
// lists, per-service endpoint overrides, and validation-skip flags.
type Config struct {
	AccessKey     string
	SecretKey     string
	CredsFilename string
	Profile       string
	Token         string
	Region        string
	MaxRetries    int

	// Optional assume-role configuration.
	AssumeRoleARN         string
	AssumeRoleExternalID  string
	AssumeRoleSessionName string
	AssumeRolePolicy      string

	AllowedAccountIds   []interface{}
	ForbiddenAccountIds []interface{}

	// Per-service endpoint overrides; empty means the SDK default.
	CloudFormationEndpoint   string
	CloudWatchEndpoint       string
	CloudWatchEventsEndpoint string
	CloudWatchLogsEndpoint   string
	DynamoDBEndpoint         string
	DeviceFarmEndpoint       string
	Ec2Endpoint              string
	ElbEndpoint              string
	IamEndpoint              string
	KinesisEndpoint          string
	KmsEndpoint              string
	RdsEndpoint              string
	S3Endpoint               string
	SnsEndpoint              string
	SqsEndpoint              string
	Insecure                 bool

	// Flags to skip validation/lookup steps at client build time.
	SkipCredsValidation     bool
	SkipGetEC2Platforms     bool
	SkipRegionValidation    bool
	SkipRequestingAccountId bool
	SkipMetadataApiCheck    bool
	S3ForcePathStyle        bool
}
|
||||
|
||||
// AWSClient bundles the per-service AWS SDK connections created by
// Config.Client, plus the partition/account/region metadata resolved
// during setup.
type AWSClient struct {
	cfconn                *cloudformation.CloudFormation
	cloudfrontconn        *cloudfront.CloudFront
	cloudtrailconn        *cloudtrail.CloudTrail
	cloudwatchconn        *cloudwatch.CloudWatch
	cloudwatchlogsconn    *cloudwatchlogs.CloudWatchLogs
	cloudwatcheventsconn  *cloudwatchevents.CloudWatchEvents
	cognitoconn           *cognitoidentity.CognitoIdentity
	configconn            *configservice.ConfigService
	devicefarmconn        *devicefarm.DeviceFarm
	dmsconn               *databasemigrationservice.DatabaseMigrationService
	dsconn                *directoryservice.DirectoryService
	dynamodbconn          *dynamodb.DynamoDB
	ec2conn               *ec2.EC2
	ecrconn               *ecr.ECR
	ecsconn               *ecs.ECS
	efsconn               *efs.EFS
	elbconn               *elb.ELB
	elbv2conn             *elbv2.ELBV2
	emrconn               *emr.EMR
	esconn                *elasticsearch.ElasticsearchService
	acmconn               *acm.ACM
	apigateway            *apigateway.APIGateway
	appautoscalingconn    *applicationautoscaling.ApplicationAutoScaling
	autoscalingconn       *autoscaling.AutoScaling
	s3conn                *s3.S3
	sesConn               *ses.SES
	simpledbconn          *simpledb.SimpleDB
	sqsconn               *sqs.SQS
	snsconn               *sns.SNS
	stsconn               *sts.STS
	redshiftconn          *redshift.Redshift
	r53conn               *route53.Route53
	// Resolved account/region metadata.
	partition             string
	accountid             string
	supportedplatforms    []string
	region                string
	rdsconn               *rds.RDS
	iamconn               *iam.IAM
	kinesisconn           *kinesis.Kinesis
	kmsconn               *kms.KMS
	firehoseconn          *firehose.Firehose
	inspectorconn         *inspector.Inspector
	elasticacheconn       *elasticache.ElastiCache
	elasticbeanstalkconn  *elasticbeanstalk.ElasticBeanstalk
	elastictranscoderconn *elastictranscoder.ElasticTranscoder
	lambdaconn            *lambda.Lambda
	lightsailconn         *lightsail.Lightsail
	opsworksconn          *opsworks.OpsWorks
	glacierconn           *glacier.Glacier
	codebuildconn         *codebuild.CodeBuild
	codedeployconn        *codedeploy.CodeDeploy
	codecommitconn        *codecommit.CodeCommit
	codepipelineconn      *codepipeline.CodePipeline
	sfnconn               *sfn.SFN
	ssmconn               *ssm.SSM
	wafconn               *waf.WAF
	wafregionalconn       *wafregional.WAFRegional
}
|
||||
|
||||
// S3 returns the client's S3 service connection.
func (c *AWSClient) S3() *s3.S3 {
	return c.s3conn
}
|
||||
|
||||
// DynamoDB returns the client's DynamoDB service connection.
func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB {
	return c.dynamodbconn
}
|
||||
|
||||
func (c *AWSClient) IsGovCloud() bool {
|
||||
if c.region == "us-gov-west-1" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *AWSClient) IsChinaCloud() bool {
|
||||
if c.region == "cn-north-1" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Client configures and returns a fully initialized AWSClient
|
||||
func (c *Config) Client() (interface{}, error) {
|
||||
// Get the auth and region. This can fail if keys/regions were not
|
||||
// specified and we're attempting to use the environment.
|
||||
if c.SkipRegionValidation {
|
||||
log.Println("[INFO] Skipping region validation")
|
||||
} else {
|
||||
log.Println("[INFO] Building AWS region structure")
|
||||
err := c.ValidateRegion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var client AWSClient
|
||||
// store AWS region in client struct, for region specific operations such as
|
||||
// bucket storage in S3
|
||||
client.region = c.Region
|
||||
|
||||
log.Println("[INFO] Building AWS auth structure")
|
||||
creds, err := GetCredentials(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Call Get to check for credential provider. If nothing found, we'll get an
|
||||
// error, and we can present it nicely to the user
|
||||
cp, err := creds.Get()
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||
return nil, errors.New(`No valid credential sources found for AWS Provider.
|
||||
Please see https://terraform.io/docs/providers/aws/index.html for more information on
|
||||
providing credentials for the AWS Provider`)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
|
||||
|
||||
awsConfig := &aws.Config{
|
||||
Credentials: creds,
|
||||
Region: aws.String(c.Region),
|
||||
MaxRetries: aws.Int(c.MaxRetries),
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
|
||||
}
|
||||
|
||||
if logging.IsDebugOrHigher() {
|
||||
awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
|
||||
awsConfig.Logger = awsLogger{}
|
||||
}
|
||||
|
||||
if c.Insecure {
|
||||
transport := awsConfig.HTTPClient.Transport.(*http.Transport)
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Set up base session
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err)
|
||||
}
|
||||
|
||||
sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent)
|
||||
|
||||
if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
|
||||
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
|
||||
}
|
||||
|
||||
// This restriction should only be used for Route53 sessions.
|
||||
// Other resources that have restrictions should allow the API to fail, rather
|
||||
// than Terraform abstracting the region for the user. This can lead to breaking
|
||||
// changes if that resource is ever opened up to more regions.
|
||||
r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")})
|
||||
|
||||
// Some services have user-configurable endpoints
|
||||
awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)})
|
||||
awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)})
|
||||
awsCweSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEventsEndpoint)})
|
||||
awsCwlSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchLogsEndpoint)})
|
||||
awsDynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)})
|
||||
awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)})
|
||||
awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)})
|
||||
awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)})
|
||||
awsKinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)})
|
||||
awsKmsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KmsEndpoint)})
|
||||
awsRdsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.RdsEndpoint)})
|
||||
awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)})
|
||||
awsSnsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SnsEndpoint)})
|
||||
awsSqsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SqsEndpoint)})
|
||||
awsDeviceFarmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DeviceFarmEndpoint)})
|
||||
|
||||
log.Println("[INFO] Initializing DeviceFarm SDK connection")
|
||||
client.devicefarmconn = devicefarm.New(awsDeviceFarmSess)
|
||||
|
||||
// These two services need to be set up early so we can check on AccountID
|
||||
client.iamconn = iam.New(awsIamSess)
|
||||
client.stsconn = sts.New(sess)
|
||||
|
||||
if !c.SkipCredsValidation {
|
||||
err = c.ValidateCredentials(client.stsconn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !c.SkipRequestingAccountId {
|
||||
partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName)
|
||||
if err == nil {
|
||||
client.partition = partition
|
||||
client.accountid = accountId
|
||||
}
|
||||
}
|
||||
|
||||
authErr := c.ValidateAccountId(client.accountid)
|
||||
if authErr != nil {
|
||||
return nil, authErr
|
||||
}
|
||||
|
||||
client.ec2conn = ec2.New(awsEc2Sess)
|
||||
|
||||
if !c.SkipGetEC2Platforms {
|
||||
supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn)
|
||||
if err != nil {
|
||||
// We intentionally fail *silently* because there's a chance
|
||||
// user just doesn't have ec2:DescribeAccountAttributes permissions
|
||||
log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err)
|
||||
} else {
|
||||
client.supportedplatforms = supportedPlatforms
|
||||
}
|
||||
}
|
||||
|
||||
client.acmconn = acm.New(sess)
|
||||
client.apigateway = apigateway.New(sess)
|
||||
client.appautoscalingconn = applicationautoscaling.New(sess)
|
||||
client.autoscalingconn = autoscaling.New(sess)
|
||||
client.cfconn = cloudformation.New(awsCfSess)
|
||||
client.cloudfrontconn = cloudfront.New(sess)
|
||||
client.cloudtrailconn = cloudtrail.New(sess)
|
||||
client.cloudwatchconn = cloudwatch.New(awsCwSess)
|
||||
client.cloudwatcheventsconn = cloudwatchevents.New(awsCweSess)
|
||||
client.cloudwatchlogsconn = cloudwatchlogs.New(awsCwlSess)
|
||||
client.codecommitconn = codecommit.New(sess)
|
||||
client.codebuildconn = codebuild.New(sess)
|
||||
client.codedeployconn = codedeploy.New(sess)
|
||||
client.configconn = configservice.New(sess)
|
||||
client.cognitoconn = cognitoidentity.New(sess)
|
||||
client.dmsconn = databasemigrationservice.New(sess)
|
||||
client.codepipelineconn = codepipeline.New(sess)
|
||||
client.dsconn = directoryservice.New(sess)
|
||||
client.dynamodbconn = dynamodb.New(awsDynamoSess)
|
||||
client.ecrconn = ecr.New(sess)
|
||||
client.ecsconn = ecs.New(sess)
|
||||
client.efsconn = efs.New(sess)
|
||||
client.elasticacheconn = elasticache.New(sess)
|
||||
client.elasticbeanstalkconn = elasticbeanstalk.New(sess)
|
||||
client.elastictranscoderconn = elastictranscoder.New(sess)
|
||||
client.elbconn = elb.New(awsElbSess)
|
||||
client.elbv2conn = elbv2.New(awsElbSess)
|
||||
client.emrconn = emr.New(sess)
|
||||
client.esconn = elasticsearch.New(sess)
|
||||
client.firehoseconn = firehose.New(sess)
|
||||
client.inspectorconn = inspector.New(sess)
|
||||
client.glacierconn = glacier.New(sess)
|
||||
client.kinesisconn = kinesis.New(awsKinesisSess)
|
||||
client.kmsconn = kms.New(awsKmsSess)
|
||||
client.lambdaconn = lambda.New(sess)
|
||||
client.lightsailconn = lightsail.New(sess)
|
||||
client.opsworksconn = opsworks.New(sess)
|
||||
client.r53conn = route53.New(r53Sess)
|
||||
client.rdsconn = rds.New(awsRdsSess)
|
||||
client.redshiftconn = redshift.New(sess)
|
||||
client.simpledbconn = simpledb.New(sess)
|
||||
client.s3conn = s3.New(awsS3Sess)
|
||||
client.sesConn = ses.New(sess)
|
||||
client.sfnconn = sfn.New(sess)
|
||||
client.snsconn = sns.New(awsSnsSess)
|
||||
client.sqsconn = sqs.New(awsSqsSess)
|
||||
client.ssmconn = ssm.New(sess)
|
||||
client.wafconn = waf.New(sess)
|
||||
client.wafregionalconn = wafregional.New(sess)
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// ValidateRegion returns an error if the configured region is not a
|
||||
// valid aws region and nil otherwise.
|
||||
func (c *Config) ValidateRegion() error {
|
||||
var regions = []string{
|
||||
"ap-northeast-1",
|
||||
"ap-northeast-2",
|
||||
"ap-south-1",
|
||||
"ap-southeast-1",
|
||||
"ap-southeast-2",
|
||||
"ca-central-1",
|
||||
"cn-north-1",
|
||||
"eu-central-1",
|
||||
"eu-west-1",
|
||||
"eu-west-2",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
"us-gov-west-1",
|
||||
"us-west-1",
|
||||
"us-west-2",
|
||||
}
|
||||
|
||||
for _, valid := range regions {
|
||||
if c.Region == valid {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Not a valid region: %s", c.Region)
|
||||
}
|
||||
|
||||
// Validate credentials early and fail before we do any graph walking.
|
||||
func (c *Config) ValidateCredentials(stsconn *sts.STS) error {
|
||||
_, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
|
||||
return err
|
||||
}
|
||||
|
||||
// ValidateAccountId returns a context-specific error if the configured account
|
||||
// id is explicitly forbidden or not authorised; and nil if it is authorised.
|
||||
func (c *Config) ValidateAccountId(accountId string) error {
|
||||
if c.AllowedAccountIds == nil && c.ForbiddenAccountIds == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Println("[INFO] Validating account ID")
|
||||
|
||||
if c.ForbiddenAccountIds != nil {
|
||||
for _, id := range c.ForbiddenAccountIds {
|
||||
if id == accountId {
|
||||
return fmt.Errorf("Forbidden account ID (%s)", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.AllowedAccountIds != nil {
|
||||
for _, id := range c.AllowedAccountIds {
|
||||
if id == accountId {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Account ID not allowed (%s)", accountId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetSupportedEC2Platforms(conn *ec2.EC2) ([]string, error) {
|
||||
attrName := "supported-platforms"
|
||||
|
||||
input := ec2.DescribeAccountAttributesInput{
|
||||
AttributeNames: []*string{aws.String(attrName)},
|
||||
}
|
||||
attributes, err := conn.DescribeAccountAttributes(&input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var platforms []string
|
||||
for _, attr := range attributes.AccountAttributes {
|
||||
if *attr.AttributeName == attrName {
|
||||
for _, v := range attr.AttributeValues {
|
||||
platforms = append(platforms, *v.AttributeValue)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(platforms) == 0 {
|
||||
return nil, fmt.Errorf("No EC2 platforms detected")
|
||||
}
|
||||
|
||||
return platforms, nil
|
||||
}
|
||||
|
||||
// addTerraformVersionToUserAgent is a named handler that will add Terraform's
// version information to requests made by the AWS SDK. It is pushed onto the
// session's Build handler list in Client, so it applies to every request made
// through sessions derived from that base session.
var addTerraformVersionToUserAgent = request.NamedHandler{
	Name: "terraform.TerraformVersionUserAgentHandler",
	// MakeAddToUserAgentHandler appends "<product>/<version>" style tokens
	// to the outgoing User-Agent header.
	Fn: request.MakeAddToUserAgentHandler(
		"APN/1.0 HashiCorp/1.0 Terraform", terraform.VersionString()),
}
|
||||
|
||||
// debugAuthFailure is a named handler that logs additional debugging context
// when the AWS API rejects the provided credentials with an "AuthFailure"
// error: the current system UTC time (presumably to help spot clock skew —
// confirm) and a full spew dump of the failed request. It is only installed
// (on the UnmarshalError handler list, in Client) when the
// TERRAFORM_AWS_AUTHFAILURE_DEBUG environment variable is set.
var debugAuthFailure = request.NamedHandler{
	Name: "terraform.AuthFailureAdditionalDebugHandler",
	Fn: func(req *request.Request) {
		// isAWSErr is a file-local helper not visible here; presumably it
		// matches the error code and message text — see its definition.
		if isAWSErr(req.Error, "AuthFailure", "AWS was not able to validate the provided access credentials") {
			log.Printf("[INFO] Additional AuthFailure Debugging Context")
			log.Printf("[INFO] Current system UTC time: %s", time.Now().UTC())
			log.Printf("[INFO] Request object: %s", spew.Sdump(req))
		}
	},
}
|
||||
|
||||
type awsLogger struct{}
|
||||
|
||||
func (l awsLogger) Log(args ...interface{}) {
|
||||
tokens := make([]string, 0, len(args))
|
||||
for _, arg := range args {
|
||||
if token, ok := arg.(string); ok {
|
||||
tokens = append(tokens, token)
|
||||
}
|
||||
}
|
||||
log.Printf("[DEBUG] [aws-sdk-go] %s", strings.Join(tokens, " "))
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue