Merge remote-tracking branch 'origin/master' into f-oracle-compute

This commit is contained in:
Jake Champlin 2017-04-07 15:21:39 -04:00
commit 4db7d69a48
No known key found for this signature in database
GPG Key ID: DC31F41958EF4AC2
154 changed files with 5250 additions and 944 deletions

View File

@ -12,6 +12,7 @@ FEATURES:
IMPROVEMENTS:
* core: add `-lock-timeout` option, which will block and retry locks for the given duration [GH-13262]
* core: new `chomp` interpolation function which returns the given string with any trailing newline characters removed [GH-13419]
* backend/remote-state: Add support for assume role extensions to s3 backend [GH-13236]
* config: New interpolation functions `basename` and `dirname`, for file path manipulation [GH-13080]
* helper/resource: Allow unknown "pending" states [GH-13099]
@ -35,11 +36,12 @@ IMPROVEMENTS:
* provider/aws: Migrate `aws_dms_*` resources away from AWS waiters [GH-13291]
* provider/aws: Add support for treat_missing_data to cloudwatch_metric_alarm [GH-13358]
* provider/aws: Add support for evaluate_low_sample_count_percentiles to cloudwatch_metric_alarm [GH-13371]
* provider/aws: Fix `aws_s3_bucket` drift detection of logging options [GH-13281]
* provider/aws: Add `name_prefix` to `aws_alb_target_group` [GH-13442]
* provider/bitbucket: Improved error handling [GH-13390]
* provider/cloudstack: Do not force a new resource when updating `cloudstack_loadbalancer_rule` members [GH-11786]
* provider/github: Handle the case when issue labels already exist [GH-13182]
* provider/google: Mark `google_container_cluster`'s `client_key` & `password` inside `master_auth` as sensitive [GH-13148]
* provider/openstack: Add support for 'value_specs' options to `openstack_compute_servergroup_v2` [GH-13380]
* provider/triton: Move to joyent/triton-go [GH-13225]
BUG FIXES:
@ -66,12 +68,18 @@ BUG FIXES:
* provider/aws: Increase subnet deletion timeout [GH-13356]
* provider/aws: Increase launch_configuration creation timeout [GH-13357]
* provider/aws: Increase Beanstalk env 'ready' timeout [GH-13359]
* provider/aws: Raise timeout for deleting APIG REST API [GH-13414]
* provider/aws: Raise timeout for attaching/detaching VPN Gateway [GH-13457]
* provider/aws: Recreate opsworks_stack on change of service_role_arn [GH-13325]
* provider/aws: Fix KMS Key reading with Exists method [GH-13348]
* provider/aws: Fix DynamoDB issues with GSI indexes [GH-13256]
* provider/aws: Fix `aws_s3_bucket` drift detection of logging options [GH-13281]
* provider/aws: Update ElasticTranscoderPreset to have default for MaxFrameRate [GH-13422]
* provider/azurerm: Network Security Group - ignoring protocol casing at Import time [GH-13153]
* provider/azurerm: Fix crash when importing Local Network Gateways [GH-13261]
* provider/bitbucket: Fixed issue where provider would fail with an "EOF" error on some operations [GH-13390]
* provider/openstack: Refresh volume_attachment from state if NotFound [GH-13342]
* provider/openstack: Add SOFT_DELETED to delete status [GH-13444]
* provider/profitbricks: Changed output type of ips variable of ip_block ProfitBricks resource [GH-13290]
## 0.9.2 (March 28, 2017)

View File

@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -37,10 +38,18 @@ func resourceAwsAlbTargetGroup() *schema.Resource {
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: validateAwsAlbTargetGroupName,
},
"name_prefix": {
Type: schema.TypeString,
Required: true,
Optional: true,
ForceNew: true,
ValidateFunc: validateAwsAlbTargetGroupName,
ValidateFunc: validateAwsAlbTargetGroupNamePrefix,
},
"port": {
@ -172,8 +181,17 @@ func resourceAwsAlbTargetGroup() *schema.Resource {
func resourceAwsAlbTargetGroupCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
var groupName string
if v, ok := d.GetOk("name"); ok {
groupName = v.(string)
} else if v, ok := d.GetOk("name_prefix"); ok {
groupName = resource.PrefixedUniqueId(v.(string))
} else {
groupName = resource.PrefixedUniqueId("tf-")
}
params := &elbv2.CreateTargetGroupInput{
Name: aws.String(d.Get("name").(string)),
Name: aws.String(groupName),
Port: aws.Int64(int64(d.Get("port").(int))),
Protocol: aws.String(d.Get("protocol").(string)),
VpcId: aws.String(d.Get("vpc_id").(string)),
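For orientation, the three naming modes the new create logic above supports, with illustrative results (the suffixes below are placeholders for the unique IDs that resource.PrefixedUniqueId generates):
// Illustrative only -- resolution order of the block above:
//   name        = "my-tg"   ->  "my-tg"                  (explicit name wins)
//   name_prefix = "tf-lb-"  ->  "tf-lb-<unique-suffix>"  (prefix plus generated suffix)
//   neither set             ->  "tf-<unique-suffix>"     (fully generated fallback)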
@ -463,14 +481,6 @@ func validateAwsAlbTargetGroupHealthCheckProtocol(v interface{}, k string) (ws [
return
}
func validateAwsAlbTargetGroupName(v interface{}, k string) (ws []string, errors []error) {
name := v.(string)
if len(name) > 32 {
errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '32' characters", k, name))
}
return
}
func validateAwsAlbTargetGroupPort(v interface{}, k string) (ws []string, errors []error) {
port := v.(int)
if port < 1 || port > 65535 { // valid port range is 1-65535

View File

@ -3,6 +3,7 @@ package aws
import (
"errors"
"fmt"
"regexp"
"testing"
"github.com/aws/aws-sdk-go/aws"
@ -85,6 +86,45 @@ func TestAccAWSALBTargetGroup_basic(t *testing.T) {
})
}
func TestAccAWSALBTargetGroup_namePrefix(t *testing.T) {
var conf elbv2.TargetGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_alb_target_group.test",
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSALBTargetGroupDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSALBTargetGroupConfig_namePrefix,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf),
resource.TestMatchResourceAttr("aws_alb_target_group.test", "name", regexp.MustCompile("^tf-")),
),
},
},
})
}
func TestAccAWSALBTargetGroup_generatedName(t *testing.T) {
var conf elbv2.TargetGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_alb_target_group.test",
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSALBTargetGroupDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSALBTargetGroupConfig_generatedName,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf),
),
},
},
})
}
func TestAccAWSALBTargetGroup_changeNameForceNew(t *testing.T) {
var before, after elbv2.TargetGroup
targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))
@ -715,3 +755,28 @@ resource "aws_vpc" "test" {
}
}`, targetGroupName, stickinessBlock)
}
const testAccAWSALBTargetGroupConfig_namePrefix = `
resource "aws_alb_target_group" "test" {
name_prefix = "tf-"
port = 80
protocol = "HTTP"
vpc_id = "${aws_vpc.test.id}"
}
resource "aws_vpc" "test" {
cidr_block = "10.0.0.0/16"
}
`
const testAccAWSALBTargetGroupConfig_generatedName = `
resource "aws_alb_target_group" "test" {
port = 80
protocol = "HTTP"
vpc_id = "${aws_vpc.test.id}"
}
resource "aws_vpc" "test" {
cidr_block = "10.0.0.0/16"
}
`

View File

@ -172,7 +172,7 @@ func resourceAwsApiGatewayRestApiDelete(d *schema.ResourceData, meta interface{}
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
return resource.Retry(10*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteRestApi(&apigateway.DeleteRestApiInput{
RestApiId: aws.String(d.Id()),
})

View File

@ -39,6 +39,9 @@ func resourceAwsDynamoDbTable() *schema.Resource {
State: schema.ImportStatePassthrough,
},
SchemaVersion: 1,
MigrateState: resourceAwsDynamoDbTableMigrateState,
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
@ -157,15 +160,6 @@ func resourceAwsDynamoDbTable() *schema.Resource {
},
},
},
// GSI names are the uniqueness constraint
Set: func(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
buf.WriteString(fmt.Sprintf("%d-", m["write_capacity"].(int)))
buf.WriteString(fmt.Sprintf("%d-", m["read_capacity"].(int)))
return hashcode.String(buf.String())
},
},
"stream_enabled": {
Type: schema.TypeBool,
@ -533,9 +527,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
table := tableDescription.Table
updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
for _, updatedgsidata := range gsiSet.List() {
updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
gsidata := updatedgsidata.(map[string]interface{})
gsiName := gsidata["name"].(string)
gsiWriteCapacity := gsidata["write_capacity"].(int)
@ -584,6 +577,10 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
log.Printf("[DEBUG] Error updating table: %s", err)
return err
}
if err := waitForGSIToBeActive(d.Id(), gsiName, meta); err != nil {
return errwrap.Wrapf("Error waiting for Dynamo DB GSI to be active: {{err}}", err)
}
}
}
}

View File

@ -0,0 +1,70 @@
package aws
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
"strings"
)
func resourceAwsDynamoDbTableMigrateState(
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
switch v {
case 0:
log.Println("[INFO] Found AWS DynamoDB Table State v0; migrating to v1")
return migrateDynamoDBStateV0toV1(is)
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
}
func migrateDynamoDBStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
if is.Empty() {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
log.Printf("[DEBUG] DynamoDB Table Attributes before Migration: %#v", is.Attributes)
prefix := "global_secondary_index"
entity := resourceAwsDynamoDbTable()
// Read old keys
reader := &schema.MapFieldReader{
Schema: entity.Schema,
Map: schema.BasicMapReader(is.Attributes),
}
result, err := reader.ReadField([]string{prefix})
if err != nil {
return nil, err
}
oldKeys, ok := result.Value.(*schema.Set)
if !ok {
return nil, fmt.Errorf("Got unexpected value from state: %#v", result.Value)
}
// Delete old keys
for k := range is.Attributes {
if strings.HasPrefix(k, fmt.Sprintf("%s.", prefix)) {
delete(is.Attributes, k)
}
}
// Write new keys
writer := schema.MapFieldWriter{
Schema: entity.Schema,
}
if err := writer.WriteField([]string{prefix}, oldKeys); err != nil {
return is, err
}
for k, v := range writer.Map() {
is.Attributes[k] = v
}
log.Printf("[DEBUG] DynamoDB Table Attributes after State Migration: %#v", is.Attributes)
return is, nil
}
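To make the flow concrete, a hedged sketch of invoking this migration on a v0 state; the attribute keys and the numeric index are illustrative stand-ins for real v0 custom-hash keys, not values from this diff:
// Illustrative v0 state: "1853296117" stands in for an old custom-hash set index.
is := &terraform.InstanceState{
	ID: "TerraformTestTable",
	Attributes: map[string]string{
		"global_secondary_index.#":               "1",
		"global_secondary_index.1853296117.name": "InitialTestTableGSI",
	},
}
// meta is unused by this migration, so nil is fine here.
migrated, err := resourceAwsDynamoDbTableMigrateState(0, is, nil)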

View File

@ -14,6 +14,8 @@ import (
)
func TestAccAWSDynamoDbTable_basic(t *testing.T) {
var conf dynamodb.DescribeTableOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -22,7 +24,8 @@ func TestAccAWSDynamoDbTable_basic(t *testing.T) {
{
Config: testAccAWSDynamoDbConfigInitialState(),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf),
testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"),
),
},
{
@ -36,6 +39,8 @@ func TestAccAWSDynamoDbTable_basic(t *testing.T) {
}
func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
var conf dynamodb.DescribeTableOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -44,7 +49,8 @@ func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
{
Config: testAccAWSDynamoDbConfigStreamSpecification(),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf),
testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"),
resource.TestCheckResourceAttr(
"aws_dynamodb_table.basic-dynamodb-table", "stream_enabled", "true"),
resource.TestCheckResourceAttr(
@ -56,6 +62,8 @@ func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
}
func TestAccAWSDynamoDbTable_tags(t *testing.T) {
var conf dynamodb.DescribeTableOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -64,7 +72,8 @@ func TestAccAWSDynamoDbTable_tags(t *testing.T) {
{
Config: testAccAWSDynamoDbConfigTags(),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf),
testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"),
resource.TestCheckResourceAttr(
"aws_dynamodb_table.basic-dynamodb-table", "tags.%", "3"),
),
@ -73,6 +82,32 @@ func TestAccAWSDynamoDbTable_tags(t *testing.T) {
})
}
// https://github.com/hashicorp/terraform/issues/13243
func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) {
var conf dynamodb.DescribeTableOutput
name := acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSDynamoDbConfigGsiUpdate(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf),
),
},
{
Config: testAccAWSDynamoDbConfigGsiUpdated(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf),
),
},
},
})
}
func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) {
cases := []struct {
Value string
@ -143,7 +178,37 @@ func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
return nil
}
func testAccCheckInitialAWSDynamoDbTableExists(n string) resource.TestCheckFunc {
func testAccCheckInitialAWSDynamoDbTableExists(n string, table *dynamodb.DescribeTableOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
log.Printf("[DEBUG] Trying to create initial table state!")
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DynamoDB table name specified!")
}
conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
params := &dynamodb.DescribeTableInput{
TableName: aws.String(rs.Primary.ID),
}
resp, err := conn.DescribeTable(params)
if err != nil {
return fmt.Errorf("[ERROR] Problem describing table '%s': %s", rs.Primary.ID, err)
}
*table = *resp
return nil
}
}
func testAccCheckInitialAWSDynamoDbTableConf(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
log.Printf("[DEBUG] Trying to create initial table state!")
rs, ok := s.RootModule().Resources[n]
@ -301,123 +366,141 @@ func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[st
func testAccAWSDynamoDbConfigInitialState() string {
return fmt.Sprintf(`
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "TerraformTestTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
name = "TerraformTestTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
}
`, acctest.RandInt())
}
const testAccAWSDynamoDbConfigAddSecondaryGSI = `
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "TerraformTestTable"
read_capacity = 20
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "ReplacementGSIRangeKey"
type = "N"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "ReplacementTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "ReplacementGSIRangeKey"
write_capacity = 5
read_capacity = 5
projection_type = "INCLUDE"
non_key_attributes = ["TestNonKeyAttribute"]
}
name = "TerraformTestTable"
read_capacity = 20
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "ReplacementGSIRangeKey"
type = "N"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "ReplacementTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "ReplacementGSIRangeKey"
write_capacity = 5
read_capacity = 5
projection_type = "INCLUDE"
non_key_attributes = ["TestNonKeyAttribute"]
}
}
`
func testAccAWSDynamoDbConfigStreamSpecification() string {
return fmt.Sprintf(`
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "TerraformTestStreamTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
stream_enabled = true
stream_view_type = "KEYS_ONLY"
name = "TerraformTestStreamTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
stream_enabled = true
stream_view_type = "KEYS_ONLY"
}
`, acctest.RandInt())
}
@ -425,45 +508,170 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" {
func testAccAWSDynamoDbConfigTags() string {
return fmt.Sprintf(`
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "TerraformTestTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
tags {
Name = "terraform-test-table-%d"
AccTest = "yes"
Testing = "absolutely"
}
name = "TerraformTestTable-%d"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
tags {
Name = "terraform-test-table-%d"
AccTest = "yes"
Testing = "absolutely"
}
}
`, acctest.RandInt(), acctest.RandInt())
}
func testAccAWSDynamoDbConfigGsiUpdate(name string) string {
return fmt.Sprintf(`
variable "capacity" {
default = 10
}
resource "aws_dynamodb_table" "test" {
name = "tf-acc-test-%s"
read_capacity = "${var.capacity}"
write_capacity = "${var.capacity}"
hash_key = "id"
attribute {
name = "id"
type = "S"
}
attribute {
name = "att1"
type = "S"
}
attribute {
name = "att2"
type = "S"
}
attribute {
name = "att3"
type = "S"
}
global_secondary_index {
name = "att1-index"
hash_key = "att1"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
global_secondary_index {
name = "att2-index"
hash_key = "att2"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
global_secondary_index {
name = "att3-index"
hash_key = "att3"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
}
`, name)
}
func testAccAWSDynamoDbConfigGsiUpdated(name string) string {
return fmt.Sprintf(`
variable "capacity" {
default = 20
}
resource "aws_dynamodb_table" "test" {
name = "tf-acc-test-%s"
read_capacity = "${var.capacity}"
write_capacity = "${var.capacity}"
hash_key = "id"
attribute {
name = "id"
type = "S"
}
attribute {
name = "att1"
type = "S"
}
attribute {
name = "att2"
type = "S"
}
attribute {
name = "att3"
type = "S"
}
global_secondary_index {
name = "att1-index"
hash_key = "att1"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
global_secondary_index {
name = "att2-index"
hash_key = "att2"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
global_secondary_index {
name = "att3-index"
hash_key = "att3"
write_capacity = "${var.capacity}"
read_capacity = "${var.capacity}"
projection_type = "ALL"
}
}
`, name)
}

View File

@ -213,6 +213,7 @@ func resourceAwsElasticTranscoderPreset() *schema.Resource {
"max_frame_rate": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "30",
ForceNew: true,
},
"max_height": &schema.Schema{

View File

@ -205,7 +205,7 @@ func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error
Pending: []string{"detached", "attaching"},
Target: []string{"attached"},
Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "available"),
Timeout: 1 * time.Minute,
Timeout: 5 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf(
@ -266,7 +266,7 @@ func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error
Pending: []string{"attached", "detaching", "available"},
Target: []string{"detached"},
Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "detached"),
Timeout: 1 * time.Minute,
Timeout: 5 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf(

View File

@ -1154,3 +1154,19 @@ func validateDbOptionGroupNamePrefix(v interface{}, k string) (ws []string, erro
}
return
}
func validateAwsAlbTargetGroupName(v interface{}, k string) (ws []string, errors []error) {
name := v.(string)
if len(name) > 32 {
errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '32' characters", k, name))
}
return
}
func validateAwsAlbTargetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) {
name := v.(string)
// Keep prefixes short enough that the generated unique suffix still fits
// within the 32-character target group name limit.
if len(name) > 6 {
errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '6' characters", k, name))
}
return
}

View File

@ -919,7 +919,7 @@ func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) e
stateConf := &resource.StateChangeConf{
Pending: []string{"ACTIVE", "SHUTOFF"},
Target: []string{"DELETED"},
Target: []string{"DELETED", "SOFT_DELETED"},
Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()),
Timeout: d.Timeout(schema.TimeoutDelete),
Delay: 10 * time.Second,

View File

@ -678,9 +678,11 @@ func testAccCheckComputeV2InstanceDestroy(s *terraform.State) error {
continue
}
_, err := servers.Get(computeClient, rs.Primary.ID).Extract()
server, err := servers.Get(computeClient, rs.Primary.ID).Extract()
if err == nil {
return fmt.Errorf("Instance still exists")
if server.Status != "SOFT_DELETED" {
return fmt.Errorf("Instance still exists")
}
}
}

View File

@ -41,6 +41,11 @@ func resourceComputeServerGroupV2() *schema.Resource {
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"value_specs": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
},
}
}
@ -52,10 +57,14 @@ func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}
return fmt.Errorf("Error creating OpenStack compute client: %s", err)
}
createOpts := &servergroups.CreateOpts{
Name: d.Get("name").(string),
Policies: resourceServerGroupPoliciesV2(d),
createOpts := ServerGroupCreateOpts{
servergroups.CreateOpts{
Name: d.Get("name").(string),
Policies: resourceServerGroupPoliciesV2(d),
},
MapValueSpecs(d),
}
log.Printf("[DEBUG] Create Options: %#v", createOpts)
newSG, err := servergroups.Create(computeClient, createOpts).Extract()
if err != nil {

View File

@ -10,6 +10,7 @@ import (
"strings"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules"
@ -250,6 +251,18 @@ func (opts RuleCreateOpts) ToRuleCreateMap() (map[string]interface{}, error) {
return b, nil
}
// ServerGroupCreateOpts represents the attributes used when creating a new server group.
type ServerGroupCreateOpts struct {
servergroups.CreateOpts
ValueSpecs map[string]string `json:"value_specs,omitempty"`
}
// ToServerGroupCreateMap casts a CreateOpts struct to a map.
// It overrides servergroups.ToServerGroupCreateMap to add the ValueSpecs field.
func (opts ServerGroupCreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) {
return BuildRequest(opts, "server_group")
}
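A hedged sketch of the create map this produces, assuming BuildRequest folds the ValueSpecs entries into the request body the same way the other *CreateOpts wrappers in this file do (names and values are illustrative):
opts := ServerGroupCreateOpts{
	servergroups.CreateOpts{Name: "sg-1", Policies: []string{"anti-affinity"}},
	map[string]string{"custom_key": "custom_value"},
}
m, _ := opts.ToServerGroupCreateMap()
// Expected shape (assumption): map[string]interface{}{
//   "server_group": map[string]interface{}{
//     "name": "sg-1", "policies": [...], "custom_key": "custom_value",
//   },
// }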
// SubnetCreateOpts represents the attributes used when creating a new subnet.
type SubnetCreateOpts struct {
subnets.CreateOpts

View File

@ -58,6 +58,7 @@ func Funcs() map[string]ast.Function {
"base64encode": interpolationFuncBase64Encode(),
"base64sha256": interpolationFuncBase64Sha256(),
"ceil": interpolationFuncCeil(),
"chomp": interpolationFuncChomp(),
"cidrhost": interpolationFuncCidrHost(),
"cidrnetmask": interpolationFuncCidrNetmask(),
"cidrsubnet": interpolationFuncCidrSubnet(),
@ -459,6 +460,18 @@ func interpolationFuncCeil() ast.Function {
}
}
// interpolationFuncChomp removes trailing newlines from the given string
func interpolationFuncChomp() ast.Function {
newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
return newlines.ReplaceAllString(args[0].(string), ""), nil
},
}
}
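The regexp anchors on \z, so only the final run of newlines is stripped; a small standalone demonstration (not part of this file):
package main

import (
	"fmt"
	"regexp"
)

func main() {
	newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
	// Interior newlines survive; only the trailing run is removed.
	fmt.Printf("%q\n", newlines.ReplaceAllString("goodbye\ncruel\nworld\n\n\n", "")) // "goodbye\ncruel\nworld"
	fmt.Printf("%q\n", newlines.ReplaceAllString("goodbye\r\nwindows\r\n", ""))      // "goodbye\r\nwindows"
}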
// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
func interpolationFuncFloor() ast.Function {
return ast.Function{

View File

@ -370,6 +370,60 @@ func TestInterpolateFuncCeil(t *testing.T) {
})
}
func TestInterpolateFuncChomp(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${chomp()}`,
nil,
true,
},
{
`${chomp("hello world")}`,
"hello world",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\ncruel\nworld"),
"goodbye\ncruel\nworld",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\r\nwindows\r\nworld"),
"goodbye\r\nwindows\r\nworld",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\ncruel\nworld\n"),
"goodbye\ncruel\nworld",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\ncruel\nworld\n\n\n\n"),
"goodbye\ncruel\nworld",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\r\nwindows\r\nworld\r\n"),
"goodbye\r\nwindows\r\nworld",
false,
},
{
fmt.Sprintf(`${chomp("%s")}`, "goodbye\r\nwindows\r\nworld\r\n\r\n\r\n\r\n"),
"goodbye\r\nwindows\r\nworld",
false,
},
},
})
}
func TestInterpolateFuncMap(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{

View File

@ -43,6 +43,7 @@ type detailResponse struct {
ProcessingOn string `json:"ProcessingOn"`
DownTimes int `json:"DownTimes,string"`
Sensitive bool `json:"Sensitive"`
TriggerRate int `json:"TriggerRate,string"`
}
func (d *detailResponse) test() *Test {
@ -64,5 +65,6 @@ func (d *detailResponse) test() *Test {
FindString: d.FindString,
DoNotFind: d.DoNotFind,
Port: d.Port,
TriggerRate: d.TriggerRate,
}
}

View File

@ -1,3 +1,16 @@
Release v1.8.10 (2017-04-06)
===
### Service Client Updates
* `service/elbv2`: Updates service documentation
Release v1.8.9 (2017-04-05)
===
### Service Client Updates
* `service/elasticache`: Updates service API, documentation, paginators, and examples
* ElastiCache added support for testing the ElastiCache Multi-AZ feature with Automatic Failover.
Release v1.8.8 (2017-04-04)
===

View File

@ -2,4 +2,4 @@
### SDK Enhancements
### SDK Bugs
### SDK Bugs

View File

@ -139,9 +139,14 @@ func (l *HandlerList) PushFrontNamed(n NamedHandler) {
// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
l.RemoveByName(n.Name)
}
// RemoveByName removes a NamedHandler by name.
func (l *HandlerList) RemoveByName(name string) {
for i := 0; i < len(l.list); i++ {
m := l.list[i]
if m.Name == n.Name {
if m.Name == name {
// Shift the array down, avoiding allocation of a new array
copy(l.list[i:], l.list[i+1:])
l.list[len(l.list)-1] = NamedHandler{}

View File

@ -16,10 +16,20 @@ import (
"github.com/aws/aws-sdk-go/aws/client/metadata"
)
// CanceledErrorCode is the error code that will be returned by an
// API request that was canceled. Requests given a aws.Context may
// return this error when canceled.
const CanceledErrorCode = "RequestCanceled"
const (
// ErrCodeSerialization is the serialization error code that is received
// during protocol unmarshaling.
ErrCodeSerialization = "SerializationError"
// ErrCodeResponseTimeout is the connection timeout error that is received
// during body reads.
ErrCodeResponseTimeout = "ResponseTimeout"
// CanceledErrorCode is the error code that will be returned by an
// API request that was canceled. Requests given a aws.Context may
// return this error when canceled.
CanceledErrorCode = "RequestCanceled"
)
// A Request is the service request to be made.
type Request struct {
@ -349,7 +359,7 @@ func (r *Request) ResetBody() {
// Related golang/go#18257
l, err := computeBodyLength(r.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to compute request body size", err)
r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
return
}
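For illustration, how a caller might branch on the consolidated codes above once a request fails (the switch and surrounding names are illustrative, not part of this diff):
if aerr, ok := req.Error.(awserr.Error); ok {
	switch aerr.Code() {
	case request.ErrCodeSerialization:
		// protocol unmarshaling failed
	case request.ErrCodeResponseTimeout:
		// a body read exceeded its per-read timeout
	case request.CanceledErrorCode:
		// the aws.Context attached to the request was canceled
	}
}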

View File

@ -1,6 +1,9 @@
package request
import (
"net"
"os"
"syscall"
"time"
"github.com/aws/aws-sdk-go/aws"
@ -28,6 +31,7 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
var retryableCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
ErrCodeResponseTimeout: {},
"RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
}
@ -69,12 +73,32 @@ func isCodeExpiredCreds(code string) bool {
return ok
}
func isSerializationErrorRetryable(err error) bool {
if err == nil {
return false
}
if aerr, ok := err.(awserr.Error); ok {
return isCodeRetryable(aerr.Code())
}
if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
return sysErr.Err == syscall.ECONNRESET
}
}
return false
}
// IsErrorRetryable returns whether the error is retryable, based on its Code.
// Returns false if the request has no Error set.
func (r *Request) IsErrorRetryable() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
if err, ok := r.Error.(awserr.Error); ok && err.Code() != ErrCodeSerialization {
return isCodeRetryable(err.Code())
} else if ok {
return isSerializationErrorRetryable(err.OrigErr())
}
}
return false
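Concretely, given the helpers above: a connection reset surfaced as a *net.OpError wrapping an *os.SyscallError is classified as retryable, while a nil error is not (hand-built values, illustrative only):
opErr := &net.OpError{
	Op:  "read",
	Err: os.NewSyscallError("read", syscall.ECONNRESET),
}
fmt.Println(isSerializationErrorRetryable(opErr)) // true: ECONNRESET is retryable
fmt.Println(isSerializationErrorRetryable(nil))   // false: no error at all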

View File

@ -0,0 +1,94 @@
package request
import (
"io"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
)
var timeoutErr = awserr.New(
ErrCodeResponseTimeout,
"read on body has reached the timeout limit",
nil,
)
type readResult struct {
n int
err error
}
// timeoutReadCloser will handle body reads that take too long.
// It returns an error carrying ErrCodeResponseTimeout if a read times out.
type timeoutReadCloser struct {
reader io.ReadCloser
duration time.Duration
}
// Read will spin off a goroutine to call the reader's Read method. We will
// select on the timer's channel or the read's channel. Whoever completes first
// will be returned.
func (r *timeoutReadCloser) Read(b []byte) (int, error) {
timer := time.NewTimer(r.duration)
c := make(chan readResult, 1)
go func() {
n, err := r.reader.Read(b)
timer.Stop()
c <- readResult{n: n, err: err}
}()
select {
case data := <-c:
return data.n, data.err
case <-timer.C:
return 0, timeoutErr
}
}
func (r *timeoutReadCloser) Close() error {
return r.reader.Close()
}
const (
// HandlerResponseTimeout is the name of the response timeout handler.
HandlerResponseTimeout = "ResponseTimeoutHandler"
)
// adaptToResponseTimeoutError is a handler that replaces a top-level error
// with an ErrCodeResponseTimeout error when that is what the wrapped error is.
func adaptToResponseTimeoutError(req *Request) {
if err, ok := req.Error.(awserr.Error); ok {
aerr, ok := err.OrigErr().(awserr.Error)
if ok && aerr.Code() == ErrCodeResponseTimeout {
req.Error = aerr
}
}
}
// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
// This allows for per-read timeouts. If a read times out, the returned error will
// carry the ErrCodeResponseTimeout code.
//
//    svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
func WithResponseReadTimeout(duration time.Duration) Option {
return func(r *Request) {
var timeoutHandler = NamedHandler{
HandlerResponseTimeout,
func(req *Request) {
req.HTTPResponse.Body = &timeoutReadCloser{
reader: req.HTTPResponse.Body,
duration: duration,
}
}}
// remove the handler so we are not stomping over any new durations.
r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
r.Handlers.Send.PushBackNamed(timeoutHandler)
r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
}
}
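A minimal usage sketch of timeoutReadCloser itself, with a canned in-memory body standing in for an HTTP response (ioutil/strings imports assumed; not SDK code):
rc := &timeoutReadCloser{
	reader:   ioutil.NopCloser(strings.NewReader("payload")),
	duration: 500 * time.Millisecond,
}
buf := make([]byte, 16)
n, err := rc.Read(buf) // n == 7, err == nil; a reader stalled past 500ms would get timeoutErr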

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.8.8"
const SDKVersion = "1.8.10"

View File

@ -58,7 +58,7 @@ func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (r
// AddTagsToResource API operation for Amazon ElastiCache.
//
// Adds up to 10 cost allocation tags to the named resource. A cost allocation
// Adds up to 50 cost allocation tags to the named resource. A cost allocation
// tag is a key-value pair where the key and value are case-sensitive. You can
// use cost allocation tags to categorize and track your AWS costs.
//
@ -87,7 +87,7 @@ func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (r
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 10.
// on a resource is 50.
//
// * ErrCodeInvalidARNFault "InvalidARN"
// The requested Amazon Resource Name (ARN) does not refer to an existing resource.
@ -477,7 +477,7 @@ func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput)
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 10.
// on a resource is 50.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
@ -552,8 +552,20 @@ func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParamet
// CreateCacheParameterGroup API operation for Amazon ElastiCache.
//
// Creates a new cache parameter group. A cache parameter group is a collection
// of parameters that you apply to all of the nodes in a cache cluster.
// Creates a new Amazon ElastiCache cache parameter group. An ElastiCache cache
// parameter group is a collection of parameters and their values that are applied
// to all of the nodes in any cache cluster or replication group using the CacheParameterGroup.
//
// A newly created CacheParameterGroup is an exact duplicate of the default
// parameter group for the CacheParameterGroupFamily. To customize the newly
// created CacheParameterGroup you can change the values of specific parameters.
// For more information, see:
//
// * ModifyCacheParameterGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html)
// in the ElastiCache API Reference.
//
// * Parameters and Parameter Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/ParameterGroups.html)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -855,7 +867,11 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou
// When a Redis (cluster mode disabled) replication group has been successfully
// created, you can add one or more read replicas to it, up to a total of 5
// read replicas. You cannot alter a Redis (cluster mode enabled) replication
// group after it has been created.
// group after it has been created. However, if you need to increase or decrease
// the number of node groups (console: shards), you can avail yourself of ElastiCache
// for Redis' enhanced backup and restore. For more information, see Restoring
// From a Backup with Cluster Resizing (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/backups-restoring.html)
// in the ElastiCache User Guide.
//
// This operation is valid for Redis only.
//
@ -910,7 +926,7 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 10.
// on a resource is 50.
//
// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded"
// The request cannot be processed because it would exceed the maximum of 15
@ -1724,15 +1740,15 @@ func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersI
// identifier is specified, or about a specific cache cluster if a cache cluster
// identifier is supplied.
//
// By default, abbreviated information about the cache clusters are returned.
// You can use the optional ShowDetails flag to retrieve detailed information
// By default, abbreviated information about the cache clusters is returned.
// You can use the optional ShowCacheNodeInfo flag to retrieve detailed information
// about the cache nodes associated with the cache clusters. These details include
// the DNS address and port for the cache node endpoint.
//
// If the cluster is in the CREATING state, only cluster-level information is
// If the cluster is in the creating state, only cluster-level information is
// displayed until all of the nodes are successfully provisioned.
//
// If the cluster is in the DELETING state, only cluster-level information is
// If the cluster is in the deleting state, only cluster-level information is
// displayed.
//
// If cache nodes are currently being added to the cache cluster, node endpoint
@ -3543,7 +3559,7 @@ func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput
// optional. You can use cost allocation tags to categorize and track your AWS
// costs.
//
// You can have a maximum of 10 cost allocation tags on an ElastiCache resource.
// You can have a maximum of 50 cost allocation tags on an ElastiCache resource.
// For more information, see Using Cost Allocation Tags in Amazon ElastiCache
// (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/BestPractices.html).
//
@ -4477,6 +4493,151 @@ func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context
return out, req.Send()
}
const opTestFailover = "TestFailover"
// TestFailoverRequest generates a "aws/request.Request" representing the
// client's request for the TestFailover operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See TestFailover for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the TestFailover method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the TestFailoverRequest method.
// req, resp := client.TestFailoverRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover
func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *request.Request, output *TestFailoverOutput) {
op := &request.Operation{
Name: opTestFailover,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &TestFailoverInput{}
}
output = &TestFailoverOutput{}
req = c.newRequest(op, input, output)
return
}
// TestFailover API operation for Amazon ElastiCache.
//
// Represents the input of a TestFailover operation which tests automatic failover
// on a specified node group (called shard in the console) in a replication
// group (called cluster in the console).
//
// Note the following
//
// * A customer can use this operation to test automatic failover on up to
// 5 shards (called node groups in the ElastiCache API and AWS CLI) in any
// rolling 24-hour period.
//
// * If calling this operation on shards in different clusters (called replication
// groups in the API and CLI), the calls can be made concurrently.
//
// * If calling this operation multiple times on different shards in the
// same Redis (cluster mode enabled) replication group, the first node replacement
// must complete before a subsequent call can be made.
//
// * To determine whether the node replacement is complete you can check
// Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache
// API. Look for the following automatic failover related events, listed
// here in order of occurrence:
//
// Replication group message: Test Failover API called for node group <node-group-id>
//
// Cache cluster message: Failover from master node <primary-node-id> to replica
// node <node-id> completed
//
// Replication group message: Failover from master node <primary-node-id> to
// replica node <node-id> completed
//
// Cache cluster message: Recovering cache nodes <node-id>
//
// Cache cluster message: Finished recovery for cache nodes <node-id>
//
// For more information see:
//
// Viewing ElastiCache Events (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/ECEvents.Viewing.html)
// in the ElastiCache User Guide
//
// DescribeEvents (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html)
// in the ElastiCache API Reference
//
// Also see, Testing Multi-AZ with Automatic Failover (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/AutoFailover.html#auto-failover-test)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation TestFailover for usage and error information.
//
// Returned Error Codes:
// * ErrCodeAPICallRateForCustomerExceededFault "APICallRateForCustomerExceeded"
// The customer has exceeded the allowed rate of API calls.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeNodeGroupNotFoundFault "NodeGroupNotFoundFault"
// The node group specified by the NodeGroupId parameter could not be found.
// Please verify that the node group exists and that you spelled the NodeGroupId
// value correctly.
//
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeTestFailoverNotAvailableFault "TestFailoverNotAvailableFault"
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover
func (c *ElastiCache) TestFailover(input *TestFailoverInput) (*TestFailoverOutput, error) {
req, out := c.TestFailoverRequest(input)
return out, req.Send()
}
// TestFailoverWithContext is the same as TestFailover with the addition of
// the ability to pass a context and additional request options.
//
// See TestFailover for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) TestFailoverWithContext(ctx aws.Context, input *TestFailoverInput, opts ...request.Option) (*TestFailoverOutput, error) {
req, out := c.TestFailoverRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
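A hedged call sketch for the new operation; the client value and identifiers below are placeholders, not from this diff:
// svc is assumed to be an initialized *elasticache.ElastiCache client.
out, err := svc.TestFailover(&elasticache.TestFailoverInput{
	ReplicationGroupId: aws.String("my-replication-group"), // console: cluster
	NodeGroupId:        aws.String("0001"),                 // console: shard
})
if err == nil {
	fmt.Println(out.ReplicationGroup)
}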
// Represents the input of an AddTagsToResource operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AddTagsToResourceMessage
type AddTagsToResourceInput struct {
@ -4739,8 +4900,11 @@ type CacheCluster struct {
// library.
ClientDownloadLandingPage *string `type:"string"`
// Represents the information required for client programs to connect to a cache
// node.
// Represents a Memcached cluster endpoint which, if Automatic Discovery is
// enabled on the cluster, can be used by an application to connect to any node
// in the cluster. The configuration endpoint will always have .cfg in it.
//
// Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211
ConfigurationEndpoint *Endpoint `type:"structure"`
// The name of the cache engine (memcached or redis) to be used for this cache
@ -6507,8 +6671,8 @@ type CreateReplicationGroupInput struct {
// ReplicaCount.
//
// If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode
// enabled) replication group, you can use this parameter to configure one node
// group (shard) or you can omit this parameter.
// enabled) replication group, you can use this parameter to individually configure
// each node group (shard), or you can omit this parameter.
NodeGroupConfiguration []*NodeGroupConfiguration `locationNameList:"NodeGroupConfiguration" type:"list"`
// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
@ -6522,7 +6686,10 @@ type CreateReplicationGroupInput struct {
// This parameter is not used if there is more than one node group (shard).
// You should use ReplicasPerNodeGroup instead.
//
// If Multi-AZ is enabled, the value of this parameter must be at least 2.
// If AutomaticFailoverEnabled is true, the value of this parameter must be
// at least 2. If AutomaticFailoverEnabled is false you can omit this parameter
// (it will default to 1), or you can explicitly set it to a value between 2
// and 6.
//
// The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
NumCacheClusters *int64 `type:"integer"`
@ -6620,9 +6787,11 @@ type CreateReplicationGroupInput struct {
// A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB
// snapshot files stored in Amazon S3. The snapshot files are used to populate
// the replication group. The Amazon S3 object name in the ARN cannot contain
// any commas. The list must match the number of node groups (shards) in the
// replication group, which means you cannot repartition.
// the new replication group. The Amazon S3 object name in the ARN cannot contain
// any commas. The new replication group will have the number of node groups
// (console: shards) specified by the parameter NumNodeGroups or the number
// of node groups configured by NodeGroupConfiguration regardless of the number
// of ARNs specified here.
//
// This parameter is only valid if the Engine parameter is redis.
//
@ -7376,6 +7545,11 @@ type DescribeCacheClustersInput struct {
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// An optional flag that can be included in the DescribeCacheCluster request
// to show only nodes (API/CLI: clusters) that are not members of a replication
// group. In practice, this means Memcached and single node Redis clusters.
ShowCacheClustersNotInReplicationGroups *bool `type:"boolean"`
// An optional flag that can be included in the DescribeCacheCluster request
// to retrieve information about the individual cache nodes.
ShowCacheNodeInfo *bool `type:"boolean"`
@ -7409,6 +7583,12 @@ func (s *DescribeCacheClustersInput) SetMaxRecords(v int64) *DescribeCacheCluste
return s
}
// SetShowCacheClustersNotInReplicationGroups sets the ShowCacheClustersNotInReplicationGroups field's value.
func (s *DescribeCacheClustersInput) SetShowCacheClustersNotInReplicationGroups(v bool) *DescribeCacheClustersInput {
s.ShowCacheClustersNotInReplicationGroups = &v
return s
}
// SetShowCacheNodeInfo sets the ShowCacheNodeInfo field's value.
func (s *DescribeCacheClustersInput) SetShowCacheNodeInfo(v bool) *DescribeCacheClustersInput {
s.ShowCacheNodeInfo = &v
@ -8052,11 +8232,13 @@ func (s *DescribeEngineDefaultParametersOutput) SetEngineDefaults(v *EngineDefau
type DescribeEventsInput struct {
_ struct{} `type:"structure"`
// The number of minutes' worth of events to retrieve.
// The number of minutes worth of events to retrieve.
Duration *int64 `type:"integer"`
// The end of the time interval for which to retrieve events, specified in ISO
// 8601 format.
//
// Example: 2017-03-30T07:03:49.555Z
EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// An optional marker returned from a prior request. Use this marker for pagination
@ -8083,6 +8265,8 @@ type DescribeEventsInput struct {
// The beginning of the time interval to retrieve events for, specified in ISO
// 8601 format.
//
// Example: 2017-03-30T07:03:49.555Z
StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
@ -8977,10 +9161,18 @@ func (s *ListAllowedNodeTypeModificationsInput) SetReplicationGroupId(v string)
return s
}
// Represents the allowed node types you can use to modify your cache cluster
// or replication group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AllowedNodeTypeModificationsMessage
type ListAllowedNodeTypeModificationsOutput struct {
_ struct{} `type:"structure"`
// A string list, each element of which specifies a cache node type which you
// can use to scale your cache cluster or replication group.
//
// When scaling up a Redis cluster or replication group using ModifyCacheCluster
// or ModifyReplicationGroup, use a value from this list for the CacheNodeType
// parameter.
ScaleUpModifications []*string `type:"list"`
}
@ -9649,6 +9841,9 @@ type ModifyReplicationGroupInput struct {
// and create it anew with the earlier engine version.
EngineVersion *string `type:"string"`
// The name of the Node Group (called shard in the console).
NodeGroupId *string `type:"string"`
// The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
// are sent.
//
@ -9794,6 +9989,12 @@ func (s *ModifyReplicationGroupInput) SetEngineVersion(v string) *ModifyReplicat
return s
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *ModifyReplicationGroupInput) SetNodeGroupId(v string) *ModifyReplicationGroupInput {
s.NodeGroupId = &v
return s
}
// SetNotificationTopicArn sets the NotificationTopicArn field's value.
func (s *ModifyReplicationGroupInput) SetNotificationTopicArn(v string) *ModifyReplicationGroupInput {
s.NotificationTopicArn = &v
@ -9964,8 +10165,8 @@ type NodeGroupConfiguration struct {
// The number of read replica nodes in this node group (shard).
ReplicaCount *int64 `type:"integer"`
// A string that specifies the keyspaces as a series of comma separated values.
// Keyspaces are 0 to 16,383. The string is in the format startkey-endkey.
// A string that specifies the keyspace for a particular node group. Keyspaces
// range from 0 to 16,383. The string is in the format startkey-endkey.
//
// Example: "0-3999"
Slots *string `type:"string"`
@ -10661,6 +10862,17 @@ type ReplicationGroup struct {
// Redis (cluster mode enabled): T1 node types.
AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"`
// The name of the compute and memory capacity node type for each node in the
// replication group.
CacheNodeType *string `type:"string"`
// A flag indicating whether or not this replication group is cluster enabled;
// i.e., whether its data can be partitioned across multiple shards (API/CLI:
// node groups).
//
// Valid values: true | false
ClusterEnabled *bool `type:"boolean"`
// The configuration endpoint for this replication group. Use the configuration
// endpoint to connect to this replication group.
ConfigurationEndpoint *Endpoint `type:"structure"`
@ -10727,6 +10939,18 @@ func (s *ReplicationGroup) SetAutomaticFailover(v string) *ReplicationGroup {
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ReplicationGroup) SetCacheNodeType(v string) *ReplicationGroup {
s.CacheNodeType = &v
return s
}
// SetClusterEnabled sets the ClusterEnabled field's value.
func (s *ReplicationGroup) SetClusterEnabled(v bool) *ReplicationGroup {
s.ClusterEnabled = &v
return s
}
// SetConfigurationEndpoint sets the ConfigurationEndpoint field's value.
func (s *ReplicationGroup) SetConfigurationEndpoint(v *Endpoint) *ReplicationGroup {
s.ConfigurationEndpoint = v
@ -11681,10 +11905,10 @@ func (s *Subnet) SetSubnetIdentifier(v string) *Subnet {
type Tag struct {
_ struct{} `type:"structure"`
// The key for the tag.
// The key for the tag. May not be null.
Key *string `type:"string"`
// The tag's value. May not be null.
// The tag's value. May be null.
Value *string `type:"string"`
}
@ -11710,7 +11934,7 @@ func (s *Tag) SetValue(v string) *Tag {
return s
}
// Represents the output from the AddTagsToResource, ListTagsOnResource, and
// Represents the output from the AddTagsToResource, ListTagsForResource, and
// RemoveTagsFromResource operations.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TagListMessage
type TagListMessage struct {
@ -11736,6 +11960,86 @@ func (s *TagListMessage) SetTagList(v []*Tag) *TagListMessage {
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailoverMessage
type TestFailoverInput struct {
_ struct{} `type:"structure"`
// The name of the node group (called shard in the console) in this replication
// group on which automatic failover is to be tested. You may test automatic
// failover on up to 5 node groups in any rolling 24-hour period.
//
// NodeGroupId is a required field
NodeGroupId *string `type:"string" required:"true"`
// The name of the replication group (console: cluster) whose automatic failover
// is being tested by this operation.
//
// ReplicationGroupId is a required field
ReplicationGroupId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s TestFailoverInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestFailoverInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TestFailoverInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TestFailoverInput"}
if s.NodeGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("NodeGroupId"))
}
if s.ReplicationGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *TestFailoverInput) SetNodeGroupId(v string) *TestFailoverInput {
s.NodeGroupId = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *TestFailoverInput) SetReplicationGroupId(v string) *TestFailoverInput {
s.ReplicationGroupId = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailoverResult
type TestFailoverOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific Redis replication group.
ReplicationGroup *ReplicationGroup `type:"structure"`
}
// String returns the string representation
func (s TestFailoverOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestFailoverOutput) GoString() string {
return s.String()
}
// SetReplicationGroup sets the ReplicationGroup field's value.
func (s *TestFailoverOutput) SetReplicationGroup(v *ReplicationGroup) *TestFailoverOutput {
s.ReplicationGroup = v
return s
}
const (
// AZModeSingleAz is a AZMode enum value
AZModeSingleAz = "single-az"


@ -4,6 +4,12 @@ package elasticache
const (
// ErrCodeAPICallRateForCustomerExceededFault for service response error code
// "APICallRateForCustomerExceeded".
//
// The customer has exceeded the allowed rate of API calls.
ErrCodeAPICallRateForCustomerExceededFault = "APICallRateForCustomerExceeded"
// ErrCodeAuthorizationAlreadyExistsFault for service response error code
// "AuthorizationAlreadyExists".
//
@ -180,6 +186,14 @@ const (
// The VPC network is in an invalid state.
ErrCodeInvalidVPCNetworkStateFault = "InvalidVPCNetworkStateFault"
// ErrCodeNodeGroupNotFoundFault for service response error code
// "NodeGroupNotFoundFault".
//
// The node group specified by the NodeGroupId parameter could not be found.
// Please verify that the node group exists and that you spelled the NodeGroupId
// value correctly.
ErrCodeNodeGroupNotFoundFault = "NodeGroupNotFoundFault"
// ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault for service response error code
// "NodeGroupsPerReplicationGroupQuotaExceeded".
//
@ -288,6 +302,10 @@ const (
//
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 10.
// on a resource is 50.
ErrCodeTagQuotaPerResourceExceeded = "TagQuotaPerResourceExceeded"
// ErrCodeTestFailoverNotAvailableFault for service response error code
// "TestFailoverNotAvailableFault".
ErrCodeTestFailoverNotAvailableFault = "TestFailoverNotAvailableFault"
)


@ -303,7 +303,7 @@ func (c *ELBV2) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *
//
// Returned Error Codes:
// * ErrCodeDuplicateLoadBalancerNameException "DuplicateLoadBalancerName"
// A load balancer with the specified name already exists for this account.
// A load balancer with the specified name already exists.
//
// * ErrCodeTooManyLoadBalancersException "TooManyLoadBalancers"
// You've reached the limit on the number of load balancers for your AWS account.
@ -1477,7 +1477,8 @@ func (c *ELBV2) DescribeSSLPoliciesRequest(input *DescribeSSLPoliciesInput) (req
//
// Describes the specified policies or all policies used for SSL negotiation.
//
// Note that the only supported policy at this time is ELBSecurityPolicy-2015-05.
// For more information, see Security Policies (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies)
// in the Application Load Balancers Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -1557,7 +1558,8 @@ func (c *ELBV2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Requ
// DescribeTags API operation for Elastic Load Balancing.
//
// Describes the tags for the specified resources.
// Describes the tags for the specified resources. You can describe the tags
// for one or more Application Load Balancers and target groups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -1964,7 +1966,7 @@ func (c *ELBV2) ModifyListenerRequest(input *ModifyListenerInput) (req *request.
// Any properties that you do not specify retain their current values. However,
// changing the protocol from HTTPS to HTTP removes the security policy and
// SSL certificate properties. If you change the protocol from HTTP to HTTPS,
// you must add the security policy.
// you must add the security policy and server certificate.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -3304,7 +3306,7 @@ type CreateLoadBalancerInput struct {
// The name of the load balancer.
//
// This name must be unique within your AWS account, can have a maximum of 32
// This name must be unique per region per account, can have a maximum of 32
// characters, must contain only alphanumeric characters or hyphens, and must
// not begin or end with a hyphen.
//
@ -3446,10 +3448,25 @@ type CreateRuleInput struct {
// Actions is a required field
Actions []*Action `type:"list" required:"true"`
// A condition. Each condition has the field path-pattern and specifies one
// path pattern. A path pattern is case sensitive, can be up to 128 characters
// in length, and can contain any of the following characters. Note that you
// can include up to three wildcard characters in a path pattern.
// A condition. Each condition specifies a field name and a single value.
//
// If the field name is host-header, you can specify a single host name (for
// example, my.example.com). A host name is case insensitive, can be up to 128
// characters in length, and can contain any of the following characters. Note
// that you can include up to three wildcard characters.
//
// * A-Z, a-z, 0-9
//
// * - .
//
// * * (matches 0 or more characters)
//
// * ? (matches exactly 1 character)
//
// If the field name is path-pattern, you can specify a single path pattern.
// A path pattern is case sensitive, can be up to 128 characters in length,
// and can contain any of the following characters. Note that you can include
// up to three wildcard characters.
//
// * A-Z, a-z, 0-9
//
@ -3604,6 +3621,10 @@ type CreateTargetGroupInput struct {
// The name of the target group.
//
// This name must be unique per region per account, can have a maximum of 32
// characters, must contain only alphanumeric characters or hyphens, and must
// not begin or end with a hyphen.
//
// Name is a required field
Name *string `type:"string" required:"true"`
@ -4240,7 +4261,8 @@ func (s *DescribeLoadBalancerAttributesOutput) SetAttributes(v []*LoadBalancerAt
type DescribeLoadBalancersInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Names (ARN) of the load balancers.
// The Amazon Resource Names (ARN) of the load balancers. You can specify up
// to 20 load balancers in a single call.
LoadBalancerArns []*string `type:"list"`
// The marker for the next set of results. (You received this marker from a
@ -5106,8 +5128,9 @@ func (s *LoadBalancerState) SetReason(v string) *LoadBalancerState {
type Matcher struct {
_ struct{} `type:"structure"`
// The HTTP codes. The default value is 200. You can specify multiple values
// (for example, "200,202") or a range of values (for example, "200-299").
// The HTTP codes. You can specify values between 200 and 499. The default value
// is 200. You can specify multiple values (for example, "200,202") or a range
// of values (for example, "200-299").
//
// HttpCode is a required field
HttpCode *string `type:"string" required:"true"`
@ -5163,7 +5186,9 @@ type ModifyListenerInput struct {
// The protocol for connections from clients to the load balancer.
Protocol *string `type:"string" enum:"ProtocolEnum"`
// The security policy that defines which ciphers and protocols are supported.
// The security policy that defines which protocols and ciphers are supported.
// For more information, see Security Policies (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies)
// in the Application Load Balancers Guide.
SslPolicy *string `type:"string"`
}
@ -5881,14 +5906,28 @@ func (s *Rule) SetRuleArn(v string) *Rule {
type RuleCondition struct {
_ struct{} `type:"structure"`
// The only possible value is path-pattern.
// The name of the field. The possible values are host-header and path-pattern.
Field *string `type:"string"`
// The path pattern. You can specify a single path pattern.
// The condition value.
//
// A path pattern is case sensitive, can be up to 128 characters in length,
// and can contain any of the following characters. Note that you can include
// up to three wildcard characters in a path pattern.
// If the field name is host-header, you can specify a single host name (for
// example, my.example.com). A host name is case insensitive, can be up to 128
// characters in length, and can contain any of the following characters. Note
// that you can include up to three wildcard characters.
//
// * A-Z, a-z, 0-9
//
// * - .
//
// * * (matches 0 or more characters)
//
// * ? (matches exactly 1 character)
//
// If the field name is path-pattern, you can specify a single path pattern
// (for example, /img/*). A path pattern is case sensitive, can be up to 128
// characters in length, and can contain any of the following characters. Note
// that you can include up to three wildcard characters.
//
// * A-Z, a-z, 0-9
//


@ -19,7 +19,7 @@ const (
// ErrCodeDuplicateLoadBalancerNameException for service response error code
// "DuplicateLoadBalancerName".
//
// A load balancer with the specified name already exists for this account.
// A load balancer with the specified name already exists.
ErrCodeDuplicateLoadBalancerNameException = "DuplicateLoadBalancerName"
// ErrCodeDuplicateTagKeysException for service response error code


@ -0,0 +1,22 @@
package kinesis

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/request"
)

// readDuration bounds how long a single read from the streaming
// response body may block before the request is timed out.
var readDuration = 5 * time.Second

func init() {
	// Operations that stream their response bodies and therefore
	// need a response read timeout applied.
	ops := []string{
		opGetRecords,
	}
	initRequest = func(r *request.Request) {
		for _, operation := range ops {
			if r.Operation.Name == operation {
				r.ApplyOptions(request.WithResponseReadTimeout(readDuration))
			}
		}
	}
}

vendor/vendor.json (vendored)

@ -375,10 +375,10 @@
"revision": "edd0930276e7f1a5f2cf3e7835b5dc42a3217669"
},
{
"checksumSHA1": "oHtkxzPF9DIWqua2uA5MiVFRq+Q=",
"checksumSHA1": "jtSV16UIYcS+MTy2bor1Nd+6tM8=",
"path": "github.com/DreamItGetIT/statuscake",
"revision": "93fe653ce590267167708b20d7f49e0cc7021d99",
"revisionTime": "2017-02-15T23:13:05Z"
"revision": "2eaa583e3badecb05ab0e963ed19f3d7f1a23273",
"revisionTime": "2017-04-07T12:51:49Z"
},
{
"checksumSHA1": "nomT+8bvze/Qmc0tK0r0mwgHV6M=",
@ -494,628 +494,628 @@
"revisionTime": "2017-01-23T00:46:44Z"
},
{
"checksumSHA1": "YSO8t4sb+6eeyWkhGWZmhdrcT5w=",
"checksumSHA1": "wvNp7Z0aIf9CCLYtzXpcO90YWbg=",
"path": "github.com/aws/aws-sdk-go",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "7N25Nj1APtvRF3NElp7gNrHYJkE=",
"checksumSHA1": "FQz+RL20lsUYIpT2CNpYeyKn8Lg=",
"path": "github.com/aws/aws-sdk-go/aws",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
"path": "github.com/aws/aws-sdk-go/aws/awserr",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=",
"path": "github.com/aws/aws-sdk-go/aws/awsutil",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "iThCyNRL/oQFD9CF2SYgBGl+aww=",
"path": "github.com/aws/aws-sdk-go/aws/client",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
"path": "github.com/aws/aws-sdk-go/aws/client/metadata",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "0Gfk83qXYimO87ZoK1lL9+ifWHo=",
"path": "github.com/aws/aws-sdk-go/aws/corehandlers",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "P7gt3PNk6bDOoTZ2N9QOonkaGWw=",
"path": "github.com/aws/aws-sdk-go/aws/credentials",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "6cj/zsRmcxkE1TLS+v910GbQYg0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "l2O7P/kvovK2zxKhuFehFNXLk+Q=",
"path": "github.com/aws/aws-sdk-go/aws/defaults",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=",
"path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "+yCOae0vRONrO27QiITkGWblOKk=",
"path": "github.com/aws/aws-sdk-go/aws/endpoints",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "f5/e+cN80DRK0I2gqbZ0ikSJqhM=",
"checksumSHA1": "/L6UweKsmfyHTu01qrFD1ijzSbE=",
"path": "github.com/aws/aws-sdk-go/aws/request",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "5pzA5afgeU1alfACFh8z2CDUMao=",
"path": "github.com/aws/aws-sdk-go/aws/session",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "SvIsunO8D9MEKbetMENA4WRnyeE=",
"path": "github.com/aws/aws-sdk-go/aws/signer/v4",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
"path": "github.com/aws/aws-sdk-go/private/protocol",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=",
"path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=",
"path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/rest",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=",
"path": "github.com/aws/aws-sdk-go/private/protocol/restjson",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=",
"path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "lZ1z4xAbT8euCzKoAsnEYic60VE=",
"path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=",
"path": "github.com/aws/aws-sdk-go/private/signer/v2",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ZmojxECvjM6BeI752BPyZAmOhlo=",
"path": "github.com/aws/aws-sdk-go/service/acm",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "H3h5AMX7c9oT50oovfJIfmkvoBg=",
"path": "github.com/aws/aws-sdk-go/service/apigateway",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "3ykAVetHFs9T3YivIPvRyiNFdys=",
"path": "github.com/aws/aws-sdk-go/service/applicationautoscaling",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "/d8U22aF2+qYhWYscPzClHTDCP4=",
"path": "github.com/aws/aws-sdk-go/service/autoscaling",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "n6v4S6jPpkHsS59Oj1EZPQIdRNg=",
"path": "github.com/aws/aws-sdk-go/service/cloudformation",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "QLEaEFA3V4n+ohwENEoWV+AXBj4=",
"path": "github.com/aws/aws-sdk-go/service/cloudfront",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Vh3PtQEwIUabpoE7PsCZItUZuVc=",
"path": "github.com/aws/aws-sdk-go/service/cloudtrail",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "aGx2atOHEXSowjXUQ3UoJ/t2LSI=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatch",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Ez3+aU0QGRe4isLDFQuHNRyF3zA=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatchevents",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "+AjVMO3KUY7Wkh0vHRnJqRG8kGc=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "uTt6pA8eB+udA7tC8ElLbr2eeK4=",
"path": "github.com/aws/aws-sdk-go/service/codebuild",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "sqppuUIMPMBOnTRVR4BhHAoaTrY=",
"path": "github.com/aws/aws-sdk-go/service/codecommit",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "u6cK2krOuDqi8gy5V316FvH34t0=",
"path": "github.com/aws/aws-sdk-go/service/codedeploy",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "fK7MOfX/cV2DJ176+umySuuYh2s=",
"path": "github.com/aws/aws-sdk-go/service/codepipeline",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "gSm1lj0J4klQMw7jHE0fU/RV+4Y=",
"path": "github.com/aws/aws-sdk-go/service/configservice",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "SP6m/hn+Hj72wkgaAZ8NM/7s/18=",
"path": "github.com/aws/aws-sdk-go/service/databasemigrationservice",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "2Su2xzCbUPbCdVkyWuXcmxAI2Rs=",
"path": "github.com/aws/aws-sdk-go/service/directoryservice",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Y4Wg7dxPIU3W1dqN3vnpSLA1ChQ=",
"path": "github.com/aws/aws-sdk-go/service/dynamodb",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "2PIG7uhrvvDAjiNZINBVCgW/Uds=",
"path": "github.com/aws/aws-sdk-go/service/ec2",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ClGPl4TLpf457zUeOEWyTvqBRjc=",
"path": "github.com/aws/aws-sdk-go/service/ecr",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "c6KWQtc1bRCFs/IuIe/jgZXalBw=",
"path": "github.com/aws/aws-sdk-go/service/ecs",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "4mBZS9FSCW73hcjj0CikPqpikag=",
"path": "github.com/aws/aws-sdk-go/service/efs",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "i1XF+NR9mzU/ftbzd2zoxl07x1A=",
"checksumSHA1": "P7GrpZV3eYQASV8Z+DeFuo9zbm4=",
"path": "github.com/aws/aws-sdk-go/service/elasticache",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "DXs9Zpa2Db2adBjDi/EyFp6913E=",
"path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "dv1QkeLjDyUlMQkbnLjm6l0mJHo=",
"path": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ir6xGAYAwIdWKgk7BVHNQWvlA/g=",
"path": "github.com/aws/aws-sdk-go/service/elastictranscoder",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "sdFllfq+lllwyk0yMFmWzg+qs9Y=",
"path": "github.com/aws/aws-sdk-go/service/elb",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "ky/x/8q7MyKV495TI9wkMKXZFp0=",
"checksumSHA1": "oJQzYnuAHAhKAtAuinSPEeDsXoU=",
"path": "github.com/aws/aws-sdk-go/service/elbv2",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "tLfj5mQiTOOhWdeU6hL5PYRAEP0=",
"path": "github.com/aws/aws-sdk-go/service/emr",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "Yy7CkVZR1/vrcdMPWJmQMC2i5hk=",
"path": "github.com/aws/aws-sdk-go/service/firehose",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "tuoOAm2gCN2txnIq1jKbCHqeQQM=",
"path": "github.com/aws/aws-sdk-go/service/glacier",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "NoG5QpuGo3iLNk6DwwWsDCogfGY=",
"path": "github.com/aws/aws-sdk-go/service/iam",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "5ElupFtEcDvKa1yXTh6nR9HijMU=",
"path": "github.com/aws/aws-sdk-go/service/inspector",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "g36tdw9s90aUjSoUmpcLViHKQdI=",
"checksumSHA1": "Yzxk0tkTh2D9JP5I8gspLQLKu0U=",
"path": "github.com/aws/aws-sdk-go/service/kinesis",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "zeEh/FDxM81fU3X2ftWU2Z++iQg=",
"path": "github.com/aws/aws-sdk-go/service/kms",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "bHA5BLaVmAq8G5R40tv/X3HF5J0=",
"path": "github.com/aws/aws-sdk-go/service/lambda",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "GFXjkh1wWzohbefi1k0N+zbkmU4=",
"path": "github.com/aws/aws-sdk-go/service/lightsail",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "AB2pSc+tsnoNxFg0fSMDn7rFZbM=",
"path": "github.com/aws/aws-sdk-go/service/opsworks",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "5Br7nJBgOm6y67Z95CGZtOaxlFY=",
"path": "github.com/aws/aws-sdk-go/service/rds",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "COvVop5UbeJ4P0cMu+0ekubPLtE=",
"path": "github.com/aws/aws-sdk-go/service/redshift",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "e/lUvi2TAO9hms6HOzpX61exefw=",
"path": "github.com/aws/aws-sdk-go/service/route53",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "o7qpn0kxj43Ej/RwfCb9JbzfbfQ=",
"path": "github.com/aws/aws-sdk-go/service/s3",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "/2UKYWNc/LRv+M/LQRpJqukcXzc=",
"path": "github.com/aws/aws-sdk-go/service/ses",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "eUrUJOZg3sQHWyYKPRPO9OeN+a4=",
"path": "github.com/aws/aws-sdk-go/service/sfn",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "CVWvzoJ3YBvEI8TdQWlqUxOt9lk=",
"path": "github.com/aws/aws-sdk-go/service/simpledb",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "bJ8g3OhBAkxM+QaFrQCD0L0eWY8=",
"path": "github.com/aws/aws-sdk-go/service/sns",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "jzKBnso2Psx3CyS+0VR1BzvuccU=",
"path": "github.com/aws/aws-sdk-go/service/sqs",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "GPD+dDmDtseJFG8lB8aU58aszDg=",
"path": "github.com/aws/aws-sdk-go/service/ssm",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "SdsHiTUR9eRarThv/i7y6/rVyF4=",
"path": "github.com/aws/aws-sdk-go/service/sts",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "w3+CyiPRk1WUFFmueIRZkgQuHH0=",
"path": "github.com/aws/aws-sdk-go/service/waf",
"revision": "3a4119172097bf8725eb7c1b96b7957cfe2d92dc",
"revisionTime": "2017-04-04T17:58:04Z",
"version": "v1.8.8",
"versionExact": "v1.8.8"
"revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736",
"revisionTime": "2017-04-06T18:01:00Z",
"version": "v1.8.10",
"versionExact": "v1.8.10"
},
{
"checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=",


@ -301,7 +301,7 @@
&:before {
content: '\25CF';
color: $white;
font-size: 28px;
font-size: 20px;
line-height: 100%;
height: 100%;
}


@ -1,52 +0,0 @@
---
layout: "backend-types"
page_title: "Backend Type: atlas"
sidebar_current: "docs-backends-types-standard-atlas"
description: |-
Terraform can store the state in Atlas.
---
# atlas
**Kind: Standard (with no locking)**
Stores the state in [Atlas](https://atlas.hashicorp.com/).
You can create a new environment in the
[Environments section](https://atlas.hashicorp.com/environments)
and generate new token in the
[Tokens page](https://atlas.hashicorp.com/settings/tokens) under Settings.
## Example Configuration
```hcl
terraform {
backend "atlas" {
name = "bigbang/example"
access_token = "foo"
}
}
```
Note that for the access token we recommend using a
[partial configuration](/docs/backends/config.html).
## Example Referencing
```hcl
data "terraform_remote_state" "foo" {
backend = "atlas"
config {
name = "bigbang/example"
access_token = "X2iTFefU5aWOjg.atlasv1.YaDa"
}
}
```
## Configuration variables
The following configuration options / environment variables are supported:
* `name` - (Required) Full name of the environment (`<username>/<name>`)
* `access_token` / `ATLAS_TOKEN` - (Required) Atlas API token
* `address` - (Optional) Address to alternative Atlas location (Atlas Enterprise endpoint)


@ -0,0 +1,56 @@
---
layout: "backend-types"
page_title: "Backend Type: terraform enterprise"
sidebar_current: "docs-backends-types-standard-terraform-enterprise"
description: |-
Terraform can store the state in Terraform Enterprise.
---
# terraform enterprise
**Kind: Standard (with no locking)**
Stores the state in [Terraform Enterprise](https://www.terraform.io/docs/providers/index.html).
You can create a new environment in the
Environments section and generate a new token in the Tokens page under Settings.
~> **Why is this called "atlas"?** Atlas was previously a commercial offering
from HashiCorp that included a full suite of enterprise products. The products
have since been broken apart into their individual products, like **Terraform
Enterprise**. While this transition is in progress, you may see references to
"atlas" in the documentation. We apologize for the inconvenience.
## Example Configuration
```hcl
terraform {
backend "atlas" {
name = "bigbang/example"
access_token = "foo"
}
}
```
Note that for the access token we recommend using a
[partial configuration](/docs/backends/config.html).
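As a minimal sketch of that approach, the token can be left out of the
configuration on disk and supplied separately (the `init` flag value below is
a placeholder):

```hcl
terraform {
  backend "atlas" {
    name = "bigbang/example"

    # access_token is intentionally omitted here; supply it via the
    # ATLAS_TOKEN environment variable or at initialization time with:
    #   terraform init -backend-config="access_token=..."
  }
}
```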
## Example Referencing
```hcl
data "terraform_remote_state" "foo" {
backend = "atlas"
config {
name = "bigbang/example"
access_token = "X2iTFefU5aWOjg.atlasv1.YaDa"
}
}
```
## Configuration variables
The following configuration options / environment variables are supported:
* `name` - (Required) Full name of the environment (`<username>/<name>`)
* `access_token` / `ATLAS_TOKEN` - (Required) Terraform Enterprise API token
* `address` - (Optional) Address to alternative Terraform Enterprise location (Terraform Enterprise endpoint)


@ -41,7 +41,7 @@ Common commands:
init Initialize a new or existing Terraform configuration
output Read an output from a state file
plan Generate and show an execution plan
push Upload this Terraform module to Atlas to run
push Upload this Terraform module to Terraform Enterprise to run
refresh Update local state file against real resources
show Inspect Terraform state or plan
taint Manually mark a resource for recreation


@ -3,25 +3,25 @@ layout: "docs"
page_title: "Command: push"
sidebar_current: "docs-commands-push"
description: |-
The `terraform push` command is used to upload the Terraform configuration to HashiCorp's Atlas service for automatically managing your infrastructure in the cloud.
The `terraform push` command is used to upload the Terraform configuration to HashiCorp's Terraform Enterprise service for automatically managing your infrastructure in the cloud.
---
# Command: push
The `terraform push` command uploads your Terraform configuration to
be managed by HashiCorp's [Atlas](https://atlas.hashicorp.com).
By uploading your configuration to Atlas, Atlas can automatically run
be managed by HashiCorp's [Terraform Enterprise](https://www.hashicorp.com/products/terraform/).
Once your configuration is uploaded, Terraform Enterprise can automatically run
Terraform for you, save all state transitions, save plans,
and keep a history of all Terraform runs.
This makes it significantly easier to use Terraform as a team: team
members modify the Terraform configurations locally and continue to
use normal version control. When the Terraform configurations are ready
to be run, they are pushed to Atlas, and any member of your team can
to be run, they are pushed to Terraform Enterprise, and any member of your team can
run Terraform with the push of a button.
Atlas can also be used to set ACLs on who can run Terraform, and a
future update of Atlas will allow parallel Terraform runs and automatically
Terraform Enterprise can also be used to set ACLs on who can run Terraform, and a
future update of Terraform Enterprise will allow parallel Terraform runs and automatically
perform infrastructure locking so only one run is modifying the same
infrastructure at a time.
@ -34,31 +34,31 @@ The `path` argument is the same as for the
The command-line flags are all optional. The list of available flags is:
* `-atlas-address=<url>` - An alternate address to an Atlas instance.
* `-atlas-address=<url>` - An alternate address to a Terraform Enterprise instance.
Defaults to `https://atlas.hashicorp.com`.
* `-upload-modules=true` - If true (default), then the
[modules](/docs/modules/index.html)
being used are all locked at their current checkout and uploaded
completely to Atlas. This prevents Atlas from running `terraform get`
completely. This prevents Terraform Enterprise from running `terraform get`
for you.
* `-name=<name>` - Name of the infrastructure configuration in Atlas.
* `-name=<name>` - Name of the infrastructure configuration in Terraform Enterprise.
The format of this is: "username/name" so that you can upload
configurations not just to your account but to other accounts and
organizations. This setting can also be set in the configuration
in the
[Atlas section](/docs/configuration/atlas.html).
[Terraform Enterprise section](/docs/configuration/terraform-enterprise.html).
* `-no-color` - Disables output with coloring
* `-overwrite=foo` - Marks a specific variable to be updated on Atlas.
Normally, if a variable is already set in Atlas, Terraform will not
* `-overwrite=foo` - Marks a specific variable to be updated.
Normally, if a variable is already set, Terraform will not
send the local value (even if it is different). This forces it to
send the local value to Atlas. This flag can be repeated multiple times.
send the local value to Terraform Enterprise. This flag can be repeated multiple times.
* `-token=<token>` - Atlas API token to use to authorize the upload.
* `-token=<token>` - Terraform Enterprise API token to use to authorize the upload.
If blank or unspecified, the `ATLAS_TOKEN` environment variable
will be used.
@ -88,21 +88,21 @@ The reason Terraform uploads all of these files is because Terraform
cannot know what is and isn't being used for provisioning, so it uploads
all the files to be safe. To exclude certain files, specify the `-exclude`
flag when pushing, or specify the `exclude` parameter in the
[Atlas configuration section](/docs/configuration/atlas.html).
[Terraform Enterprise configuration section](/docs/configuration/terraform-enterprise.html).
## Terraform Variables
When you `push`, Terraform will automatically set the local values of
your Terraform variables on Atlas. The values are only set if they
don't already exist on Atlas. If you want to force push a certain
your Terraform variables on Terraform Enterprise. The values are only set if they
don't already exist. If you want to force push a certain
variable value to update it, use the `-overwrite` flag.
All the variable values stored on Atlas are encrypted and secured
All the variable values stored are encrypted and secured
using [Vault](https://www.vaultproject.io). We blogged about the
[architecture of our secure storage system](https://www.hashicorp.com/blog/how-atlas-uses-vault-for-managing-secrets.html) if you want more detail.
The variable values can be updated using the `-overwrite` flag or via
the [Atlas website](https://atlas.hashicorp.com). An example of updating
the [Terraform Enterprise website](https://www.hashicorp.com/products/terraform/). An example of updating
just a single variable `foo` is shown below:
```shell
@ -111,7 +111,7 @@ $ terraform push -var 'foo=bar' -overwrite foo
Both the `-var` and `-overwrite` flags are required. The `-var` flag
sets the value locally (the exact same process as commands such as apply
or plan), and the `-overwrite` flag tells the push command to update Atlas.
or plan), and the `-overwrite` flag tells the push command to update Terraform Enterprise.
## Remote State Requirement
@ -122,11 +122,11 @@ configuration to be managed remotely. For it to keep the state in sync
and for you to be able to easily access that state, remote state must
be enabled instead of juggling local files.
While `terraform push` sends your configuration to be managed by Atlas,
the remote state backend _does not_ have to be Atlas. It can be anything
as long as it is accessible by the public internet, since Atlas will need
While `terraform push` sends your configuration to be managed by Terraform Enterprise,
the remote state backend _does not_ have to be Terraform Enterprise. It can be anything
as long as it is accessible by the public internet, since Terraform Enterprise will need
to be able to communicate to it.
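For example, a remote state backend that Terraform Enterprise can reach over
the public internet might look like the following sketch (the bucket, key, and
region values are placeholders):

```hcl
terraform {
  backend "s3" {
    # Any publicly reachable backend works here; S3 is only an illustration.
    bucket = "example-terraform-state"
    key    = "prod/terraform.tfstate"
    region = "us-east-1"
  }
}
```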
**Warning:** The credentials for accessing the remote state will be
sent up to Atlas as well. Therefore, we recommend you use access keys
sent up to Terraform Enterprise as well. Therefore, we recommend you use access keys
that are restricted if possible.


@ -1,58 +0,0 @@
---
layout: "docs"
page_title: "Configuring Atlas"
sidebar_current: "docs-config-atlas"
description: |-
Atlas is the ideal way to use Terraform in a team environment. Atlas will run Terraform for you, safely handle parallelization across different team members, save run history along with plans, and more.
---
# Atlas Configuration
Terraform can be configured to be able to upload to HashiCorp's
[Atlas](https://atlas.hashicorp.com). This configuration doesn't change
the behavior of Terraform itself, it only configures your Terraform
configuration to support being uploaded to Atlas via the
[push command](/docs/commands/push.html).
For more information on the benefits of uploading your Terraform
configuration to Atlas, please see the
[push command documentation](/docs/commands/push.html).
This page assumes you're familiar with the
[configuration syntax](/docs/configuration/syntax.html)
already.
## Example
Atlas configuration looks like the following:
```hcl
atlas {
name = "mitchellh/production-example"
}
```
## Description
The `atlas` block configures the settings when Terraform is
[pushed](/docs/commands/push.html) to Atlas. Only one `atlas` block
is allowed.
Within the block (the `{ }`) is configuration for Atlas uploading.
No keys are required, but the key typically set is `name`.
**No value within the `atlas` block can use interpolations.** Due
to the nature of this configuration, interpolations are not possible.
If you want to parameterize these settings, use the Atlas block to
set defaults, then use the command-line flags of the
[push command](/docs/commands/push.html) to override.
## Syntax
The full syntax is:
```text
atlas {
name = VALUE
}
```


@ -21,7 +21,7 @@ present read-only views into pre-existing data, or they compute
new values on the fly within Terraform itself.
For example, a data source may retrieve artifact information from
Atlas, configuration information from Consul, or look up a pre-existing
Terraform Enterprise, configuration information from Consul, or look up a pre-existing
AWS resource by filtering on its attributes and tags.
Every data source in Terraform is mapped to a provider based


@ -154,6 +154,8 @@ The supported built-in functions are:
* `ceil(float)` - Returns the least integer value greater than or equal
to the argument.
* `chomp(string)` - Removes trailing newlines from the given string.
* `cidrhost(iprange, hostnum)` - Takes an IP address range in CIDR notation
and creates an IP address with the given host number. For example,
`cidrhost("10.0.0.0/8", 2)` returns `10.0.0.2`.


@ -114,3 +114,14 @@ KEY {
CONFIG
}
```
## Interpolation
Providers support [interpolation syntax](/docs/configuration/interpolation.html), allowing dynamic configuration at run time.
```hcl
provider "aws" {
region = "${var.aws_region}"
}
```
-> **NOTE:** Because providers are one of the first things loaded when Terraform parses the graph, it is not possible to use the output from modules or resources as inputs to the provider. At this time, only [variables](/docs/configuration/variables.html) and [data sources](/docs/configuration/data-sources.html), including [remote state](/docs/providers/terraform/d/remote_state.html), may be used in an interpolation inside a provider stanza.
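As a sketch of the data source case, a value exported from remote state can
feed a provider, assuming the referenced environment defines a `region` output
(all names below are placeholders):

```hcl
data "terraform_remote_state" "core" {
  backend = "atlas"

  config {
    name = "bigbang/example"
    # access_token may come from the ATLAS_TOKEN environment variable
  }
}

provider "aws" {
  # Permitted because data sources, including remote state, may be
  # interpolated inside a provider stanza.
  region = "${data.terraform_remote_state.core.region}"
}
```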


@ -0,0 +1,64 @@
---
layout: "docs"
page_title: "Configuring Terraform Enterprise"
sidebar_current: "docs-config-terraform-enterprise"
description: |-
Terraform Enterprise is the ideal way to use Terraform in a team environment. Terraform Enterprise will run Terraform for you, safely handle parallelization across different team members, save run history along with plans, and more.
---
# Terraform Enterprise Configuration
Terraform can be configured to upload to HashiCorp's
[Terraform Enterprise](https://www.hashicorp.com/products/terraform/). This configuration doesn't change
the behavior of Terraform itself; it only configures your Terraform
configuration to support being uploaded to Terraform Enterprise via the
[push command](/docs/commands/push.html).
For more information on the benefits of uploading your Terraform
configuration to Terraform Enterprise, please see the
[push command documentation](/docs/commands/push.html).
This page assumes you're familiar with the
[configuration syntax](/docs/configuration/syntax.html)
already.
~> **Why is this called "atlas"?** Atlas was previously a commercial offering
from HashiCorp that included a full suite of enterprise products. The products
have since been broken apart into their individual products, like **Terraform
Enterprise**. While this transition is in progress, you may see references to
"atlas" in the documentation. We apologize for the inconvenience.
## Example
Terraform Enterprise configuration looks like the following:
```hcl
atlas {
name = "mitchellh/production-example"
}
```
## Description
The `atlas` block configures the settings when Terraform is
[pushed](/docs/commands/push.html) to Terraform Enterprise. Only one `atlas` block
is allowed.
Within the block (the `{ }`) is configuration for Atlas uploading.
No keys are required, but the key typically set is `name`.
**No value within the `atlas` block can use interpolations.** Due
to the nature of this configuration, interpolations are not possible.
If you want to parameterize these settings, use the Atlas block to
set defaults, then use the command-line flags of the
[push command](/docs/commands/push.html) to override.
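For example, the configured name can be overridden on the command line when
pushing. A sketch, with an illustrative slug:

```text
$ terraform push -name="mitchellh/staging-example"
```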
## Syntax
The full syntax is:
```text
atlas {
name = VALUE
}
```

View File

@ -0,0 +1,174 @@
---
layout: "enterprise"
page_title: "Configurations - API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api-configurations"
description: |-
A configuration represents settings associated with a resource that runs
Terraform with versions of Terraform configuration.
---
# Configuration API
A configuration represents settings associated with a resource that runs
Terraform with versions of Terraform configuration. A configuration version
represents a single version of that Terraform configuration: each set of changes
to Terraform HCL files, or to the scripts used in those files, should have an
associated configuration version.
When creating versions via the API, the `variables` attribute can be sent to
include the necessary variables for the Terraform configuration. Configurations
have many configuration versions, which represent versions of Terraform
configuration templates and other associated configuration. Most operations take
place on the configuration version, not the configuration.
## Get Latest Configuration Version
This endpoint gets the latest configuration version.
| Method | Path |
| :----- | :------------- |
| `GET` | `/terraform/configurations/:username/:name/versions/latest` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username or organization
name under which to get the latest configuration version. This username must
already exist in the system, and the user must have permission to create new
configuration versions under this namespace. This is specified as part of the
URL.
- `:name` `(string: <required>)` - Specifies the name of the configuration for
which to get the latest configuration. This is specified as part of the URL.
### Sample Request
```text
$ curl \
--header "X-Atlas-Token: ..." \
https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions/latest
```
### Sample Response
```json
{
"version": {
"version": 6,
"metadata": {
"foo": "bar"
},
"tf_vars": [],
"variables": {}
}
}
```
- `version` `(int)` - the unique version instance number.
- `metadata` `(map<string|string>)` - a map of arbitrary metadata for this
version.
## Create Configuration Version
This endpoint creates a new configuration version.
| Method | Path |
| :----- | :------------- |
| `POST` | `/terraform/configurations/:username/:name/versions` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username or organization
name under which to create this configuration version. This username must
already exist in the system, and the user must have permission to create new
configuration versions under this namespace. This is specified as part of the
URL.
- `:name` `(string: <required>)` - Specifies the name of the configuration for
which to create a new version. This is specified as part of the URL.
- `metadata` `(map<string|string>)` - Specifies an arbitrary hash of key-value
metadata pairs. This is specified as the payload as JSON.
- `variables` `(map<string|string>)` - Specifies a hash of key-value pairs that
will be made available as variables to this version.
### Sample Payload
```json
{
"version": {
"metadata": {
"git_branch": "master",
"remote_type": "atlas",
"remote_slug": "hashicorp/atlas"
},
"variables": {
"ami_id": "ami-123456",
"target_region": "us-east-1",
"consul_count": "5",
"consul_ami": "ami-123456"
}
}
}
```
### Sample Request
```text
$ curl \
--request POST \
--header "X-Atlas-Token: ..." \
--header "Content-Type: application/json" \
--data @payload.json \
https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions
```
### Sample Response
```json
{
"version": 6,
"upload_path": "https://binstore.hashicorp.com/ddbd7db6-f96c-4633-beb6-22fe2d74eeed",
"token": "ddbd7db6-f96c-4633-beb6-22fe2d74eeed"
}
```
- `version` `(int)` - the unique version instance number. This is
auto-incrementing.
- `upload_path` `(string)` - the path where the archive should be uploaded via a
`POST` request.
- `token` `(string)` - the token that should be used when uploading the archive
to the `upload_path`.
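Putting these together, a hypothetical follow-up step might archive the
configuration and upload it to the returned `upload_path`. The archive contents
and the use of the returned `token` as an `X-Atlas-Token` header are assumptions
for illustration, not documented behavior:

```text
$ tar -czf configuration.tar.gz *.tf scripts/
$ curl \
    --request POST \
    --header "X-Atlas-Token: ddbd7db6-f96c-4633-beb6-22fe2d74eeed" \
    --data-binary @configuration.tar.gz \
    https://binstore.hashicorp.com/ddbd7db6-f96c-4633-beb6-22fe2d74eeed
```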
## Check Upload Progress
This endpoint retrieves the progress for an upload of a configuration version.
| Method | Path |
| :----- | :------------- |
| `GET` | `/terraform/configurations/:username/:name/versions/progress/:token` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username or organization
  for which to read progress. This is specified as part of the URL.
- `:name` `(string: <required>)` - Specifies the name of the configuration for
  which to read progress. This is specified as part of the URL.
- `:token` `(string: <required>)` - Specifies the token that was returned from
the create option. **This is not an Atlas Token!** This is specified as part
of the URL.
### Sample Request
```text
$ curl \
--header "X-Atlas-Token: ..." \
https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions/progress/ddbd7db6-f96c-4633-beb6-22fe2d74eeed
```
### Sample Response

View File

@ -0,0 +1,69 @@
---
layout: "enterprise"
page_title: "Environments - API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api-environments"
description: |-
Environments represent running infrastructure managed by Terraform.
---
# Environments API
Environments represent running infrastructure managed by Terraform.
Environments can also be connected to Consul clusters. This documentation covers
the environment interactions with Terraform.
## Update Terraform Variables
This endpoint updates the Terraform variables for an environment. Due to the
sensitive nature of variables, they are not returned on success.
| Method | Path |
| :----- | :------------- |
| `PUT` | `/environments/:username/:name/variables` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username or organization
name under which to update variables. This username must already exist in the
system, and the user must have permission to create new configuration versions
under this namespace. This is specified as part of the URL.
- `:name` `(string: <required>)` - Specifies the name of the environment for
which to update variables. This is specified as part of the URL.
- `variables` `(map<string|string>)` - Specifies a key-value map of Terraform
variables to be updated. Existing variables will only be removed when their
value is empty. Variables of the same key will be overwritten.
-> Note: Only string variables can be updated via the API currently. Creating or updating HCL variables is not yet supported.
### Sample Payload
```json
{
"variables": {
"desired_capacity": "15",
"foo": "bar"
}
}
```
### Sample Request
```text
$ curl \
--header "X-Atlas-Token: ..." \
--header "Content-Type: application/json" \
--request PUT \
--data @payload.json \
https://atlas.hashicorp.com/api/v1/environments/my-organization/my-environment/variables
```
### Sample Response
```text
```
(empty body)

View File

@ -0,0 +1,59 @@
---
layout: "enterprise"
page_title: "API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api"
description: |-
Terraform Enterprise provides an API for a **subset of features**.
---
# Terraform Enterprise API Documentation
Terraform Enterprise provides an API for a **subset of features** available. For
questions or requests for new API features please email
[support@hashicorp.com](mailto:support@hashicorp.com).
The list of available endpoints is in the navigation.
## Authentication
All requests must be authenticated with an `X-Atlas-Token` HTTP header. This
token can be generated or revoked on the account tokens page. Your token will
have access to all resources your account has access to.
For organization level resources, we recommend creating a separate user account
that can be added to the organization with the specific privilege level
required.
## Response Codes
Standard HTTP response codes are returned. `404 Not Found` codes are returned
for all resources that a user does not have access to, as well as for resources
that don't exist. This is done to avoid a potential attacker discovering the
existence of a resource.
## Errors
Errors are returned in JSON format:
```json
{
"errors": {
"name": [
"has already been taken"
]
}
}
```
## Versioning
The API currently resides under the `/v1` prefix. Future APIs will increment
this version, leaving the `/v1` API intact, though in the future certain features
may be deprecated. In that case, ample notice to migrate to the new API will be
provided.
## Content Type
The API accepts namespaced attributes in either JSON or
`application/x-www-form-urlencoded`. We recommend using JSON, but for simplicity
form style requests are supported.

View File

@ -0,0 +1,65 @@
---
layout: "enterprise"
page_title: "Runs - API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api-runs"
description: |-
  Runs in Terraform Enterprise represent a two-step Terraform plan and a subsequent apply.
---
# Runs API
Runs in Terraform Enterprise represent a two-step Terraform plan and a
subsequent apply.
Runs are queued under [environments](/docs/enterprise/api/environments.html)
and require a two-step confirmation workflow. However, environments
can be configured to auto-apply to avoid this.
## Queue Run
Starts a new run (plan) in the environment. A configuration version must be
present on the environment for this to succeed; otherwise the endpoint returns a 404.
| Method | Path |
| :----- | :------------- |
| `POST` | `/environments/:username/:name/plan` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username or organization
name under which to queue the run. This username must already exist in the
system, and the user must have permission to queue runs under this namespace.
This is specified as part of the URL.
- `:name` `(string: <required>)` - Specifies the name of the environment on
which to queue the run. This is specified as part of the URL.
- `destroy` `(bool: false)` - Specifies if the plan should be a destroy plan.
### Sample Payload
```json
{
"destroy": false
}
```
### Sample Request
```text
$ curl \
--request POST \
--header "X-Atlas-Token: ..." \
--header "Content-Type: application/json" \
--data @payload.json \
https://atlas.hashicorp.com/api/v1/environments/my-organization/my-environment/plan
```
### Sample Response
```json
{
"success": true
}
```

View File

@ -0,0 +1,67 @@
---
layout: "enterprise"
page_title: "State - API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api-states"
description: |-
State represents the status of your infrastructure at the last time Terraform was run.
---
# State API
State represents the status of your infrastructure at the last time Terraform
was run. States can be pushed to Terraform Enterprise from Terraform's CLI after
an apply is done locally, or state is automatically stored if the apply is done
in Terraform Enterprise.
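For reference, a configuration can opt in to this storage with the `atlas`
backend. A minimal sketch, with an illustrative environment name:

```hcl
terraform {
  backend "atlas" {
    name = "my-organization/docs-demo-one"
  }
}
```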
## List of States
This endpoint gets a list of states accessible to the user corresponding to the
provided token.
| Method | Path |
| :----- | :------------- |
| `GET` | `/terraform/state` |
### Parameters
- `?username` `(string: "")` - Specifies the organization/username to filter
states
- `?page` `(int: 1)` - Specifies the pagination, which defaults to page 1.
### Sample Requests
```text
$ curl \
--header "X-Atlas-Token: ..." \
https://atlas.hashicorp.com/api/v1/terraform/state
```
```text
$ curl \
--header "X-Atlas-Token: ..." \
https://atlas.hashicorp.com/api/v1/terraform/state?username=acme
```
### Sample Response
```json
{
"states": [
{
"updated_at": "2017-02-03T19:52:37.693Z",
"environment": {
"username": "my-organization",
"name": "docs-demo-one"
}
},
{
"updated_at": "2017-04-06T15:48:49.677Z",
"environment": {
"username": "my-organization",
"name": "docs-demo-two"
}
}
]
}
```

View File

@ -0,0 +1,49 @@
---
layout: "enterprise"
page_title: "Users - API - Terraform Enterprise"
sidebar_current: "docs-enterprise-api-users"
description: |-
Users are both users and organizations in Terraform Enterprise. They are the parent resource of all resources.
---
# Users API
Users are both users and organizations in Terraform Enterprise. They are the
parent resource of all resources.
Currently, only the retrieval of users is available on the API. Additionally,
only Vagrant box resources will be listed. Boxes will be returned based on
permissions over the organization or user.
## Read User
This endpoint retrieves information about a single user.
| Method | Path |
| :----- | :------------- |
| `GET` | `/user/:username` |
### Parameters
- `:username` `(string: <required>)` - Specifies the username to search. This is
specified as part of the URL.
### Sample Request
```text
$ curl \
--header "X-Atlas-Token: ..." \
https://atlas.hashicorp.com/api/v1/user/my-user
```
### Sample Response
```json
{
"username": "sally-seashell",
"avatar_url": "https://www.gravatar.com/avatar/...",
"profile_html": "Sally is...",
"profile_markdown": "Sally is...",
"boxes": []
}
```

View File

@ -0,0 +1,55 @@
---
layout: "enterprise"
page_title: "Provider - Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-artifacts-provider"
description: |-
Terraform has a provider for managing artifacts called `atlas_artifact`.
---
# Artifact Provider
Terraform has a [provider](https://terraform.io/docs/providers/index.html) for managing Terraform Enterprise artifacts called `atlas_artifact`.
This is used to make data stored in Artifacts available to Terraform for
interpolation. In the following example, an artifact is defined and references
an AMI ID stored in Terraform Enterprise.
~> **Why is this called "atlas"?** Atlas was previously a commercial offering
from HashiCorp that included a full suite of enterprise products. The products
have since been broken apart into their individual products, like **Terraform
Enterprise**. While this transition is in progress, you may see references to
"atlas" in the documentation. We apologize for the inconvenience.
```hcl
provider "atlas" {
# You can also set the atlas token by exporting ATLAS_TOKEN into your env
token = "${var.atlas_token}"
}
resource "atlas_artifact" "web-worker" {
name = "my-username/web-worker"
type = "amazon.image"
version = "latest"
}
resource "aws_instance" "worker-machine" {
ami = "${atlas_artifact.web-worker.metadata_full.region-us-east-1}"
instance_type = "m1.small"
}
```
This automatically pulls the "latest" artifact version.
After a new artifact version is created via a Packer build, the following
diff would be generated when running `terraform plan`.
```
-/+ aws_instance.worker-machine
ami: "ami-168f9d7e" => "ami-2f3a9df2" (forces new resource)
instance_type: "m1.small" => "m1.small"
```
This allows you to reference changing artifacts and trigger new deployments upon
pushing subsequent Packer builds.
Read more about artifacts in the [Terraform documentation](https://terraform.io/docs/providers/terraform-enterprise/r/artifact.html).

View File

@ -0,0 +1,14 @@
---
layout: "enterprise"
page_title: "Creating AMIs - Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-artifacts-amis"
description: |-
Creating AMI Artifacts with Packer.
---
# Creating AMI Artifacts with Packer and Terraform Enterprise
Currently, the best way to create AWS AMI artifacts is with Packer.
We detail how to do this in the [Packer section of the documentation](/docs/enterprise/packer/artifacts/creating-amis.html).

View File

@ -0,0 +1,21 @@
---
layout: "enterprise"
page_title: "Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-artifacts"
description: |-
Terraform Enterprise can be used to store artifacts for use by Terraform. Typically, artifacts are stored with Packer.
---
# About Terraform Artifacts
Terraform Enterprise can be used to store artifacts for use by Terraform.
Typically, artifacts are [stored with Packer](https://packer.io/docs).
Artifacts can be used to deploy and manage images
of configuration. Artifacts are generic, but can be of varying types
like `amazon.image`. See the Packer [`artifact_type`](https://packer.io/docs/post-processors/atlas.html#artifact_type)
docs for more information.
Packer can create artifacts both inside and outside of the Terraform
Enterprise network. This is possible due to the post-processor's use of the
public artifact API to store the artifacts.

View File

@ -0,0 +1,70 @@
---
layout: "enterprise"
page_title: "Managing Versions - Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-artifacts-versions"
description: |-
Artifacts are versioned and assigned a version number, here is how to manage the versions.
---
# Managing Artifact Versions
Artifacts stored in Terraform Enterprise are versioned and assigned a version
number. Versions are useful for rolling back, auditing, and deploying specific
versions of images to certain environments in a targeted way.
This assumes you are familiar with the [artifact provider](https://terraform.io/docs/providers/terraform-enterprise/index.html)
in Terraform.
### Finding the Version of an Artifact
Artifact versions can be found with the [`terraform show` command](https://terraform.io/docs/commands/show.html),
or by looking at the Packer logs generated during builds. After a
successful artifact upload, version numbers are displayed. "latest" can
be used to refer to the latest version of the artifact.
The following output is from `terraform show`.
```text
atlas_artifact.web-worker:
id = us-east-1:ami-3a0a1d52
build = latest
metadata_full.# = 1
metadata_full.region-us-east-1 = ami-3a0a1d52
name = my-username/web-worker
slug = my-username/web-worker/amazon.image/7
type = amazon.image
```
In this case, the version is 7 and can be found in the persisted slug
attribute.
### Pinning Artifacts to Specific Versions
You can pin artifacts to a specific version. This allows for a targeted
deploy.
```hcl
resource "atlas_artifact" "web-worker" {
name = "my-username/web-worker"
type = "amazon.image"
version = 7
}
```
This will use version 7 of the `web-worker` artifact.
### Pinning Artifacts to Specific Builds
Artifacts can also be pinned to a Terraform Enterprise build number. This is only
possible if Terraform Enterprise was used to build the artifact with Packer.
```hcl
resource "atlas_artifact" "web-worker" {
name = "my-username/web-worker"
type = "amazon.image"
build = 5
}
```
It's recommended to use versions instead of builds, as versions are easier to
track when building outside of the Terraform Enterprise environment.

View File

@ -0,0 +1,13 @@
---
layout: "enterprise"
page_title: "FAQ - Terraform Enterprise"
sidebar_current: "docs-enterprise-faq"
description: |-
Frequently Asked Questions.
---
# Frequently Asked Questions
[Monolithic Artifacts](/docs/enterprise/faq/monolithic-artifacts.html) - *How do I build multiple applications into one artifact?*
[Rolling Deployments](/docs/enterprise/faq/rolling-deployments.html) - *How do I configure rolling deployments?*

View File

@ -0,0 +1,159 @@
---
layout: "enterprise"
page_title: "Monolithic Artifacts - FAQ - Terraform Enterprise"
sidebar_current: "docs-enterprise-faq-monolithic"
description: |-
How do I build multiple applications into one artifact?
---
# Monolithic Artifacts
*How do I build multiple applications into one artifact?*
Create your new Applications in Terraform Enterprise using the application
compilation feature.
You can either link each Application to the single Build Template you will be
using to create the monolithic artifact, or run periodic Packer builds.
Each time an Application is pushed, it will store the new application version in
the artifact registry as a tarball. These will be available for you to download
at build time on the machines to which they belong.
Here's an example `compile.json` template that you include with the rest of
your application files to do the compiling:
```json
{
"variables": {
"app_slug": "{{ env `ATLAS_APPLICATION_SLUG` }}"
},
"builders": [
{
"type": "docker",
"image": "ubuntu:14.04",
"commit": true
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"apt-get -y update"
]
},
{
"type": "file",
"source": ".",
"destination": "/tmp/app"
},
{
"type": "shell",
"inline": [
"cd /tmp/app",
"make"
]
},
{
"type": "file",
"source": "/tmp/compiled-app.tar.gz",
"destination": "compiled-app.tar.gz",
"direction": "download"
}
],
"post-processors": [
[
{
"type": "artifice",
"files": ["compiled-app.tar.gz"]
},
{
"type": "atlas",
"artifact": "{{user `app_slug` }}",
"artifact_type": "archive"
}
]
]
}
```
In your Packer template, you can download each of the latest application
artifacts onto the host using the shell provisioner:
```text
$ curl -L -H "X-Atlas-Token: ${ATLAS_TOKEN}" https://atlas.hashicorp.com/api/v1/artifacts/hashicorp/example/archive/latest/file -o example.tar.gz
```
Here's an example Packer template:
```json
{
"variables": {
"atlas_username": "{{env `ATLAS_USERNAME`}}",
"aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
"aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}",
"aws_region": "{{env `AWS_DEFAULT_REGION`}}",
"instance_type": "c3.large",
"source_ami": "ami-9a562df2",
"name": "example",
"ssh_username": "ubuntu",
"app_dir": "/app"
},
"push": {
"name": "{{user `atlas_username`}}/{{user `name`}}",
"vcs": false
},
"builders": [
{
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "{{user `aws_region`}}",
"vpc_id": "",
"subnet_id": "",
"instance_type": "{{user `instance_type`}}",
"source_ami": "{{user `source_ami`}}",
"ami_regions": [],
"ami_name": "{{user `name`}} {{timestamp}}",
"ami_description": "{{user `name`}} AMI",
"run_tags": { "ami-create": "{{user `name`}}" },
"tags": { "ami": "{{user `name`}}" },
"ssh_username": "{{user `ssh_username`}}",
"ssh_timeout": "10m",
"ssh_private_ip": false,
"associate_public_ip_address": true
}
],
"provisioners": [
{
"type": "shell",
"execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
"inline": [
"apt-get -y update",
"apt-get -y upgrade",
"apt-get -y install curl unzip tar",
"mkdir -p {{user `app_dir`}}",
"chmod a+w {{user `app_dir`}}",
"cd /tmp",
"curl -L -H 'X-Atlas-Token: ${ATLAS_TOKEN}' https://atlas.hashicorp.com/api/v1/artifacts/{{user `atlas_username`}}/{{user `name`}}/archive/latest/file -o example.tar.gz",
"tar -xzf example.tar.gz -C {{user `app_dir`}}"
]
}
],
"post-processors": [
{
"type": "atlas",
"artifact": "{{user `atlas_username`}}/{{user `name`}}",
"artifact_type": "amazon.image",
"metadata": {
"created_at": "{{timestamp}}"
}
}
]
}
```
Once downloaded, you can place each application slug where it needs to go to
produce the monolithic artifact you are accustomed to.

View File

@ -0,0 +1,94 @@
---
layout: "enterprise"
page_title: "Rolling Deployments - FAQ - Terraform Enterprise"
sidebar_current: "docs-enterprise-faq-deployments"
description: |-
How do I configure rolling deployments in Terraform Enterprise?
---
# Rolling Deployments
*How do I configure rolling deployments?*
Users are able to quickly change out the artifact version being utilized
by Terraform, using variables within Terraform Enterprise. This is particularly
useful when testing specific versions of a given artifact without performing a
full rollout. This configuration also allows one to deploy any version of an
artifact with ease, simply by changing a version variable in Terraform and
re-deploying.
Here is an example:
```hcl
variable "type" { default = "amazon.image" }
variable "region" {}
variable "atlas_username" {}
variable "pinned_name" {}
variable "pinned_version" { default = "latest" }
resource "atlas_artifact" "pinned" {
name = "${var.atlas_username}/${var.pinned_name}"
type = "${var.type}"
version = "${var.pinned_version}"
lifecycle { create_before_destroy = true }
metadata {
region = "${var.region}"
}
}
output "pinned" { value = "${atlas_artifact.pinned.metadata_full.ami_id}" }
```
In the above example we have an `atlas_artifact` resource where you pass in the
version number via the variable `pinned_version`. (_note: this variable defaults
to latest_). If you ever want to deploy any other version, you just update the
variable `pinned_version` and redeploy.
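For example, deploying version 7 rather than "latest" is a single variable
override. A sketch using the standard variable flags:

```text
$ terraform plan -var 'pinned_version=7'
$ terraform apply -var 'pinned_version=7'
```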
Below is similar to the first example, but it is in the form of a module that
handles the creation of artifacts:
```hcl
variable "type" { default = "amazon.image" }
variable "region" {}
variable "atlas_username" {}
variable "artifact_name" {}
variable "artifact_version" { default = "latest" }
resource "atlas_artifact" "artifact" {
name = "${var.atlas_username}/${var.artifact_name}"
type = "${var.type}"
count = "${length(split(",", var.artifact_version))}"
version = "${element(split(",", var.artifact_version), count.index)}"
lifecycle { create_before_destroy = true }
metadata { region = "${var.region}" }
}
output "amis" { value = "${join(",", atlas_artifact.artifact.*.metadata_full.ami_id)}" }
```
One can then use the module as follows (_note: the source will likely be
different depending on the location of the module_):
```hcl
module "artifact_consul" {
source = "../../../modules/aws/util/artifact"
type = "${var.artifact_type}"
region = "${var.region}"
atlas_username = "${var.atlas_username}"
artifact_name = "${var.consul_artifact_name}"
artifact_version = "${var.consul_artifacts}"
}
```
In the above example, we have created artifacts for Consul and can create two
versions of the artifact at once, "latest" and "pinned" (see the sketch below).
This is useful when rolling a cluster (like Consul) one node at a time, keeping
some nodes pinned to the current version and others deployed with the latest
artifact.
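A hypothetical module call that creates both versions at once might pass a
comma-separated list; the values shown are illustrative:

```hcl
module "artifact_consul" {
  source           = "../../../modules/aws/util/artifact"
  type             = "amazon.image"
  region           = "us-east-1"
  atlas_username   = "my-organization"
  artifact_name    = "consul"

  # One element per version: "latest" plus a pinned, known-good version
  artifact_version = "latest,7"
}
```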
There are additional details for implementing rolling deployments in the [Best-Practices Repo](https://github.com/hashicorp/best-practices/blob/master/terraform/providers/aws/us_east_1_prod/us_east_1_prod.tf#L105-L123), as some topics are not covered in this FAQ (e.g. using the Terraform Enterprise artifact in an instance).

View File

@ -0,0 +1,194 @@
---
layout: "enterprise"
page_title: "Glossary - Terraform Enterprise"
sidebar_current: "docs-enterprise-glossary"
description: |-
Terminology for Terraform Enterprise.
---
# Glossary
Terraform Enterprise, and this documentation, covers a large set of terminology
adopted from tools, industry standards and the community. This glossary seeks to
define as many of those terms as possible to help increase understanding when
interfacing with the platform and reading the documentation.
## Authentication Tokens
Authentication tokens are tokens used to authenticate with Terraform Enterprise
via APIs or through tools. Authentication tokens can be revoked, expired or
created under any user.
## ACL
ACL is an acronym for access control list. This defines access to a set of
resources. Access to an object in Terraform Enterprise limited to "read" for
certain users is an example of an ACL.
## Alert
An alert represents a health check status change on a Consul node that is sent
to Terraform Enterprise, and then recorded and distributed to various
notification methods.
## Application
An application is a set of code that represents an application that should be
deployed. Applications can be linked to builds to be made available in the
Packer environment.
## Apply
An apply is the second step of the two steps required for Terraform to make
changes to infrastructure. The apply is the process of communicating with
external APIs to make the changes.
## Artifact
An artifact is an abstract representation of something you wish to store and use
again that has undergone configuration, compilation or some other build process.
An artifact is typically an image created by Packer that is then deployed by
Terraform, or used locally with Vagrant.
## Box
Boxes are a Vagrant-specific package format. Vagrant can install and use images
in box format.
## Build
Builds are resources that represent Packer configurations. A build is a generic
name, sometimes called a "Build Configuration" when defined in the Terraform
Enterprise UI.
## Build Configuration
A build configuration is the set of settings associated with a resource that
creates artifacts via builds. A build configuration is the name in `packer push
-name acemeinc/web`.
## Catalog
The box catalog is a publicly available index of Vagrant Boxes that can be
downloaded from Terraform Enterprise and used for development.
## Consul
[Consul](https://consul.io) is a HashiCorp tool for service discovery,
configuration, and orchestration. Consul enables rapid deployment,
configuration, monitoring and maintenance of service-oriented architectures.
## Datacenter
A datacenter represents a group of nodes in the same network or datacenter
within Consul.
## Environment
Environments show the real-time status of your infrastructure, any pending
changes, and its change history. Environments can be configured to use any or
all of these three components.
Environments are the namespace of your Terraform Enterprise managed
infrastructure. As an example, if you have a production environment for a
company named Acme Inc., your environment may be named
`my-username/production`.
To read more about features provided under environments, read the
[Terraform](/docs/enterprise) sections.
## Environment Variables
Environment variables injected into the environment of Packer builds or
Terraform Runs (plans and applies).
## Flapping
Flapping is something entering and leaving a healthy state rapidly. It is
typically associated with health checks that briefly report unhealthy status
before recovering.
## Health Check
Health checks trigger alerts by changing status on a Consul node. That status
change is seen by Terraform Enterprise, when connected, and an associated alert
is recorded and sent to any configured notification methods, like email.
## Infrastructure
An infrastructure is a stateful representation of a set of Consul datacenters.
## Operator
An operator is a person who is making changes to infrastructure or settings.
## Packer
[Packer](https://packer.io) is a tool for creating images for platforms such as
Amazon AWS, OpenStack, VMware, VirtualBox, Docker, and more — all from a single
source configuration.
## Packer Template
A Packer template is a JSON file that configures the various components of
Packer in order to create one or more machine images.
## Plan
A plan is the first step of the two steps required for Terraform to make
changes to infrastructure. The plan is the process of determining what changes
will be made to the infrastructure.
## Providers
Providers are often referenced when discussing Packer or Terraform. Terraform
providers manage resources in Terraform.
[Read more](https://terraform.io/docs/providers/index.html).
## Post-Processors
The post-processor section within a Packer template configures any
post-processing that will be done to images built by the builders. Examples of
post-processing would be compressing files, uploading artifacts, etc.
## Registry
Often referred to as the "Artifact Registry", the registry stores artifacts, be
it images or IDs for cloud provider images.
## Run
A run represents a two step Terraform plan and a subsequent apply.
## Service
A service in Consul represents an application or service, which could be active
on any number of nodes.
## Share
Shares let you instantly share public access to your running Vagrant
environment (virtual machine).
## State
Terraform state is the state of your managed infrastructure from the last time
Terraform was run. By default this state is stored in a local file named
`terraform.tfstate`, but it can also be stored in Terraform Enterprise and is
then called "Remote state".
## Terraform
[Terraform](https://terraform.io) is a tool for safely and efficiently changing
infrastructure across providers.
## Terraform Configuration
Terraform configuration is the configuration files and any files that may be
used in provisioners like `remote-exec`.
## Terraform Variables
Variables in Terraform, uploaded with `terraform push` or set in the UI. These
differ from environment variables as they are a first class Terraform variable
used in interpolation.

View File

@ -0,0 +1,19 @@
---
layout: "enterprise"
page_title: "Terraform Enterprise"
sidebar_current: "docs-enterprise-home"
description: |-
Terraform Enterprise is a tool for safely and efficiently changing infrastructure across providers.
---
# Terraform Enterprise Features
[Terraform Enterprise](https://www.hashicorp.com/products/terraform/) is a tool for safely and
efficiently changing infrastructure across providers.
This is a list of features specific to Terraform Enterprise.
- [Terraform Plans and Applies](/docs/enterprise/runs)
- [Terraform Artifact Registry](/docs/enterprise/artifacts)
- [Terraform Remote State Storage](/docs/enterprise/state)
- [Terraform Run Notifications](/docs/enterprise/runs/notifications.html)

View File

@ -0,0 +1,32 @@
---
layout: "enterprise"
page_title: "Authentication Policy - Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations-policy"
description: |-
Owners can set organization-wide authentication policy in Terraform Enterprise.
---
# Set an Organization Authentication Policy
Because organization membership affords members access to potentially sensitive
resources, owners can set organization-wide authentication policy in Terraform
Enterprise.
## Requiring Two-Factor Authentication
Organization owners can require that all organization team members use
[two-factor authentication](/docs/enterprise/user-accounts/authentication.html).
Those that lack two-factor authentication will be locked out of the web
interface until they enable it or leave the organization.
Visit your organization's configuration page to enable this feature. All
organization owners must have two-factor authentication enabled to require the
practice organization-wide. Note: locked-out users will still be able to interact
with Terraform Enterprise using their `ATLAS_TOKEN`.
## Disabling Two-Factor Authentication Requirement
Organization owners can disable the two-factor authentication requirement from
their organization's configuration page. Locked-out team members (those who have
not enabled two-factor authentication) will have their memberships reinstated.

View File

@ -0,0 +1,17 @@
---
layout: "enterprise"
page_title: "Create - Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations-create"
description: |-
How to create a Terraform Enterprise account.
---
# Create an Organization Account
To create an organization:
1. Create a personal account. You'll use this to create and administrate the
organization. You'll be able to add other users as owners of the organization,
so it won't be tied solely to your account.
1. Visit your new organization page to create the organization.

View File

@ -0,0 +1,16 @@
---
layout: "enterprise"
page_title: "Add a Credit Card - Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations-credit"
description: |-
You must add a credit card to your organization's account to setup auto billing.
---
# Add credit card details to an organization
To set up automated billing for your Terraform usage, you must add a credit card
to your organization's account. To do so, go into your account settings, then go
to the proper organization settings in the left navigation. Select billing in
the organization settings, and then enter your credit card information.
If you have any questions regarding billing or payment, contact [sales@hashicorp.com](mailto:sales@hashicorp.com).

View File

@ -0,0 +1,16 @@
---
layout: "enterprise"
page_title: "Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations"
description: |-
Organizations are a group of users in Terraform Enterprise that have access and ownership over shared resources.
---
# Organizations in Terraform Enterprise
Organizations are a group of users in Terraform Enterprise that have access and
ownership over shared resources. When operating within a team, we recommend
creating an organization to manage access control, auditing, billing and
authorization.
Each individual member of your organization should have their own account.

View File

@ -0,0 +1,26 @@
---
layout: "enterprise"
page_title: "Migrate - Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations-migrate"
description: |-
How to migrate existing organization.
---
# Migrate Organization
To migrate an existing user account to an organization:
1. Create or retrieve the username of a new personal account. You'll add this
account as an "owner" for the new organization during the migration process. If
you already have another account, write down your username.
2. Sign in as the account you wish to migrate and visit the migration page.
3. Put the username of the personal account you wish to make an owner of the
organization into the username text field and press "Migrate".
4. You should now be logged out and receive a confirmation email with the
personal account you migrated to.
5. Now, sign in with your personal account. If you visit your settings page, you
should see your migrated organization available to administrate.

View File

@ -0,0 +1,13 @@
---
layout: "enterprise"
page_title: "Trial - Organizations - Terraform Enterprise"
sidebar_current: "docs-enterprise-organizations-trials"
description: |-
Terraform Enterprise offers a 30-day trial.
---
# Start a trial
Terraform Enterprise offers organizations 30-day trials for [Terraform Enterprise](https://www.hashicorp.com/products/terraform/), [Consul Enterprise](https://www.hashicorp.com/consul.html), and Vagrant Enterprise. Note that trials are available for organizations, not users.
[Request a trial](https://www.hashicorp.com/products/terraform/) for your organization.

View File

@ -0,0 +1,65 @@
---
layout: "enterprise"
page_title: "Creating AMIs - Packer Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerartifacts-amis"
description: |-
Creating AMI artifacts with Terraform Enterprise.
---
# Creating AMI Artifacts with Terraform Enterprise
In an immutable infrastructure workflow, it's important to version and store
full images (artifacts) to be deployed. This section covers storing [AWS
AMI](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) images in
Terraform Enterprise to be queried and used later.
Note the actual AMI does _not get stored_. Terraform Enterprise simply keeps the
AMI ID as a reference to the target image. Tools like Terraform can then use
this in a deploy.
### Steps
If you run Packer in Terraform Enterprise, the following will happen after a [push](/docs/enterprise/packer/builds/starting.html):
1. Terraform Enterprise will run `packer build` against your template in our
infrastructure. This spins up an AWS instance in your account and provisions it
with any specified provisioners
2. Packer stops the instance and stores the result as an AMI in AWS under your
account. This then returns an ID (the artifact) that it passes to the
post-processor
3. The post-processor creates and uploads the new artifact version with the ID
in Terraform Enterprise of the type `amazon.image` for use later
### Example
Below is a complete example Packer template that starts an AWS instance.
```json
{
"push": {
"name": "my-username/frontend"
},
"provisioners": [],
"builders": [
{
"type": "amazon-ebs",
"access_key": "",
"secret_key": "",
"region": "us-east-1",
"source_ami": "ami-2ccc7a44",
"instance_type": "c3.large",
"ssh_username": "ubuntu",
"ami_name": "Terraform Enterprise Example {{ timestamp }}"
}
],
"post-processors": [
{
"type": "atlas",
"artifact": "my-username/web-server",
"artifact_type": "amazon.image"
}
]
}
```

View File

@ -0,0 +1,141 @@
---
layout: "enterprise"
page_title: "Creating Vagrant Boxes - Packer Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerartifacts-vagrant"
description: |-
Creating Vagrant artifacts with Terraform Enterprise.
---
# Creating Vagrant Boxes with Packer
We recommend using Packer to create boxes, as it is fully repeatable and
keeps a strong history of changes within Terraform Enterprise.
## Getting Started
Using Packer requires more up front effort, but the repeatable and automated
builds will end any manual management of boxes. Additionally, all boxes will be
stored and served from Terraform Enterprise, keeping a history along the way.
## Post-Processors
Packer uses
[post-processors](https://packer.io/docs/templates/post-processors.html) to
define how to process images and artifacts after provisioning. Both the
`vagrant` and `atlas` post-processors must be used in order to upload Vagrant
Boxes to Terraform Enterprise via Packer.
It's important that they are [sequenced](https://packer.io/docs/templates/post-processors.html)
in the Packer template so they run in order. This is done by nesting arrays:
```javascript
{
"post-processors": [
[
{
"type": "vagrant"
// ...
},
{
"type": "atlas"
// ...
}
]
]
}
```
Sequencing automatically passes the resulting artifact from one
post-processor to the next; in this case, the `.box` file.
### Vagrant Post-Processor
The [Vagrant post-processor](https://packer.io/docs/post-processors/vagrant.html) is required to package the image
from the build (an `.ovf` file, for example) into a `.box` file before
passing it to the `atlas` post-processor.
```json
{
"type": "vagrant",
"keep_input_artifact": false
}
```
The input artifact (e.g. an `.ovf` file) does not need to be kept when building Vagrant boxes,
as the resulting `.box` will contain it.
### Atlas Post-Processor
The [`atlas` post-processor](https://packer.io/docs/post-processors/atlas.html) takes the resulting `.box` file and uploads it, adding metadata about the box version.
```json
{
"type": "atlas",
"artifact": "my-username/dev-environment",
"artifact_type": "vagrant.box",
"metadata": {
"provider": "vmware_desktop",
"version": "0.0.1"
}
}
```
#### Attributes Required
These are the attributes of the `atlas` post-processor that are
required for uploading Vagrant boxes. A complete example is shown below.
- `artifact`: The username and box name (`username/name`) you're creating the version
of the box under. If the box doesn't exist, it will be automatically
created
- `artifact_type`: This must be `vagrant.box`. Terraform Enterprise uses this to determine
how to treat this artifact.
For `vagrant.box` type artifacts, you can specify keys in the metadata block:
- `provider`: The Vagrant provider for the box. Common providers are
`virtualbox`, `vmware_desktop`, `aws` and so on _(required)_
- `version`: This is the Vagrant box version and is constrained to the
  same formatting as in the web UI: `*.*.*` _(optional, but required for boxes
  with multiple providers)_. If left blank, the version will increment on the minor version (e.g. the initial version will be set to 0.1.0 and the subsequent version to 0.2.0).
- `description`: This is the description that will be shown with the
version of the box. You can use Markdown for links and style. _(optional)_
## Example
An example post-processor block for Terraform Enterprise and Vagrant is below. In this example,
the build runs on both VMware and VirtualBox, creating two
different providers for the same box version (`0.0.1`).
```json
{
"post-processors": [
[
{
"type": "vagrant",
"keep_input_artifact": false
},
{
"type": "atlas",
"only": ["vmware-iso"],
"artifact": "my-username/dev-environment",
"artifact_type": "vagrant.box",
"metadata": {
"provider": "vmware_desktop",
"version": "0.0.1"
}
},
{
"type": "atlas",
"only": ["virtualbox-iso"],
"artifact": "my-username/dev-environment",
"artifact_type": "vagrant.box",
"metadata": {
"provider": "virtualbox",
"version": "0.0.1"
}
}
]
]
}
```

View File

@ -0,0 +1,40 @@
---
layout: "enterprise"
page_title: "Packer Artifacts - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerartifacts"
description: |-
Packer creates and uploads artifacts to Terraform Enterprise.
---
# About Packer and Artifacts
Packer creates and uploads artifacts to Terraform Enterprise. This is done
with the [post-processor](https://packer.io/docs/post-processors/atlas.html).
Artifacts can then be used to deploy services or access via Vagrant. Artifacts
are generic, but can be of varying types. These types define different behavior
within Terraform Enterprise.
When uploading artifacts, `artifact_type` can be set to any unique identifier;
however, the following are recommended for consistency.
- `amazon.image`
- `azure.image`
- `digitalocean.image`
- `docker.image`
- `google.image`
- `openstack.image`
- `parallels.image`
- `qemu.image`
- `virtualbox.image`
- `vmware.image`
- `custom.image`
- `application.archive`
- `vagrant.box`
Packer can create artifacts when running in Terraform Enterprise or locally.
This is possible due to the post-processor's use of the public artifact API to
store the artifacts.
You can read more about artifacts and their use in the
[Terraform section](/docs/enterprise/) of the documentation.

View File

@ -0,0 +1,174 @@
---
layout: "enterprise"
page_title: "Build Environment - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-environment"
description: |-
This page outlines the environment that Packer runs in within Terraform Enterprise.
---
# Packer Build Environment
This page outlines the environment that Packer runs in within Terraform
Enterprise.
### Supported Builders
Terraform Enterprise currently supports running the following Packer builders:
- amazon-chroot
- amazon-ebs
- amazon-instance
- digitalocean
- docker
- googlecompute
- null
- openstack
- qemu
- virtualbox-iso
- vmware-iso
### Files
All files in the uploaded package (via [Packer push or GitHub](/docs/enterprise/packer/builds/starting.html)),
and the application from the build pipeline, are available on the filesystem
of the build environment.
You can use the file icon on the running build to show a list of
available files.
Files can be copied to the destination image Packer is provisioning
with [Packer Provisioners](https://packer.io/docs/templates/provisioners.html).
An example of this with the Shell provisioner is below.
```json
{
"provisioners": [
{
"type": "shell",
"scripts": [
"scripts/vagrant.sh",
"scripts/dependencies.sh",
"scripts/cleanup.sh"
]
}
]
}
```
We encourage use of relative paths over absolute paths to maintain portability
between Terraform Enterprise and local builds.
The total size of all files in the package being uploaded via
[Packer push or GitHub](/docs/enterprise/packer/builds/starting.html) must be 5 GB or less.
If you need to upload objects that are larger, such as dmgs, see the
[`packer push` "Limits" documentation](https://packer.io/docs/command-line/push.html)
for ways around this limitation.
### Hardware Limitations
Currently, each builder defined in the Packer template receives
the following hardware resources. This is subject to change.
- 1 CPU core
- 2 GB of memory
- 20 GB of disk space
### Environment Variables
You can set any number of environment variables that will be injected
into your build environment at runtime. These variables can be
used to configure your build with secrets or other key value configuration.
Variables are encrypted and stored securely.
Additionally, the following environment variables are automatically injected. All injected environment variables are prefixed with `ATLAS_`:
- `ATLAS_TOKEN` - This is a unique, per-build token that expires at the end of
build execution (e.g. `"abcd.atlasv1.ghjkl..."`)
- `ATLAS_BUILD_ID` - This is a unique identifier for this build (e.g. `"33"`)
- `ATLAS_BUILD_NUMBER` - This is a unique identifier for all builds in the same
scope (e.g. `"12"`)
- `ATLAS_BUILD_NAME` - This is the name of the build (e.g. `"mybuild"`).
- `ATLAS_BUILD_SLUG` - This is the full name of the build
(e.g. `"company/mybuild"`).
- `ATLAS_BUILD_USERNAME` - This is the username associated with the build
(e.g. `"sammy"`)
- `ATLAS_BUILD_CONFIGURATION_VERSION` - This is the unique, auto-incrementing
version for the [Packer build configuration](/docs/enterprise/glossary/index.html) (e.g. `"34"`).
- `ATLAS_BUILD_GITHUB_BRANCH` - This is the name of the branch
that the associated Packer build configuration version was ingressed from
(e.g. `master`).
- `ATLAS_BUILD_GITHUB_COMMIT_SHA` - This is the full commit hash
of the commit that the associated Packer build configuration version was
ingressed from (e.g. `"abcd1234..."`).
- `ATLAS_BUILD_GITHUB_TAG` - This is the name of the tag
that the associated Packer build configuration version was ingressed from
(e.g. `"v0.1.0"`).
If the build was triggered by a new application version, the following
environment variables are also available:
- `ATLAS_APPLICATION_NAME` - This is the name of the application connected to
the Packer build (e.g. `"myapp"`).
- `ATLAS_APPLICATION_SLUG` - This is the full name of the application connected
to the Packer build (e.g. `"company/myapp"`).
- `ATLAS_APPLICATION_USERNAME` - This is the username associated with the
application connected to the Packer build (e.g. `"sammy"`)
- `ATLAS_APPLICATION_VERSION` - This is the version of the application connected
to the Packer build (e.g. `"2"`).
- `ATLAS_APPLICATION_GITHUB_BRANCH` - This is the name of the branch that the
associated application version was ingressed from (e.g. `master`).
- `ATLAS_APPLICATION_GITHUB_COMMIT_SHA` - This is the full commit hash
of the commit that the associated application version was ingressed from
(e.g. `"abcd1234..."`).
- `ATLAS_APPLICATION_GITHUB_TAG` - This is the name of the tag that the
associated application version was ingressed from (e.g. `"v0.1.0"`).
For any of the `GITHUB_` attributes, the value of the environment variable will
be the empty string (`""`) if the resource is not connected to GitHub or if the
resource was created outside of GitHub (like using `packer push` or
`vagrant push`).
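As an illustration, a template can read these values with Packer's `env`
function. This is a minimal sketch that uses the `null` builder so it runs
without any cloud credentials; the echoed message is illustrative:

```json
{
  "variables": {
    "build_slug": "{{env `ATLAS_BUILD_SLUG`}}",
    "commit_sha": "{{env `ATLAS_BUILD_GITHUB_COMMIT_SHA`}}"
  },
  "builders": [
    {
      "type": "null",
      "communicator": "none"
    }
  ],
  "provisioners": [
    {
      "type": "shell-local",
      "command": "echo building {{user `build_slug`}} at {{user `commit_sha`}}"
    }
  ]
}
```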
### Base Artifact Variable Injection
A base artifact can be selected on the "Settings" page for a build
configuration. During each build, the latest artifact version will have its
external ID (such as an AMI for AWS) injected as an environment variable for the
environment.
The keys for the following artifact types will be injected:
- `aws.ami`: `ATLAS_BASE_ARTIFACT_AWS_AMI_ID`
- `amazon.ami`: `ATLAS_BASE_ARTIFACT_AMAZON_AMI_ID`
- `amazon.image`: `ATLAS_BASE_ARTIFACT_AMAZON_IMAGE_ID`
- `google.image`: `ATLAS_BASE_ARTIFACT_GOOGLE_IMAGE_ID`
You can then reference this artifact in your Packer template, like this
AWS example:
```json
{
"variables": {
"base_ami": "{{env `ATLAS_BASE_ARTIFACT_AWS_AMI_ID`}}"
},
"builders": [
{
"type": "amazon-ebs",
"access_key": "",
"secret_key": "",
"region": "us-east-1",
"source_ami": "{{user `base_ami`}}"
}
]
}
```
## Notes on Security
Packer environment variables in Terraform Enterprise are encrypted using [Vault](https://vaultproject.io)
and closely guarded and audited. If you have questions or concerns
about the safety of your configuration, please contact our security team
at [security@hashicorp.com](mailto:security@hashicorp.com).

View File

@ -0,0 +1,37 @@
---
layout: "enterprise"
page_title: "Running - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-runbuilds"
description: |-
This briefly covers the internal process of running builds in Terraform Enterprise.
---
# How Packer Builds Run in Terraform Enterprise
This briefly covers the internal process of running builds in Terraform
Enterprise. It's not necessary to know this information, but may be valuable to
help understand implications of running or debugging failing builds.
### Steps of Execution
1. A Packer template and directory of files is uploaded via Packer Push or
GitHub
2. Terraform Enterprise creates a version of the build configuration and waits
for the upload to complete. At this point, the version will be visible in the UI
even if the upload has not completed
3. Once the upload finishes, the build is queued. This is potentially split
across multiple machines for faster processing
4. In the build environment, the package including the files and Packer template
are downloaded
5. `packer build` is run against the template in the build environment
6. Logs are streamed into the UI and stored
7. Any artifacts as part of the build are then uploaded via the public artifact
API, as they would be if Packer was executed locally
8. The build completes, the environment is torn down, and the status is updated

View File

@ -0,0 +1,35 @@
---
layout: "enterprise"
page_title: "Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds"
description: |-
Builds are instances of `packer build` being run within Terraform Enterprise.
---
# About Builds
Builds are instances of `packer build` being run within Terraform Enterprise.
Every build belongs to a build configuration.
__Build configurations__ represent a set of Packer configuration versions and
the builds run against them. A build configuration is used as a namespace within
Terraform Enterprise, Packer commands, and URLs. Packer configurations sent to
Terraform Enterprise are stored and versioned under these build configurations.
These __versions__ of Packer configuration can contain:
- The Packer template, a JSON file which defines one or more builds by
configuring the various components of Packer
- Any provisioning scripts or packages used by the template
- Applications that use the build as part of the pipeline and merged into the
version prior to running Packer on it
When a new version of Packer configuration and associated scripts from GitHub or
`packer push` is received, it automatically starts a new Packer build. That
Packer build runs in an isolated machine environment with the contents of that
version available to it.
You can be alerted of build events with
[Build Notifications](/docs/enterprise/packer/builds/notifications.html).

View File

@ -0,0 +1,32 @@
---
layout: "enterprise"
page_title: "Installing Software - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-installing"
description: |-
Installing software with Packer.
---
# Installing Software
Please review the [Packer Build Environment](/docs/enterprise/packer/builds/build-environment.html)
specification for important information on isolation, security, and hardware
limitations before continuing.
In some cases, it may be necessary to install custom software to build your
artifact using Packer. The easiest way to install software on the Packer builder
is via the `shell-local` provisioner. This will execute commands on the host
machine running Packer.
```json
{
  "provisioners": [
    {
      "type": "shell-local",
      "command": "sudo apt-get install -y customsoftware"
    }
  ]
}
```
Please note that nothing is persisted between Packer builds, so you will need
to install custom software on each run.
The Packer builders run the latest version of Ubuntu LTS.

View File

@ -0,0 +1,28 @@
---
layout: "enterprise"
page_title: "Managing Packer Versions - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-versions"
description: |-
Terraform Enterprise does not automatically upgrade the version of Packer used to run builds or compiles.
---
# Managing Packer Versions
Terraform Enterprise does not automatically upgrade the version of Packer used
to run builds or compiles. This is intentional, as occasionally there can be
backwards incompatible changes made to Packer that cause templates to stop
building properly, or new versions that produce some other unexpected behavior.
All upgrades must be performed by a user, but Terraform Enterprise will display
a notice above any builds run with out-of-date versions. We encourage the use of
the latest version when possible.
### Upgrading Packer
1. Go to the Settings tab of a build configuration or application
2. Go to the "Packer Version" section and select the version you wish to use
3. Review the changelog for that version and previous versions
4. Click the save button. At this point, future builds will use that version

View File

@ -0,0 +1,22 @@
---
layout: "enterprise"
page_title: "Build Notifications - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-notifications"
description: |-
Terraform Enterprise can send build notifications to your organization.
---
# About Packer Build Notifications
Terraform Enterprise can send build notifications to your organization for the
following events:
- **Starting** - The build has begun.
- **Finished** - All build jobs have finished successfully.
- **Errored** - An error has occurred during one of the build jobs.
- **Canceled** - A user has canceled the build.
> Emails will include logs for the **Finished** and **Errored** events.
You can toggle notifications for each of these events on the "Integrations" tab
of a build configuration.

View File

@ -0,0 +1,20 @@
---
layout: "enterprise"
page_title: "Rebuilding - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-rebuilding"
description: |-
Sometimes builds fail due to temporary or remotely controlled conditions.
---
# Rebuilding Builds
Sometimes builds fail due to temporary or remotely controlled conditions.
In this case, it may make sense to "rebuild" a Packer build. To do so, visit the
build you wish to run again and click the Rebuild button. This will take that
exact version of configuration and run it again.
You can rebuild at any point in history, but this may cause side effects that
are not wanted. For example, if you were to rebuild an old version of a build,
it may create the next version of an artifact that is then released, causing a
rollback of your configuration to occur.

View File

@ -0,0 +1,34 @@
---
layout: "enterprise"
page_title: "Schedule Periodic Builds - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-scheduling"
description: |-
Terraform Enterprise can automatically run a Packer build and create artifacts on a specified schedule.
---
# Schedule Periodic Builds in Terraform Enterprise
Terraform Enterprise can automatically run a Packer build and
create artifacts on a specified schedule. This option is disabled by default and can be enabled by an
organization owner on a per-[environment](/docs/enterprise/glossary#environment) basis.
On the specified interval, builds that run Packer for you will be automatically
queued, creating any artifacts and sending the appropriate notifications.
If your artifacts are used in any other environments and you have activated the
plan on artifact upload feature, this may also queue Terraform plans.
This feature is useful for maintenance of images and automatic updates, or to
build nightly style images for staging or development environments.
## Enabling Periodic Builds
To enable periodic builds for a build, visit the build settings page, select
the desired interval, and click the save button to persist the changes. An
initial build may run immediately, depending on the build's history, and builds
will then run automatically at the specified interval.
If you have run a build separately, either manually or triggered from GitHub or
Packer configuration version uploads, Terraform Enterprise will not queue a new
build until the scheduled interval has elapsed since that build ran. This
ensures that a build has been executed at the specified schedule.

View File

@ -0,0 +1,74 @@
---
layout: "enterprise"
page_title: "Starting - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-starting"
description: |-
  Packer builds can be started in Terraform Enterprise in two ways.
---
# Starting Packer Builds in Terraform Enterprise
Packer builds can be started in two ways: `packer push` to upload the
template and directory, or via a GitHub connection that retrieves the contents
of a repository after changes to the default branch (usually master).
### Packer Push
Packer `push` is a
[Packer command](https://packer.io/docs/command-line/push.html) that packages
and uploads a Packer template and directory. This then creates a build which
performs `packer build` against the uploaded template and packaged directory.
The directory is included in order to run any associated provisioners, builders,
or post-processors that might use local files. For example, a shell script or
set of Puppet modules used in a Packer build needs to be part of the upload for
Packer to be run remotely.
By default, everything in your directory is uploaded as part of the push.
However, it's not always the case that the entire directory should be uploaded.
Often, temporary or cache directories and files like `.git`, `.tmp` will be
included by default. This can cause builds to fail at certain sizes and should
be avoided. You can specify
[exclusions](https://packer.io/docs/templates/push.html#exclude) to avoid this
situation.
Packer also allows for a
[VCS option](https://packer.io/docs/templates/push.html#vcs) that will detect
your VCS (if there is one) and only upload the files that are tracked by the
VCS. This is useful for automatically excluding ignored files. In a VCS like
git, this basically does a `git ls-files`.
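As a sketch of these options together, the `push` section of a template might look like the following fragment (the build configuration name and exclude patterns here are illustrative assumptions):
```json
{
  "push": {
    "name": "my-org/my-build",
    "vcs": true,
    "exclude": [".tmp/*", "*.log"]
  }
}
```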
### GitHub Webhooks
Optionally, GitHub can be used to import Packer templates and configurations.
When used within an organization, this can be extremely valuable for keeping
differences in environments and last mile changes from occurring before an
upload.
After you have [connected your GitHub account](/docs/enterprise/vcs/github.html) to Terraform Enterprise,
you can connect your [Build Configuration](/docs/enterprise/glossary#build-configuration)
to the target GitHub repository. The GitHub repository will be linked to the
Packer configuration, and GitHub will start sending webhooks.
Certain GitHub webhook events, detailed below, will cause the repository to be
automatically ingressed into Terraform Enterprise and stored, along with references to the
GitHub commits and authorship information.
After each ingress the configuration will automatically build.
You can disable an ingress by adding the text `[atlas skip]` or `[ci skip]` to
your commit message.
Supported GitHub webhook events:
- push (on by default)
- ingress when a tag is created
- ingress when the default branch is updated
- note: the default branch is either configured on your configuration's
integrations tab in Terraform Enterprise, or if that is blank it is the GitHub
repository's default branch
- create (off by default)
- ingress when a tag is created
- note: if you want to only run on tag creation, turn on create events and
turn off push events

View File

@ -0,0 +1,120 @@
---
layout: "enterprise"
page_title: "Troubleshooting - Packer Builds - Terraform Enterprise"
sidebar_current: "docs-enterprise-packerbuilds-troubleshooting"
description: |-
  Packer builds can fail in Terraform Enterprise for a number of reasons: improper configuration, transient networking errors, and hardware constraints are all possible.
---
# Troubleshooting Failing Builds
Packer builds can fail in Terraform Enterprise for a number of reasons:
improper configuration, transient networking errors, and hardware constraints
are all possible. Below is a list of debugging options you can use.
### Verbose Packer Logging
You can [set a variable](/docs/enterprise/packer/builds/build-environment.html#environment-variables) in the UI that increases the logging verbosity
in Packer. Set the `PACKER_LOG` key to a value of `1` to accomplish this.
After setting the variable, you'll need to [rebuild](/docs/enterprise/packer/builds/rebuilding.html).
Verbose logging will be much louder than normal Packer logs and isn't
recommended for day-to-day operations. Once enabled, you'll be able to see in
further detail why things failed or what operations Packer was performing.
This can also be used locally:
```text
$ PACKER_LOG=1 packer build ...
```
### Hanging Builds
Some VM builds, such as VMware or VirtualBox, may hang at various stages,
most notably `Waiting for SSH...`.
Things to pay attention to when this happens:
- SSH credentials must be properly configured. AWS keypairs should match, SSH
usernames should be correct, passwords should match, etc.
- Any VM pre-seed configuration should have the same SSH configuration as your
template defines
A good way to debug this is to manually attempt to use the same SSH
configuration locally, running with `packer build -debug`. See
more about [debugging Packer builds](https://packer.io/docs/other/debugging.html).
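For example, a local debug run with verbose logging might look like this (assuming a template named `template.json`):
```text
$ PACKER_LOG=1 packer build -debug template.json
```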
### Hardware Limitations
Your build may be failing because it requests more memory or disk
than is available. Read more about the [build environment](/docs/enterprise/packer/builds/build-environment.html#hardware-limitations).
_Typically_ Packer builds that fail due to requesting hardware limits
that exceed Terraform Enterprise's [hardware limitations](/docs/enterprise/packer/builds/build-environment.html#hardware-limitations)
will fail with a _The operation was canceled_ error message as shown below:
```text
# ...
==> vmware-iso: Starting virtual machine...
vmware-iso: The VM will be run headless, without a GUI. If you want to
vmware-iso: view the screen of the VM, connect via VNC without a password to
vmware-iso: 127.0.0.1:5918
==> vmware-iso: Error starting VM: VMware error: Error: The operation was canceled
==> vmware-iso: Waiting 4.604392397s to give VMware time to clean up...
==> vmware-iso: Deleting output directory...
Build 'vmware-iso' errored: Error starting VM: VMware error: Error: The operation was canceled
==> Some builds didn't complete successfully and had errors:
--> vmware-iso: Error starting VM: VMware error: Error: The operation was canceled
```
### Local Debugging
Sometimes it's faster to debug failing builds locally. In this case,
you'll want to [install Packer](https://www.packer.io/intro/getting-started/setup.html) and any necessary providers (like VirtualBox).
Because Terraform Enterprise runs the open source version of Packer, there
should be no difference in execution between the two, other than the environment
that Packer is running in. For more on hardware constraints in the Terraform
Enterprise environment read below.
Once your builds are running smoothly locally, you can push them up to Terraform
Enterprise for versioning and automated builds.
### Internal Errors
This is a short list of internal errors and what they mean.
- SIC-001: Your data was being ingressed from GitHub but failed
to properly unpack. This can be caused by bad permissions, the use of
symlinks, or very large repository sizes. Using symlinks inside the
Packer directory, or the root of the repository if the Packer directory
is unspecified, will result in this internal error.
_**Note:** Most often this error occurs when applications or builds are
linked to a GitHub repository and the directory and/or template paths are
incorrect. Double check that the paths specified when you linked the GitHub
repository match the actual paths to your template file._
- SEC-001: Your data was being unpacked from an uploaded tarball
and encountered an error. This can be caused by bad permissions, the use of
symlinks, or very large tarball sizes.
### Community Resources
Packer is an open source project with an active community. If you're
having an issue specific to Packer, the best avenue for support is
the mailing list or IRC. All bug reports should go to GitHub.
- Website: [packer.io](https://packer.io)
- GitHub: [github.com/mitchellh/packer](https://github.com/mitchellh/packer)
- IRC: `#packer-tool` on Freenode
- Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
### Getting Support
If you believe your build is failing as a result of a bug in Terraform
Enterprise, or would like other support, please
[email us](mailto:support@hashicorp.com).

View File

@ -0,0 +1,28 @@
---
layout: "enterprise"
page_title: "Automatic Applies - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-applies"
description: |-
How to automatically apply plans.
---
# Automatic Terraform Applies
-> This is an unreleased beta feature. Please
<a href="mailto:support@hashicorp.com">contact support</a> if you are interested
in helping us test this feature.
You can automatically apply successful Terraform plans to your
infrastructure. This option is disabled by default and can be enabled by an
organization owner on a per-environment basis.
-> This is an advanced feature that enables changes to active infrastructure
without user confirmation. Please understand the implications to your
infrastructure before enabling.
## Enabling Auto-Apply
To enable auto-apply for an environment, visit the environment settings page,
check the box labeled "auto apply", and click the save button to persist the
changes. The next successful Terraform plan for the environment will
automatically apply without user confirmation.

View File

@ -0,0 +1,57 @@
---
layout: "enterprise"
page_title: "Execution - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-execute"
description: |-
How runs execute in Terraform Enterprise.
---
# How Terraform Runs Execute
This briefly covers the internal process of running Terraform plans and applies.
It is not necessary to know this information, but it may be valuable for
understanding the implications of running or debugging failed runs.
## Steps of Execution
1. A set of Terraform configuration and directory of files is uploaded via Terraform Push or GitHub
2. Terraform Enterprise creates a version of the Terraform configuration and waits for the upload
to complete. At this point, the version will be visible in the UI even if the upload has
not completed
3. Once the upload finishes, Terraform Enterprise creates a run and queues a `terraform plan`
4. In the run environment, the package including the files and Terraform
configuration is downloaded
5. `terraform plan` is run against the configuration in the run environment
6. Logs are streamed into the UI and stored
7. The `.tfplan` file created in the plan is uploaded and stored
8. Once the plan completes, the environment is torn down and status is
updated in the UI
9. The plan then requires confirmation by an operator. It can optionally
be discarded and ignored at this stage
10. Once confirmed, the run then executes a `terraform apply` in a new
environment against the saved `.tfplan` file
11. The logs are streamed into the UI and stored
12. Once the apply completes, the environment is torn down, status is
updated in the UI and changed state is saved back
Note: In the case of a failed apply, it's safe to re-run. This is possible
because Terraform saves partial state and can "pick up where it left off".
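Locally, the plan/apply split described in the steps above corresponds to saving a plan file and then applying it; a minimal sketch (the plan file name is an assumption):
```text
$ terraform plan -out=run.tfplan
$ terraform apply run.tfplan
```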
### Customizing Terraform Execution
As described in the steps above, Terraform will be run against your configuration
when changes are pushed via GitHub, `terraform push`, or manually queued in the
UI. There are a few options available to customize the execution of Terraform.
These are:
- The directory that contains your environment's Terraform configuration can be customized
to support directory structures with more than one set of Terraform configuration files.
To customize the directory for your Environment, set the _Terraform Directory_
property in the [_GitHub Integration_](/docs/enterprise/vcs/github.html) settings for your environment. This is equivalent to
passing the `[dir]` argument when running Terraform in your local shell.
- The directory in which Terraform is executed can be customized to support directory
structures with nested sub-directories or configurations that use Terraform modules with
relative paths. To customize the directory used for Terraform execution in your Environment, set the `TF_ATLAS_DIR`
[environment variable](/docs/enterprise/runs/variables-and-configuration.html#environment-variables)
to the relative path of the directory, e.g. `terraform/production`. This is equivalent to
changing directories to the appropriate path in your local shell and then executing Terraform, as sketched below.
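For illustration, the local-shell equivalent of setting `TF_ATLAS_DIR` to `terraform/production` (a hypothetical path) would be:
```text
$ cd terraform/production
$ terraform plan
```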

View File

@ -0,0 +1,70 @@
---
layout: "enterprise"
page_title: "Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs"
description: |-
A "run" in Atlas represents the logical grouping of two Terraform steps - a "plan" and an "apply".
---
# About Terraform Enterprise Runs
A "run" represents the logical grouping of two Terraform steps - a "plan" and an
"apply". The distinction between these two phases of a Terraform run are
documented below.
When a [new run is created](/docs/enterprise/runs/starting.html), Terraform
Enterprise automatically queues a Terraform plan. Because a plan does not change
the state of infrastructure, it is safe to execute a plan multiple times without
consequence. An apply executes the output of a plan and actively changes
infrastructure. To prevent race conditions, the platform will only execute one
plan/apply at a time (plans for validating GitHub Pull Requests are allowed to
happen concurrently, as they do not modify state). You can read more about
Terraform plans and applies below.
## Plan
During the plan phase of a run, the command `terraform plan` is executed.
Terraform performs a refresh and then determines what actions are necessary to
reach the desired state specified in the Terraform configuration files. A
successful plan outputs an executable file that is securely stored in Terraform
Enterprise and may be used in the subsequent apply.
Terraform plans do not change the state of infrastructure, so it is
safe to execute a plan multiple times. In fact, there are a number of components
that can trigger a Terraform plan. You can read more about this in the
[starting runs](/docs/enterprise/runs/starting.html) section.
## Apply
During the apply phase of a run, the command `terraform apply` is executed
with the executable result of the prior Terraform plan. This phase **can change
infrastructure** by applying the changes required to reach the desired state
specified in the Terraform configuration file.
While Terraform plans are safe to run multiple times, Terraform applies often
change active infrastructure. Because of this, the default behavior
is to require user confirmation as part of the
[Terraform run execution](/docs/enterprise/runs/how-runs-execute.html). Upon
user confirmation, the Terraform apply will be queued and executed. It is also
possible to configure
[automatic applies](/docs/enterprise/runs/automatic-applies.html), but this option is
disabled by default.
## Environment Locking
During run execution, the environment will lock to prevent other plans
and applies from executing simultaneously. When the run completes, the next
pending run, if any, will be started.
An administrator of the environment can also manually lock the environment, for
example during a maintenance period.
You can see the lock status of an environment, and lock/unlock the environment
by visiting that environment's settings page.
## Notifications
To receive alerts when user confirmation is needed or for any other phase of the
run process, you can
[enable run notifications](/docs/enterprise/runs/notifications.html) for your
organization or environment.

View File

@ -0,0 +1,34 @@
---
layout: "enterprise"
page_title: "Installing Software - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-installing"
description: |-
Installing custom software on the Terraform Runners.
---
# Installing Custom Software
The machines that run Terraform exist in an isolated environment and are
destroyed on each use. In some cases, it may be necessary to install certain
software on the Terraform runner, such as a configuration management tool like
Chef, Puppet, Ansible, or Salt.
The easiest way to install software on the Terraform runner is via the
`local-exec` provisioner. This will execute commands on the host machine running
Terraform.
```hcl
resource "null_resource" "local-software" {
provisioner "local-exec" {
command = <<EOH
sudo apt-get update
sudo apt-get install -y ansible
EOH
}
}
```
Please note that nothing is persisted between Terraform runs, so you will need
to install custom software on each run.
The Terraform runners use the latest version of Ubuntu LTS.

View File

@ -0,0 +1,31 @@
---
layout: "enterprise"
page_title: "Managing Terraform Versions - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-versions"
description: |-
How to manage versions of Terraform Enterprise.
---
# Managing Terraform Versions
Terraform Enterprise does not automatically upgrade the version of Terraform
used to execute plans and applies. This is intentional, as occasionally there
can be backwards incompatible changes made to Terraform that cause state and
plans to differ based on the same configuration, or new versions that produce
some other unexpected behavior.
All upgrades must be performed by a user, but Terraform Enterprise will display
a notice above any plans or applies run with out-of-date versions. We encourage
the use of the latest version when possible.
Note that regardless of when an upgrade is performed, the version of Terraform
used in a plan will be used in the subsequent apply.
### Upgrading Terraform
1. Go to the Settings tab of an environment
2. Go to the "Terraform Version" section and select the version you
wish to use
3. Review the changelog for that version and previous versions
4. Click the save button. At this point, future runs will use that
version

View File

@ -0,0 +1,114 @@
---
layout: "enterprise"
page_title: "AWS Multi-Factor Authentication - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-multifactor-authentication"
description: |-
  Configuring Terraform plans and applies to use AWS multi-factor authentication.
---
# AWS Multi-Factor Authentication for Terraform Runs in Terraform Enterprise
You can optionally configure Terraform plans and applies to use multi-factor authentication using [AWS Secure Token Service](http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html).
This option is disabled by default and can be enabled by an organization owner.
!> This is an advanced feature that enables changes to active infrastructure
without user confirmation. Please understand the implications to your
infrastructure before enabling.
## Setting Up AWS Multi-Factor Authentication
Before you are able to set up multi-factor authentication in Terraform
Enterprise, you must set up an IAM user in AWS. More details about creating an
IAM user can be found
[here](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable.html).
Setting up an AWS IAM user will provide you with the serial number and access
keys that you will need in order to connect to AWS Secure Token Service.
In order to set up multi-factor authentication for your organization, you must
have the following environment variables in your configuration:
`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_MFA_SERIAL_NUMBER`. You can
set these variables at `/settings/organization_variables`.
## Enabling AWS Multi-Factor Authentication
To enable multi-factor authentication, visit the environment settings page:
```text
/terraform/:organization/environments/:environment/settings
```
Use the drop-down labeled "AWS Multi-Factor Authentication". There are
currently three levels available: "never", "applies only", and "plans and
applies". Once you have selected your desired level, save your settings. All
subsequent runs on the environment will now require the selected level of
authentication.
## Using AWS Multi-Factor Authentication
Once you have elected to use AWS MFA for your Terraform runs, you will be
prompted to enter a token code each time you plan or apply the run, depending on
your settings. Your one-time-use token code will be sent to you via the method
you selected when setting up your
[IAM account](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable.html).
If you have selected "applies only", you will be able to queue and run a plan
without entering your token code. Once the run finishes, you will need to enter
your token code and click "Authenticate" before applying the plan. Once you
submit your token code, the apply will start, and you will see "Authenticated
with MFA by `user`" in the UI. If there is an error when submitting
your token code, the lock icon in the UI will turn red, and an error will appear
alerting you to the failure.
If you have selected "plans and applies", you will be prompted to enter your
token before queueing your plan. Once you enter the token and click
"Authenticate", you will see "Authenticated with MFA by `user`" appear in the UI
logs. The plan will queue and you may run the plan once it is queued. Then,
before applying, you will be asked to authenticate with MFA again. Enter your
token, click Authenticate, and note that "Authenticated with MFA by `user`"
appears in the UI log after the apply begins. If there is an error
authenticating, the lock icon in the UI will turn red, and an error will appear
alerting you to the failure.
## Using AWS Multi-Factor Authentication with AWS STS AssumeRole
The AWS Secure Token Service can be used to return a set of temporary security
credentials that a user can use to access resources that they might not normally
have access to (known as AssumeRole). The AssumeRole workflow is compatible with
AWS multi-factor authentication in Terraform Enterprise.
To use AssumeRole, you first need to create an IAM role and edit the trust
relationship policy document to contain the following:
```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": "arn:aws:iam::[INT]:user/[USER]"
      },
      "Action": "sts:AssumeRole",
      "Condition": {
        "Bool": {
          "aws:MultiFactorAuthPresent": "true"
        }
      }
    }
  ]
}
```
You can then configure the Terraform AWS provider to assume a given role by specifying the role ARN within the nested `assume_role` block:
```hcl
provider "aws" {
# ...
assume_role {
role_arn = "arn:aws:iam::[INT]:role/[ROLE]"
}
}
```

View File

@ -0,0 +1,33 @@
---
layout: "enterprise"
page_title: "Notifications - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-notifications"
description: |-
  Terraform Enterprise can send run notifications to your organization.
---
# Terraform Run Notifications
Terraform Enterprise can send run notifications. The following events are
configurable:
- **Needs Confirmation** - The plan phase has succeeded, and there are changes
that need to be confirmed before applying.
- **Confirmed** - A plan has been confirmed, and it will begin applying shortly.
- **Discarded** - A user has discarded the plan.
- **Applying** - The plan has begun to apply and make changes to your
infrastructure.
- **Applied** - The plan was applied successfully.
- **Errored** - An error has occurred during the plan or apply phase.
> Emails will include logs for the **Needs Confirmation**, **Applied**, and
> **Errored** events.
You can toggle notifications for each of these events on the "Integrations" tab
of an environment.

View File

@ -0,0 +1,40 @@
---
layout: "enterprise"
page_title: "Scheduling - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-schedule"
description: |-
Schedule periodic plan runs in Terraform.
---
# Schedule Periodic Plan Runs
-> This is an unreleased beta feature. Please
<a href="mailto:support@hashicorp.com">contact support</a> if you are interested
in helping us test this feature.
Terraform can automatically run a plan against your infrastructure on a
specified schedule. This option is disabled by default and can be enabled by an
organization owner on a per-environment basis.
On the specified interval, a plan will be run for you, determining any
changes and sending the appropriate notifications.
When used with [automatic applies](/docs/enterprise/runs/automatic-applies.html), this feature can help converge
changes to infrastructure without human input.
Runs will not be queued while another plan or apply is in progress, or if the
environment has been manually locked. See
[Environment Locking](/docs/enterprise/runs#environment-locking) for more
information.
## Enabling Periodic Plans
To enable periodic plans for an environment, visit the environment settings page
and select the desired interval and click the save button to persist the
changes. An initial plan may immediately run, depending on the state of your
environment, and then will automatically plan at the specified interval.
If you have manually run a plan separately, a new plan will not be queued until
the scheduled interval has elapsed since the manual plan ran. This means that
the platform simply ensures that a plan has been executed at the specified schedule.

View File

@ -0,0 +1,117 @@
---
layout: "enterprise"
page_title: "Starting - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-starting"
description: |-
How to start runs in Terraform Enterprise.
---
# Starting Terraform Runs
There are a variety of ways to queue a Terraform run in Terraform Enterprise. In addition to
`terraform push`, you can connect your environment
to GitHub and trigger runs based on new commits. You can
also intelligently queue new runs when linked artifacts are uploaded or changed.
Remember from the [previous section about Terraform runs](/docs/enterprise/runs)
that it is safe to trigger many plans without consequence since Terraform plans
do not change infrastructure.
## Terraform Push
Terraform `push` is a [Terraform command](https://terraform.io/docs/commands/push.html)
that packages and uploads a set of Terraform configuration and directory to the platform. This then creates a run
which performs `terraform plan` and `terraform apply` against the uploaded
configuration.
The directory is included in order to run any associated provisioners,
that might use local files. For example, a remote-exec provisioner
that executes a shell script.
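For example, a push from your local shell might look like the following (the environment name is an illustrative assumption):
```shell
$ terraform push -name="my-org/my-env"
```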
By default, everything in your directory is uploaded as part of the push.
However, it's not always the case that the entire directory should be uploaded. Often,
temporary or cache directories and files like `.git`, `.tmp` will be included by default, which
can cause failures at certain sizes and should be avoided. You can
specify [exclusions](https://terraform.io/docs/commands/push.html) to avoid this situation.
Terraform also allows for a [VCS option](https://terraform.io/docs/commands/push.html#_vcs_true)
that will detect your VCS (if there is one) and only upload the files that are tracked by the VCS. This is
useful for automatically excluding ignored files. In a VCS like git, this
basically does a `git ls-files`.
## GitHub Webhooks
Optionally, GitHub can be used to import Terraform configuration. When used
within an organization, this can be extremely valuable for keeping differences
in environments and last mile changes from occurring before an upload.
After you have [connected your GitHub account to Terraform Enterprise](/docs/enterprise/vcs/github.html),
you can connect your environment to the target
GitHub repository. The GitHub repository will be linked to the Terraform Enterprise
configuration, and GitHub will start sending webhooks. Certain
GitHub webhook events, detailed below, will cause the repository to be
automatically ingressed into Terraform and stored, along with references to the
GitHub commits and authorship information.
Currently, an environment must already exist to be connected to GitHub. You can
create the environment with `terraform push`, detailed above, and then link it
to GitHub.
Each ingress will trigger a Terraform plan. If you have auto-apply enabled then
the plan will also be applied.
You can disable an ingress by adding the text `[atlas skip]` or `[ci skip]` to
your commit message.
Supported GitHub webhook events:
- pull_request (on by default)
- ingress when opened or reopened
- ingress when synchronized (new commits are pushed to the branch)
- push (on by default)
- ingress when a tag is created
- ingress when the default branch is updated
- note: the default branch is either configured on your configuration's
integrations tab, or if that is blank it is the GitHub
repository's default branch
- create (off by default)
- ingress when a tag is created
- note: if you want to only run on tag creation, turn on create events and
turn off push events
## Artifact Uploads
Upon successful completion of a Terraform run, the remote state is parsed and
any referenced [artifacts](/docs/enterprise/artifacts/artifact-provider.html)
are detected. When new versions of those referenced artifacts are uploaded, you have the option to automatically queue a new Terraform run.
For example, consider the following Terraform configuration which references an
artifact named "worker":
```hcl
resource "aws_instance" "worker" {
ami = "${atlas_artifact.worker.metadata_full.region-us-east-1}"
instance_type = "m1.small"
}
```
When a new version of the artifact "worker" is uploaded, either manually
or as the output of a [Packer build](/docs/enterprise/packer/builds/starting.html), a Terraform plan can be automatically triggered with this new artifact version.
You can enable this feature on a per-environment basis from the
environment settings page.
Combined with
[Terraform auto apply](/docs/enterprise/runs/automatic-applies.html), you can
continuously deliver infrastructure using Terraform and Terraform Enterprise.
## Terraform Plugins
If you are using a custom [Terraform Plugin](https://www.terraform.io/docs/plugins/index.html)
binary for a provider or provisioner that's not currently in a released
version of Terraform, you can still use this in Terraform Enterprise.
All you need to do is include a Linux AMD64 binary for the plugin in the
directory from which Terraform commands are run; it will then be used the next time you `terraform push` or ingress from GitHub, as sketched below.
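A minimal sketch of such a directory layout, assuming a hypothetical custom provider binary named `terraform-provider-example`:
```text
.
├── main.tf                      # Terraform configuration
└── terraform-provider-example   # custom plugin binary, built for linux/amd64
```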

View File

@ -0,0 +1,143 @@
---
layout: "enterprise"
page_title: "Variables and Configuration - Runs - Terraform Enterprise"
sidebar_current: "docs-enterprise-runs-variables"
description: |-
How to configure runs and their variables.
---
# Terraform Variables and Configuration
There are two ways to configure Terraform runs: with Terraform variables or
environment variables.
## Terraform Variables
Terraform variables are first-class configuration in Terraform. They define the
parameterization of Terraform configurations and are important for sharing
configuration and keeping sensitive secrets out of version control.
Variables are sent with the `terraform push` command. Any variables in your local
`.tfvars` files are securely uploaded. Once variables are uploaded, Terraform will prefer the stored variables over any changes you
make locally. Please refer to the
[Terraform push documentation](https://www.terraform.io/docs/commands/push.html)
for more information.
You can also add, edit, and delete variables. To update Terraform variables,
visit the "variables" page on your environment.
The maximum size for the value of Terraform variables is `256kb`.
For detailed information about Terraform variables, please read the
[Terraform variables](https://terraform.io/docs/configuration/variables.html)
section of the Terraform documentation.
## Environment Variables
Environment variables are injected into the virtual environment that Terraform
executes in during the `plan` and `apply` phases.
You can add, edit, and delete environment variables from the "variables" page
on your environment.
Additionally, the following environment variables are automatically injected by
Terraform Enterprise. All injected environment variables are prefixed with `ATLAS_`:
- `ATLAS_TOKEN` - This is a unique, per-run token that expires at the end of
run execution (e.g. `"abcd.atlasv1.ghjkl..."`).
- `ATLAS_RUN_ID` - This is a unique identifier for this run (e.g. `"33"`).
- `ATLAS_CONFIGURATION_NAME` - This is the name of the configuration used in
this run. Unless you have configured it differently, this will also be the
name of the environment (e.g `"production"`).
- `ATLAS_CONFIGURATION_SLUG` - This is the full slug of the configuration used
in this run. Unless you have configured it differently, this will also be the
name of the environment (e.g. `"company/production"`).
- `ATLAS_CONFIGURATION_VERSION` - This is the unique, auto-incrementing version
for the Terraform configuration (e.g. `"34"`).
- `ATLAS_CONFIGURATION_VERSION_GITHUB_BRANCH` - This is the name of the branch
that the associated Terraform configuration version was ingressed from
(e.g. `master`).
- `ATLAS_CONFIGURATION_VERSION_GITHUB_COMMIT_SHA` - This is the full commit hash
of the commit that the associated Terraform configuration version was
ingressed from (e.g. `"abcd1234..."`).
- `ATLAS_CONFIGURATION_VERSION_GITHUB_TAG` - This is the name of the tag
that the associated Terraform configuration version was ingressed from
(e.g. `"v0.1.0"`).
For any of the `GITHUB_` attributes, the value of the environment variable will
be the empty string (`""`) if the resource is not connected to GitHub or if the
resource was created outside of GitHub (like using `terraform push`).
## Managing Secret Multi-Line Files
Terraform Enterprise has the ability to store multi-line files as variables. The recommended way to manage your secret/sensitive multi-line files (private key, SSL cert, SSL private key, CA, etc.) is to add them as [Terraform Variables](#terraform-variables) or [Environment Variables](#environment-variables).
Just like secret strings, it is recommended that you never check these
multi-line secret files into version control; the steps below show how to avoid that.
Set the [variables](https://www.terraform.io/docs/configuration/variables.html)
in your Terraform template that resources utilizing the secret file will
reference:
```hcl
variable "private_key" {}
resource "aws_instance" "example" {
# ...
provisioner "remote-exec" {
connection {
host = "${self.private_ip}"
private_key = "${var.private_key}"
}
# ...
}
}
```
`terraform push` any "Terraform Variables":
$ terraform push -name $ATLAS_USERNAME/example -var "private_key=$MY_PRIVATE_KEY"
`terraform push` any "Environment Variables":
$ TF_VAR_private_key=$MY_PRIVATE_KEY terraform push -name $ATLAS_USERNAME/example
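As a sketch, the `MY_PRIVATE_KEY` variable above could be populated from a local file before pushing (the file path is an assumption for the example):
```shell
$ MY_PRIVATE_KEY="$(cat ~/.ssh/id_rsa)"
$ terraform push -name $ATLAS_USERNAME/example -var "private_key=$MY_PRIVATE_KEY"
```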
Alternatively, you can add or update variables manually by going to the
"Variables" section of your Environment and pasting the contents of the file in
as the value.
Now, any resource that consumes that variable will have access to the variable value, without having to check the file into version control. If you want to run Terraform locally, that file will still need to be passed in as a variable in the CLI. View the [Terraform Variable Documentation](https://www.terraform.io/docs/configuration/variables.html) for more info on how to accomplish this.
A few things to note:
The `.tfvars` file does not support multi-line files. You can still use
`.tfvars` to define variables, however, you will not be able to actually set the
variable in `.tfvars` with the multi-line file contents like you would a
variable in a `.tf` file.
If you are running Terraform locally, you can pass in the variables at the
command line:
```shell
$ terraform apply -var "private_key=$MY_PRIVATE_KEY"
$ TF_VAR_private_key=$MY_PRIVATE_KEY terraform apply
```
You can update variables locally by using the `-overwrite` flag with your `terraform push` command:
```shell
$ terraform push -name $ATLAS_USERNAME/example -var "private_key=$MY_PRIVATE_KEY" -overwrite=private_key
$ TF_VAR_private_key=$MY_PRIVATE_KEY terraform push -name $ATLAS_USERNAME/example -overwrite=private_key
```
## Notes on Security
Terraform variables and environment variables are encrypted using
[Vault](https://vaultproject.io) and closely guarded and audited. If you have
questions or concerns about the safety of your configuration, please contact
our security team at [security@hashicorp.com](mailto:security@hashicorp.com).

View File

@ -0,0 +1,23 @@
---
layout: "enterprise"
page_title: "Collaborating - State - Terraform Enterprise"
sidebar_current: "docs-enterprise-state-collaborating"
description: |-
How to collaborate on states.
---
# Collaborating on Terraform Remote State
Terraform Enterprise is one of a few options to store [remote state](/docs/enterprise/state).
Remote state gives you the ability to version and collaborate on Terraform
changes. It stores information about the changes Terraform makes based on
configuration.
In order to collaborate safely on remote state, we recommend
[creating an organization](/docs/enterprise/organizations/create.html) to
manage teams of users.
Then, following a [remote state push](/docs/enterprise/state) you can view state
versions in the changes tab of the environment created under the same name as
the remote state.

View File

@ -0,0 +1,24 @@
---
layout: "enterprise"
page_title: "State - Terraform Enterprise"
sidebar_current: "docs-enterprise-state"
description: |-
Terraform stores the state of your managed infrastructure from the last time Terraform was run. This section is about states.
---
# State
Terraform stores the state of your managed infrastructure from the last time
Terraform was run. By default this state is stored in a local file named
`terraform.tfstate`, but it can also be stored remotely, which works better in a
team environment.
Terraform Enterprise is a remote state provider, allowing you to store, version
and collaborate on states.
Remote state gives you more than just easier version control and safer storage.
It also allows you to delegate the outputs to other teams. This allows your
infrastructure to be more easily broken down into components that multiple teams
can access.
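As a sketch of delegating outputs, a configuration in another environment could consume this state via the `terraform_remote_state` data source (the environment name `my-org/networking` and the `subnet_id` output are assumptions for the example):
```hcl
data "terraform_remote_state" "networking" {
  backend = "atlas"

  config {
    name = "my-org/networking"
  }
}

resource "aws_instance" "app" {
  # ...

  subnet_id = "${data.terraform_remote_state.networking.subnet_id}"
}
```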
Read [more about remote state](https://www.terraform.io/docs/state/remote.html).

View File

@ -0,0 +1,23 @@
---
layout: "enterprise"
page_title: "Pushing - State - Terraform Enterprise"
sidebar_current: "docs-enterprise-state-pushing"
description: |-
Pushing remote states.
---
# Pushing Terraform Remote State to Terraform Enterprise
Terraform Enterprise is one of a few options to store [remote state](/docs/enterprise/state).
Remote state gives you the ability to version and collaborate on Terraform
changes. It stores information about the changes Terraform makes based on
configuration.
To use Terraform Enterprise to store remote state, you'll first need to have
the `ATLAS_TOKEN` environment variable set, and then run the following command:
```shell
$ terraform remote config \
    -backend-config="name=$USERNAME/product"
```
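Once the remote configuration is set, the existing local state can be uploaded with:
```shell
$ terraform remote push
```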

View File

@ -0,0 +1,70 @@
---
layout: "enterprise"
page_title: "Resolving Conflicts - State - Terraform Enterprise"
sidebar_current: "docs-enterprise-state-resolving"
description: |-
Resolving conflicts with remote states.
---
# Resolving Conflicts in Remote States
Resolving state conflicts can be time consuming and error prone, so it's
important to approach it carefully.
There are several tools provided by Terraform Enterprise to help resolve
conflicts and fix remote state issues. First, you can navigate between state
versions in the changes view of your environment (after toggling on the remote
state checkbox) and view plain-text differences between versions.
This allows you to pinpoint where things may have gone wrong and make an
educated decision about resolving the conflict.
### Rolling Back to a Specific State Version
The rollback feature allows you to choose a new version to set as the "Head"
version of the state. Rolling back to a version means it will then return that
state upon request from a client. It will not increment the serial in the state,
but perform a hard rollback to the exact version of the state provided.
This allows you to reset the state to an older version, essentially forgetting
changes made in versions after that point.
To roll back to a specific version, navigate to it in the changes view and use
the rollback link. You'll need to confirm the version number to perform the
operation.
### Using Terraform Locally
Another way to resolve remote state conflicts is to merge any conflicted copies
locally by inspecting the raw state available in the path
`.terraform/terraform.tfstate`.
When making state changes, it's important to make backup copies in order to
avoid losing any data.
Any state that is pushed with a serial lower than the known serial, when the
MD5 of the state does not match, will be rejected.
The serial is embedded in the state file:
```json
{
"version": 1,
"serial": 555,
"remote": {
"type": "atlas",
"config": {
"name": "my-username/production"
}
}
}
```
Once a conflict has been resolved locally by editing the state file, the serial
can be incremented past the current version and pushed:
```shell
$ terraform remote push
```
This will upload the manually resolved state and set it as the head version.

View File

@ -0,0 +1,38 @@
---
layout: "enterprise"
page_title: "Support - Terraform Enterprise"
sidebar_current: "docs-enterprise-support"
description: |-
All users of Terraform Enterprise are urged to email feedback, questions or requests to the HashiCorp team.
---
# Contacting Support
All users of Terraform Enterprise are urged to email feedback, questions or
requests to the HashiCorp team.
### Free Support
We do not currently publish support SLAs for free accounts, but endeavor to
respond as quickly as possible. We respond to most requests within 24
hours.
## HashiCorp Tools Support
It's often the case that Terraform Enterprise questions or feedback relates to
the HashiCorp tooling. We encourage all Terraform Enterprise users to search for
related issues and problems in the open source repositories and mailing lists
prior to contacting us to help make our support more efficient and to help
resolve problems faster.
Visit the updating tools section for a list of our tools and their project
websites.
## Documentation Feedback
Due to the dynamic nature of Terraform Enterprise and the broad set of features
it provides, there may be information lacking in the documentation.
In this case, we appreciate any feedback to be emailed to us so
we can make improvements. Please email feedback to
<a href="mailto:support@hashicorp.com">support@hashicorp.com</a>.

View File

@ -0,0 +1,58 @@
---
layout: "enterprise"
page_title: "Authentication - Accounts - Terraform Enterprise"
sidebar_current: "docs-enterprise-accounts-authentication"
description: |-
Terraform Enterprise requires a username and password to sign up and login. However, there are several ways to authenticate with your account.
---
# Authentication
Terraform Enterprise requires a username and password to sign up and login.
However, there are several ways to authenticate with your account.
### Authentication Tokens
Authentication tokens are keys used to access your account via tools or over the
various APIs used in Terraform Enterprise.
You can create new tokens in the token section of your account settings. It's
important to keep tokens secure, as they are essentially a password and can be
used to access your account or resources. Additionally, token authentication
bypasses two-factor authentication.
### Authenticating Tools
All HashiCorp tools look for the `ATLAS_TOKEN` environment variable:
```shell
$ export ATLAS_TOKEN=TOKEN
```
This will automatically authenticate all requests against this token. This is
the recommended way to authenticate with our various tools. Care should be given
to how this token is stored, as it is as good as a password.
### Two Factor Authentication
You can optionally enable two-factor authentication, requiring an SMS or TOTP
one-time code every time you log in, after entering your username and password.
You can enable two-factor authentication in the security section of your account
settings.
Be sure to save the generated recovery codes. Each backup code can be used once
to sign in if you do not have access to your two-factor authentication device.
### Sudo Mode
When accessing certain admin-level pages (adjusting your user profile, for
example), you may notice that you're prompted for your password, even though
you're already logged in. This is by design, and aims to help protect you
if your screen is unlocked and unattended.
### Session Management
You can see a list of your active sessions on your security settings page. From
here, you can revoke sessions, in case you have lost access to a machine from
which you were accessing your account.

View File

@ -0,0 +1,13 @@
---
layout: "enterprise"
page_title: "Accounts - Terraform Enterprise"
sidebar_current: "docs-enterprise-accounts"
description: |-
Users are the main identity system in Terraform Enterprise.
---
# User Accounts
Users are the main identity system in Terraform Enterprise. A user can be a
member of multiple [organizations](/docs/enterprise/organizations/index.html),
as well as individually collaborate on various resources.

View File

@ -0,0 +1,15 @@
---
layout: "enterprise"
page_title: "Recovery - Accounts - Terraform Enterprise"
sidebar_current: "docs-enterprise-accounts-recovery"
description: |-
If you have lost access to your account, use the reset password form to send yourself a link to reset your password.
---
# Account Recovery
If you have lost access to your Terraform Enterprise account, use the reset
password form on the login page to send yourself a link to reset your password.
If an email is unknown, [contact us](mailto:support@hashicorp.com) for further
help.

View File

@ -0,0 +1,66 @@
---
layout: "enterprise"
page_title: "Git - VCS Integrations - Terraform Enterprise"
sidebar_current: "docs-enterprise-vcs-git-"
description: |-
  Git repositories can be integrated with Terraform Enterprise by using the `terraform push` command.
---
# Git Integration
Git repositories can be integrated with Terraform Enterprise by using
[`terraform push`](/docs/commands/push.html) to import Terraform configuration
when changes are committed. When Terraform configuration is imported using
`terraform push`, a plan is automatically queued.
-> This integration is for Git repositories **not** hosted on GitHub. For GitHub, please see the GitHub documentation instead.
## Setup
Terraform configuration can be manually imported by running `terraform push`
like below:
```shell
$ terraform push -name=$USERNAME/ENV_NAME
```
A better option than having to manually run `terraform push` is to run it
using a git commit hook. A client-side `pre-push` hook is suitable and will
push your Terraform configuration when you push local changes to your Git
server.
### Client-side Commit Hook
The script below will execute `terraform push` when you push local changes to
your Git server. Place the script at `.git/hooks/pre-push` in your local Git
repository, set the necessary variables, and ensure the script is executable.
```shell
#!/bin/bash
#
# An example hook script to push Terraform configuration to Terraform Enterprise.
#
# Set the following variables for your project:
# - ENV_NAME - your environment name (e.g. org/env)
# - TERRAFORM_DIR - the local directory to push
# - DEFAULT_BRANCH - the branch to push. Other branches will be ignored.
ENV_NAME="YOUR_ORG/YOUR_ENV"
TERRAFORM_DIR="terraform"
DEFAULT_BRANCH=""
if [[ -z "$ENV_NAME" || -z "$TERRAFORM_DIR" || -z "$DEFAULT_BRANCH" ]]; then
  echo 'pre-push hook: One or more variables are undefined. Canceling push.'
  exit 1
fi

current_branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')

if [ "$current_branch" == "$DEFAULT_BRANCH" ]; then
  echo "pre-push hook: Pushing branch [$current_branch] to environment [$ENV_NAME]."
  terraform push -name="$ENV_NAME" $TERRAFORM_DIR
else
  echo "pre-push hook: NOT pushing branch [$current_branch] to environment [$ENV_NAME]."
fi
```

View File

@ -0,0 +1,38 @@
---
layout: "enterprise"
page_title: "GitHub - VCS Integrations - Terraform Enterprise"
sidebar_current: "docs-enterprise-vcs-github"
description: |-
  GitHub repositories can be integrated with Terraform Enterprise to import Terraform configuration and automatically queue runs.
---
# GitHub Integration
GitHub can be used to import Terraform configuration, automatically queuing runs
when changes are merged into a repository's default branch. Additionally, plans
are run when a pull request is created or updated. Terraform Enterprise will
update the pull request with the result of the Terraform plan providing quick
feedback on proposed changes.
## Setup
Terraform Enterprise environments are linked to individual GitHub repositories.
However, a single GitHub repository can be linked to multiple environments
allowing a single set of Terraform configuration to be used across multiple
environments.
Environments can be linked when they're initially created using the New
Environment process. Existing environments can be linked by setting GitHub
details in their **Integrations** settings.
To link a Terraform Enterprise environment to a GitHub repository, you need
three pieces of information:
- **GitHub repository** - The location of the repository being imported in the
format _username/repository_.
- **GitHub branch** - The branch from which to ingress new versions. This
defaults to the value GitHub provides as the default branch for this repository.
- **Path to directory of Terraform files** - The repository's subdirectory that
contains its Terraform files. This defaults to the root of the repository.

View File

@ -0,0 +1,17 @@
---
layout: "enterprise"
page_title: "VCS Integrations - Terraform Enterprise"
sidebar_current: "docs-enterprise-vcs"
description: |-
  Terraform Enterprise can integrate with version control software such as Git and GitHub.
---
# Integration with Version Control Software
Terraform Enterprise can integrate with your version control software to
automatically execute Terraform with your latest Terraform configuration as you
commit changes to source control.
Different capabilities within Terraform Enterprise are available depending on
the integration in use. The available integration options are on the sidebar
navigation.

View File

@ -73,7 +73,7 @@ You can use the same parameters to GitHub repositories as you can generic Git re
### Private GitHub Repos
If you need Terraform to be able to fetch modules from private GitHub repos on a remote machine (like Atlas or a CI server), you'll need to provide Terraform with credentials that can be used to authenticate as a user with read access to the private repo.
If you need Terraform to be able to fetch modules from private GitHub repos on a remote machine (like Terraform Enterprise or a CI server), you'll need to provide Terraform with credentials that can be used to authenticate as a user with read access to the private repo.
First, create a [machine user](https://developer.github.com/guides/managing-deploy-keys/#machine-users) on GitHub with read access to the private repo in question, then embed this user's credentials into the `source` parameter:

View File

@ -1,42 +0,0 @@
---
layout: "atlas"
page_title: "Provider: Atlas"
sidebar_current: "docs-atlas-index"
description: |-
The Atlas provider is used to interact with configuration,
artifacts, and metadata managed by the Atlas service.
---
# Atlas Provider
The Atlas provider is used to interact with resources, configuration,
artifacts, and metadata managed by [Atlas](https://atlas.hashicorp.com).
The provider needs to be configured with the proper credentials before
it can be used.
Use the navigation to the left to read about the available resources.
## Example Usage
```hcl
# Configure the Atlas provider
provider "atlas" {
  token = "${var.atlas_token}"
}

# Fetch an artifact configuration
data "atlas_artifact" "web" {
  # ...
}
```
## Argument Reference
The following arguments are supported:
* `address` - (Optional) Atlas server endpoint. Defaults to public Atlas.
This is only required when using an on-premise deployment of Atlas. This can
also be specified with the `ATLAS_ADDRESS` shell environment variable.
* `token` - (Required) API token. This can also be specified with the
`ATLAS_TOKEN` shell environment variable.

View File

@ -16,7 +16,7 @@ them by domain without having to hard code the ARNs as input.
## Example Usage
```
```hcl
data "aws_acm_certificate" "example" {
domain = "tf.example.com"
statuses = ["ISSUED"]

View File

@ -1,7 +1,7 @@
---
layout: "aws"
page_title: "AWS: aws_alb"
sidebar_current: "docs-aws-datasource-alb"
sidebar_current: "docs-aws-datasource-alb-x"
description: |-
Provides an Application Load Balancer data source.
---
@ -16,7 +16,7 @@ with it, etc.
## Example Usage
```
```hcl
variable "alb_arn" {
type = "string"
default = ""

View File

@ -16,7 +16,7 @@ information specific to the listener in question.
## Example Usage
```
```hcl
variable "listener_arn" {
type = "string"
}

Some files were not shown because too many files have changed in this diff