Revert some work that happened since v0.12-dev branched

This work was done against APIs that were already changed in the branch
before work began, and so it doesn't apply to the v0.12 development work.

To allow v0.12 to merge down to master, we'll revert this work for now and
then re-introduce equivalent functionality, built against the new APIs, in
later commits.
Martin Atkins 2018-10-16 19:48:28 -07:00
parent fd77765154
commit 541952bb8f
136 changed files with 300 additions and 14891 deletions

View File

@@ -6,6 +6,7 @@ import (
"sync"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/hashicorp/terraform/tfdiags"
backendatlas "github.com/hashicorp/terraform/backend/atlas"
@@ -39,7 +40,7 @@ import (
var backends map[string]func() backend.Backend
var backendsLock sync.Mutex
func init() {
func Init(services *disco.Disco) {
// Our hardcoded backends. We don't need to acquire a lock here
// since init() code is serial and can't spawn goroutines.
backends = map[string]func() backend.Backend{
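
The hunks above add the svchost/disco import and replace the package-level init() with an explicit Init(services *disco.Disco) entry point, so callers decide when the backend registry is populated and can inject service discovery. A minimal sketch of how a caller might now seed the registry (illustrative only, not part of this commit; disco.New() is assumed here as the plain constructor for the discovery client):

package main

import (
	backendInit "github.com/hashicorp/terraform/backend/init"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	// Assumed setup: a bare discovery client with no credentials configured.
	services := disco.New()

	// Populate the hardcoded backend registry once, explicitly, instead of
	// relying on Go's package init() running as a side effect of the import.
	backendInit.Init(services)
}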

View File

@@ -16,53 +16,31 @@ func TestInit_backend(t *testing.T) {
Name string
Type string
}{
{
"local",
"*local.Local",
}, {
"remote",
"*remote.Remote",
}, {
"atlas",
"*atlas.Backend",
}, {
"azurerm",
"*azure.Backend",
}, {
"consul",
"*consul.Backend",
}, {
"etcdv3",
"*etcd.Backend",
}, {
"gcs",
"*gcs.Backend",
}, {
"inmem",
"*inmem.Backend",
}, {
"manta",
"*manta.Backend",
}, {
"s3",
"*s3.Backend",
}, {
"swift",
"*swift.Backend",
}, {
"azure",
"init.deprecatedBackendShim",
},
{"local", "*local.Local"},
{"atlas", "*atlas.Backend"},
{"azurerm", "*azure.Backend"},
{"consul", "*consul.Backend"},
{"etcdv3", "*etcd.Backend"},
{"gcs", "*gcs.Backend"},
{"inmem", "*inmem.Backend"},
{"manta", "*manta.Backend"},
{"s3", "*s3.Backend"},
{"swift", "*swift.Backend"},
{"azure", "init.deprecatedBackendShim"},
}
// Make sure we get the requested backend
for _, b := range backends {
f := Backend(b.Name)
bType := reflect.TypeOf(f()).String()
if bType != b.Type {
t.Fatalf("expected backend %q to be %q, got: %q", b.Name, b.Type, bType)
}
t.Run(b.Name, func(t *testing.T) {
f := Backend(b.Name)
if f == nil {
t.Fatalf("backend %q is not present; should be", b.Name)
}
bType := reflect.TypeOf(f()).String()
if bType != b.Type {
t.Fatalf("expected backend %q to be %q, got: %q", b.Name, b.Type, bType)
}
})
}
}
@@ -74,13 +52,7 @@ func TestInit_forceLocalBackend(t *testing.T) {
Name string
Type string
}{
{
"local",
"nil",
}, {
"remote",
"*remote.Remote",
},
{"local", "nil"},
}
// Set the TF_FORCE_LOCAL_BACKEND flag so all enhanced backends will
@@ -88,6 +60,7 @@ func TestInit_forceLocalBackend(t *testing.T) {
if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil {
t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err)
}
defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND")
// Make sure we always get the local backend.
for _, b := range enhancedBackends {
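
The rewritten TestInit_backend above also wraps each table case in t.Run, so every backend check reports as a named subtest and a single failure no longer aborts the rest of the table. The bare pattern looks roughly like this (illustrative sketch only, not taken from the commit):

package init_test

import "testing"

func TestBackendsPresent(t *testing.T) {
	cases := []struct {
		Name string
		Type string
	}{
		{"local", "*local.Local"},
	}
	for _, c := range cases {
		c := c // capture the range variable for the closure
		t.Run(c.Name, func(t *testing.T) {
			// t.Fatalf here fails only this named case, and it can be run
			// directly with: go test -run TestBackendsPresent/local
			if c.Type == "" {
				t.Fatalf("backend %q has no expected type", c.Name)
			}
		})
	}
}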

View File

@@ -1,28 +0,0 @@
// Package legacy contains a backend implementation that can be used
// with the legacy remote state clients.
package legacy
import (
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
// Init updates the backend/init package map of initializers to support
// all the remote state types.
//
// If a type is already in the map, it will not be added. This will allow
// us to slowly convert the legacy types to first-class backends.
func Init(m map[string]backend.InitFn) {
for k := range remote.BuiltinClients {
if _, ok := m[k]; !ok {
// Copy the "k" value since the variable "k" is reused for
// each key (address doesn't change).
typ := k
// Build the factory function to return a backend of typ
m[k] = func() backend.Backend {
return &Backend{Type: typ}
}
}
}
}

View File

@@ -1,34 +0,0 @@
package legacy
import (
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
func TestInit(t *testing.T) {
m := make(map[string]backend.InitFn)
Init(m)
for k, _ := range remote.BuiltinClients {
b, ok := m[k]
if !ok {
t.Fatalf("missing: %s", k)
}
if typ := b().(*Backend).Type; typ != k {
t.Fatalf("bad type: %s", typ)
}
}
}
func TestInit_ignoreExisting(t *testing.T) {
m := make(map[string]backend.InitFn)
m["local"] = nil
Init(m)
if v, ok := m["local"]; !ok || v != nil {
t.Fatalf("bad: %#v", m)
}
}

View File

@@ -15,14 +15,14 @@ import (
)
func TestLocal_impl(t *testing.T) {
var _ backend.Enhanced = New()
var _ backend.Local = New()
var _ backend.CLI = New()
var _ backend.Enhanced = new(Local)
var _ backend.Local = new(Local)
var _ backend.CLI = new(Local)
}
func TestLocal_backend(t *testing.T) {
defer testTmpDir(t)()
b := New()
b := &Local{}
backend.TestBackendStates(t, b)
backend.TestBackendStateLocks(t, b, b)
}
@@ -49,7 +49,7 @@ func checkState(t *testing.T, path, expected string) {
}
func TestLocal_StatePaths(t *testing.T) {
b := New()
b := &Local{}
// Test the defaults
path, out, back := b.StatePaths("")
@@ -207,11 +207,13 @@ func (b *testDelegateBackend) DeleteWorkspace(name string) error {
// verify that the MultiState methods are dispatched to the correct Backend.
func TestLocal_multiStateBackend(t *testing.T) {
// assign a separate backend where we can read the state
b := NewWithBackend(&testDelegateBackend{
stateErr: true,
statesErr: true,
deleteErr: true,
})
b := &Local{
Backend: &testDelegateBackend{
stateErr: true,
statesErr: true,
deleteErr: true,
},
}
if _, err := b.StateMgr("test"); err != errTestDelegateState {
t.Fatal("expected errTestDelegateState, got:", err)

View File

@@ -89,7 +89,7 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr
// TestNewLocalSingle is a factory for creating a TestLocalSingleState.
// This function matches the signature required for backend/init.
func TestNewLocalSingle() backend.Backend {
return &TestLocalSingleState{Local: &Local{}}
return &TestLocalSingleState{}
}
// TestLocalSingleState is a backend implementation that wraps Local
@@ -99,7 +99,7 @@ func TestNewLocalSingle() backend.Backend {
// This isn't an actual use case, this is exported just to provide a
// easy way to test that behavior.
type TestLocalSingleState struct {
*Local
Local
}
func (b *TestLocalSingleState) State(name string) (statemgr.Full, error) {
@@ -118,50 +118,6 @@ func (b *TestLocalSingleState) DeleteState(string) error {
return backend.ErrNamedStatesNotSupported
}
// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState.
// This function matches the signature required for backend/init.
func TestNewLocalNoDefault() backend.Backend {
return &TestLocalNoDefaultState{Local: &Local{}}
}
// TestLocalNoDefaultState is a backend implementation that wraps
// Local and modifies it to support named states, but not the
// default state. It returns ErrDefaultStateNotSupported when the
// DefaultStateName is used.
type TestLocalNoDefaultState struct {
*Local
}
func (b *TestLocalNoDefaultState) State(name string) (state.State, error) {
if name == backend.DefaultStateName {
return nil, backend.ErrDefaultStateNotSupported
}
return b.Local.State(name)
}
func (b *TestLocalNoDefaultState) States() ([]string, error) {
states, err := b.Local.States()
if err != nil {
return nil, err
}
filtered := states[:0]
for _, name := range states {
if name != backend.DefaultStateName {
filtered = append(filtered, name)
}
}
return filtered, nil
}
func (b *TestLocalNoDefaultState) DeleteState(name string) error {
if name == backend.DefaultStateName {
return backend.ErrDefaultStateNotSupported
}
return b.Local.DeleteState(name)
}
func testTempDir(t *testing.T) string {
d, err := ioutil.TempDir("", "tf")
if err != nil {
@@ -174,4 +130,4 @@ func testTempDir(t *testing.T) string {
func testStateFile(t *testing.T, path string, s *states.State) {
stateFile := statemgr.NewFilesystem(path)
stateFile.WriteState(s)
}
}

View File

@@ -1,581 +0,0 @@
package remote
import (
"context"
"fmt"
"log"
"net/http"
"net/url"
"os"
"sort"
"strings"
"sync"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/version"
"github.com/mitchellh/cli"
"github.com/mitchellh/colorstring"
)
const (
defaultHostname = "app.terraform.io"
defaultModuleDepth = -1
defaultParallelism = 10
serviceID = "tfe.v2"
)
// Remote is an implementation of EnhancedBackend that performs all
// operations in a remote backend.
type Remote struct {
// CLI and Colorize control the CLI output. If CLI is nil then no CLI
// output will be done. If CLIColor is nil then no coloring will be done.
CLI cli.Ui
CLIColor *colorstring.Colorize
// ContextOpts are the base context options to set when initializing a
// new Terraform context. Many of these will be overridden or merged by
// Operation. See Operation for more details.
ContextOpts *terraform.ContextOpts
// client is the remote backend API client
client *tfe.Client
// hostname of the remote backend server
hostname string
// organization is the organization that contains the target workspaces
organization string
// workspace is used to map the default workspace to a remote workspace
workspace string
// prefix is used to filter down a set of workspaces that use a single
// configuration
prefix string
// schema defines the configuration for the backend
schema *schema.Backend
// services is used for service discovery
services *disco.Disco
// opLock locks operations
opLock sync.Mutex
}
// New creates a new initialized remote backend.
func New(services *disco.Disco) *Remote {
b := &Remote{
services: services,
}
b.schema = &schema.Backend{
Schema: map[string]*schema.Schema{
"hostname": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: schemaDescriptions["hostname"],
Default: defaultHostname,
},
"organization": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: schemaDescriptions["organization"],
},
"token": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: schemaDescriptions["token"],
},
"workspaces": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Description: schemaDescriptions["workspaces"],
MinItems: 1,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: schemaDescriptions["name"],
},
"prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: schemaDescriptions["prefix"],
},
},
},
},
},
ConfigureFunc: b.configure,
}
return b
}
func (b *Remote) configure(ctx context.Context) error {
d := schema.FromContextBackendConfig(ctx)
// Get the hostname and organization.
b.hostname = d.Get("hostname").(string)
b.organization = d.Get("organization").(string)
// Get and assert the workspaces configuration block.
workspace := d.Get("workspaces").(*schema.Set).List()[0].(map[string]interface{})
// Get the default workspace name and prefix.
b.workspace = workspace["name"].(string)
b.prefix = workspace["prefix"].(string)
// Make sure that we have either a workspace name or a prefix.
if b.workspace == "" && b.prefix == "" {
return fmt.Errorf("either workspace 'name' or 'prefix' is required")
}
// Make sure that only one of workspace name or a prefix is configured.
if b.workspace != "" && b.prefix != "" {
return fmt.Errorf("only one of workspace 'name' or 'prefix' is allowed")
}
// Discover the service URL for this host to confirm that it provides
// a remote backend API and to discover the required base path.
service, err := b.discover(b.hostname)
if err != nil {
return err
}
// Retrieve the token for this host as configured in the credentials
// section of the CLI Config File.
token, err := b.token(b.hostname)
if err != nil {
return err
}
if token == "" {
token = d.Get("token").(string)
}
cfg := &tfe.Config{
Address: service.String(),
BasePath: service.Path,
Token: token,
Headers: make(http.Header),
}
// Set the version header to the current version.
cfg.Headers.Set(version.Header, version.Version)
// Create the remote backend API client.
b.client, err = tfe.NewClient(cfg)
if err != nil {
return err
}
return nil
}
// discover the remote backend API service URL and token.
func (b *Remote) discover(hostname string) (*url.URL, error) {
host, err := svchost.ForComparison(hostname)
if err != nil {
return nil, err
}
service := b.services.DiscoverServiceURL(host, serviceID)
if service == nil {
return nil, fmt.Errorf("host %s does not provide a remote backend API", host)
}
return service, nil
}
// token returns the token for this host as configured in the credentials
// section of the CLI Config File. If no token was configured, an empty
// string will be returned instead.
func (b *Remote) token(hostname string) (string, error) {
host, err := svchost.ForComparison(hostname)
if err != nil {
return "", err
}
creds, err := b.services.CredentialsForHost(host)
if err != nil {
log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
return "", nil
}
if creds != nil {
return creds.Token(), nil
}
return "", nil
}
// Input is called to ask the user for input for completing the configuration.
func (b *Remote) Input(ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
return b.schema.Input(ui, c)
}
// Validate is called once at the beginning with the raw configuration and
// can return a list of warnings and/or errors.
func (b *Remote) Validate(c *terraform.ResourceConfig) ([]string, []error) {
return b.schema.Validate(c)
}
// Configure configures the backend itself with the configuration given.
func (b *Remote) Configure(c *terraform.ResourceConfig) error {
return b.schema.Configure(c)
}
// State returns the latest state of the given remote workspace. The workspace
// will be created if it doesn't exist.
func (b *Remote) State(workspace string) (state.State, error) {
if b.workspace == "" && workspace == backend.DefaultStateName {
return nil, backend.ErrDefaultStateNotSupported
}
if b.prefix == "" && workspace != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
workspaces, err := b.states()
if err != nil {
return nil, fmt.Errorf("Error retrieving workspaces: %v", err)
}
exists := false
for _, name := range workspaces {
if workspace == name {
exists = true
break
}
}
// Configure the remote workspace name.
switch {
case workspace == backend.DefaultStateName:
workspace = b.workspace
case b.prefix != "" && !strings.HasPrefix(workspace, b.prefix):
workspace = b.prefix + workspace
}
if !exists {
options := tfe.WorkspaceCreateOptions{
Name: tfe.String(workspace),
}
// We only set the Terraform Version for the new workspace if this is
// a release candidate or a final release.
if version.Prerelease == "" || strings.HasPrefix(version.Prerelease, "rc") {
options.TerraformVersion = tfe.String(version.String())
}
_, err = b.client.Workspaces.Create(context.Background(), b.organization, options)
if err != nil {
return nil, fmt.Errorf("Error creating workspace %s: %v", workspace, err)
}
}
client := &remoteClient{
client: b.client,
organization: b.organization,
workspace: workspace,
// This is optionally set during Terraform Enterprise runs.
runID: os.Getenv("TFE_RUN_ID"),
}
return &remote.State{Client: client}, nil
}
// DeleteState removes the remote workspace if it exists.
func (b *Remote) DeleteState(workspace string) error {
if b.workspace == "" && workspace == backend.DefaultStateName {
return backend.ErrDefaultStateNotSupported
}
if b.prefix == "" && workspace != backend.DefaultStateName {
return backend.ErrNamedStatesNotSupported
}
// Configure the remote workspace name.
switch {
case workspace == backend.DefaultStateName:
workspace = b.workspace
case b.prefix != "" && !strings.HasPrefix(workspace, b.prefix):
workspace = b.prefix + workspace
}
// Check if the configured organization exists.
_, err := b.client.Organizations.Read(context.Background(), b.organization)
if err != nil {
if err == tfe.ErrResourceNotFound {
return fmt.Errorf("organization %s does not exist", b.organization)
}
return err
}
client := &remoteClient{
client: b.client,
organization: b.organization,
workspace: workspace,
}
return client.Delete()
}
// States returns a filtered list of remote workspace names.
func (b *Remote) States() ([]string, error) {
if b.prefix == "" {
return nil, backend.ErrNamedStatesNotSupported
}
return b.states()
}
func (b *Remote) states() ([]string, error) {
// Check if the configured organization exists.
_, err := b.client.Organizations.Read(context.Background(), b.organization)
if err != nil {
if err == tfe.ErrResourceNotFound {
return nil, fmt.Errorf("organization %s does not exist", b.organization)
}
return nil, err
}
options := tfe.WorkspaceListOptions{}
switch {
case b.workspace != "":
options.Search = tfe.String(b.workspace)
case b.prefix != "":
options.Search = tfe.String(b.prefix)
}
// Create a slice to contain all the names.
var names []string
for {
wl, err := b.client.Workspaces.List(context.Background(), b.organization, options)
if err != nil {
return nil, err
}
for _, w := range wl.Items {
if b.workspace != "" && w.Name == b.workspace {
names = append(names, backend.DefaultStateName)
continue
}
if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) {
names = append(names, strings.TrimPrefix(w.Name, b.prefix))
}
}
// Exit the loop when we've seen all pages.
if wl.CurrentPage >= wl.TotalPages {
break
}
// Update the page number to get the next page.
options.PageNumber = wl.NextPage
}
// Sort the result so we have consistent output.
sort.StringSlice(names).Sort()
return names, nil
}
// Operation implements backend.Enhanced
func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
// Configure the remote workspace name.
switch {
case op.Workspace == backend.DefaultStateName:
op.Workspace = b.workspace
case b.prefix != "" && !strings.HasPrefix(op.Workspace, b.prefix):
op.Workspace = b.prefix + op.Workspace
}
// Determine the function to call for our operation
var f func(context.Context, context.Context, *backend.Operation) (*tfe.Run, error)
switch op.Type {
case backend.OperationTypePlan:
f = b.opPlan
case backend.OperationTypeApply:
f = b.opApply
default:
return nil, fmt.Errorf(
"\n\nThe \"remote\" backend does not support the %q operation.\n"+
"Please use the remote backend web UI for running this operation:\n"+
"https://%s/app/%s/%s", op.Type, b.hostname, b.organization, op.Workspace)
}
// Lock
b.opLock.Lock()
// Build our running operation
// the runninCtx is only used to block until the operation returns.
runningCtx, done := context.WithCancel(context.Background())
runningOp := &backend.RunningOperation{
Context: runningCtx,
PlanEmpty: true,
}
// stopCtx wraps the context passed in, and is used to signal a graceful Stop.
stopCtx, stop := context.WithCancel(ctx)
runningOp.Stop = stop
// cancelCtx is used to cancel the operation immediately, usually
// indicating that the process is exiting.
cancelCtx, cancel := context.WithCancel(context.Background())
runningOp.Cancel = cancel
// Do it.
go func() {
defer done()
defer stop()
defer cancel()
defer b.opLock.Unlock()
r, opErr := f(stopCtx, cancelCtx, op)
if opErr != nil && opErr != context.Canceled {
runningOp.Err = opErr
return
}
if r != nil {
// Retrieve the run to get its current status.
r, err := b.client.Runs.Read(cancelCtx, r.ID)
if err != nil {
runningOp.Err = generalError("error retrieving run", err)
return
}
// Record if there are any changes.
runningOp.PlanEmpty = !r.HasChanges
if opErr == context.Canceled {
runningOp.Err = b.cancel(cancelCtx, op, r)
}
if runningOp.Err == nil && r.Status == tfe.RunErrored {
runningOp.ExitCode = 1
}
}
}()
// Return the running operation.
return runningOp, nil
}
func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
if r.Status == tfe.RunPending && r.Actions.IsCancelable {
// Only ask if the remote operation should be canceled
// if the auto approve flag is not set.
if !op.AutoApprove {
v, err := op.UIIn.Input(&terraform.InputOpts{
Id: "cancel",
Query: "\nDo you want to cancel the pending remote operation?",
Description: "Only 'yes' will be accepted to cancel.",
})
if err != nil {
return generalError("error asking to cancel", err)
}
if v != "yes" {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled)))
}
return nil
}
} else {
if b.CLI != nil {
// Insert a blank line to separate the ouputs.
b.CLI.Output("")
}
}
// Try to cancel the remote operation.
err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{})
if err != nil {
return generalError("error cancelling run", err)
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled)))
}
}
return nil
}
// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so useful
// as a helper to wrap any potentially colored strings.
func (b *Remote) Colorize() *colorstring.Colorize {
	if b.CLIColor != nil {
		return b.CLIColor
	}
	return &colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true,
	}
}
func generalError(msg string, err error) error {
if urlErr, ok := err.(*url.Error); ok {
err = urlErr.Err
}
switch err {
case context.Canceled:
return err
case tfe.ErrResourceNotFound:
return fmt.Errorf(strings.TrimSpace(fmt.Sprintf(notFoundErr, msg, err)))
default:
return fmt.Errorf(strings.TrimSpace(fmt.Sprintf(generalErr, msg, err)))
}
}
const generalErr = `
%s: %v
The configured "remote" backend encountered an unexpected error. Sometimes
this is caused by network connection problems, in which case you could retry
the command. If the issue persists please open a support ticket to get help
resolving the problem.
`
const notFoundErr = `
%s: %v
The configured "remote" backend returns '404 Not Found' errors for resources
that do not exist, as well as for resources that a user doesn't have access
to. When the resource does exists, please check the rights for the used token.
`
const operationCanceled = `
[reset][red]The remote operation was successfully cancelled.[reset]
`
const operationNotCanceled = `
[reset][red]The remote operation was not cancelled.[reset]
`
var schemaDescriptions = map[string]string{
"hostname": "The remote backend hostname to connect to (defaults to app.terraform.io).",
"organization": "The name of the organization containing the targeted workspace(s).",
"token": "The token used to authenticate with the remote backend. If credentials for the\n" +
"host are configured in the CLI Config File, then those will be used instead.",
"workspaces": "Workspaces contains arguments used to filter down to a set of workspaces\n" +
"to work on.",
"name": "A workspace name used to map the default workspace to a named remote workspace.\n" +
"When configured only the default workspace can be used. This option conflicts\n" +
"with \"prefix\"",
"prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" +
"will automatically be prefixed with this prefix. If omitted only the default\n" +
"workspace can be used. This option conflicts with \"name\"",
}

View File

@@ -1,243 +0,0 @@
package remote
import (
"bufio"
"context"
"fmt"
"log"
"strings"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/terraform"
)
func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) {
log.Printf("[INFO] backend/remote: starting Apply operation")
// Retrieve the workspace used to run this operation in.
w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace)
if err != nil {
return nil, generalError("error retrieving workspace", err)
}
if !w.Permissions.CanUpdate {
return nil, fmt.Errorf(strings.TrimSpace(applyErrNoUpdateRights))
}
if w.VCSRepo != nil {
return nil, fmt.Errorf(strings.TrimSpace(applyErrVCSNotSupported))
}
if op.Parallelism != defaultParallelism {
return nil, fmt.Errorf(strings.TrimSpace(applyErrParallelismNotSupported))
}
if op.Plan != nil {
return nil, fmt.Errorf(strings.TrimSpace(applyErrPlanNotSupported))
}
if !op.PlanRefresh {
return nil, fmt.Errorf(strings.TrimSpace(applyErrNoRefreshNotSupported))
}
if op.Targets != nil {
return nil, fmt.Errorf(strings.TrimSpace(applyErrTargetsNotSupported))
}
if op.Variables != nil {
return nil, fmt.Errorf(strings.TrimSpace(
fmt.Sprintf(applyErrVariablesNotSupported, b.hostname, b.organization, op.Workspace)))
}
if (op.Module == nil || op.Module.Config().Dir == "") && !op.Destroy {
return nil, fmt.Errorf(strings.TrimSpace(applyErrNoConfig))
}
// Run the plan phase.
r, err := b.plan(stopCtx, cancelCtx, op, w)
if err != nil {
return r, err
}
// This check is also performed in the plan method to determine if
// the policies should be checked, but we need to check the values
// here again to determine if we are done and should return.
if !r.HasChanges || r.Status == tfe.RunErrored {
return r, nil
}
// Retrieve the run to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("error retrieving run", err)
}
// Return if the run cannot be confirmed.
if !w.AutoApply && !r.Actions.IsConfirmable {
return r, nil
}
// Since we already checked the permissions before creating the run
// this should never happen. But it doesn't hurt to keep this in as
// a safeguard for any unexpected situations.
if !w.AutoApply && !r.Permissions.CanApply {
// Make sure we discard the run if possible.
if r.Actions.IsDiscardable {
err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
if err != nil {
if op.Destroy {
return r, generalError("error disarding destroy", err)
}
return r, generalError("error disarding apply", err)
}
}
return r, fmt.Errorf(strings.TrimSpace(
fmt.Sprintf(applyErrNoApplyRights, b.hostname, b.organization, op.Workspace)))
}
mustConfirm := (op.UIIn != nil && op.UIOut != nil) &&
((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove))
if !w.AutoApply {
if mustConfirm {
opts := &terraform.InputOpts{Id: "approve"}
if op.Destroy {
opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?"
opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
"There is no undo. Only 'yes' will be accepted to confirm."
} else {
opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?"
opts.Description = "Terraform will perform the actions described above.\n" +
"Only 'yes' will be accepted to approve."
}
if err = b.confirm(stopCtx, op, opts, r, "yes"); err != nil {
return r, err
}
}
err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{})
if err != nil {
return r, generalError("error approving the apply command", err)
}
}
// If we don't need to ask for confirmation, insert a blank
// line to separate the ouputs.
if w.AutoApply || !mustConfirm {
if b.CLI != nil {
b.CLI.Output("")
}
}
r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w)
if err != nil {
return r, err
}
logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID)
if err != nil {
return r, generalError("error retrieving logs", err)
}
scanner := bufio.NewScanner(logs)
skip := 0
for scanner.Scan() {
// Skip the first 3 lines to prevent duplicate output.
if skip < 3 {
skip++
continue
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(scanner.Text()))
}
}
if err := scanner.Err(); err != nil {
return r, generalError("error reading logs", err)
}
return r, nil
}
const applyErrNoUpdateRights = `
Insufficient rights to apply changes!
[reset][yellow]The provided credentials have insufficient rights to apply changes. In order
to apply changes at least write permissions on the workspace are required.[reset]
`
const applyErrVCSNotSupported = `
Apply not allowed for workspaces with a VCS connection.
A workspace that is connected to a VCS requires the VCS-driven workflow
to ensure that the VCS remains the single source of truth.
`
const applyErrParallelismNotSupported = `
Custom parallelism values are currently not supported!
The "remote" backend does not support setting a custom parallelism
value at this time.
`
const applyErrPlanNotSupported = `
Applying a saved plan is currently not supported!
The "remote" backend currently requires configuration to be present and
does not accept an existing saved plan as an argument at this time.
`
const applyErrNoRefreshNotSupported = `
Applying without refresh is currently not supported!
Currently the "remote" backend will always do an in-memory refresh of
the Terraform state prior to generating the plan.
`
const applyErrTargetsNotSupported = `
Resource targeting is currently not supported!
The "remote" backend does not support resource targeting at this time.
`
const applyErrVariablesNotSupported = `
Run variables are currently not supported!
The "remote" backend does not support setting run variables at this time.
Currently the only to way to pass variables to the remote backend is by
creating a '*.auto.tfvars' variables file. This file will automatically
be loaded by the "remote" backend when the workspace is configured to use
Terraform v0.10.0 or later.
Additionally you can also set variables on the workspace in the web UI:
https://%s/app/%s/%s/variables
`
const applyErrNoConfig = `
No configuration files found!
Apply requires configuration to be present. Applying without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run 'terraform destroy' which
does not require any configuration files.
`
const applyErrNoApplyRights = `
Insufficient rights to approve the pending changes!
[reset][yellow]There are pending changes, but the provided credentials have insufficient rights
to approve them. The run will be discarded to prevent it from blocking the queue
waiting for external approval. To queue a run that can be approved by someone
else, please use the 'Queue Plan' button in the web UI:
https://%s/app/%s/%s/runs[reset]
`
const applyDefaultHeader = `
[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C
will cancel the remote apply if its still pending. If the apply started it
will stop streaming the logs, but will not stop the apply running remotely.
To view this run in a browser, visit:
https://%s/app/%s/%s/runs/%s[reset]
`

View File

@@ -1,882 +0,0 @@
package remote
import (
"context"
"os"
"os/signal"
"strings"
"syscall"
"testing"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
)
func testOperationApply() *backend.Operation {
return &backend.Operation{
Parallelism: defaultParallelism,
PlanRefresh: true,
Type: backend.OperationTypeApply,
}
}
func TestRemote_applyBasic(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyWithoutPermissions(t *testing.T) {
b := testBackendNoDefault(t)
// Create a named workspace without permissions.
w, err := b.client.Workspaces.Create(
context.Background(),
b.organization,
tfe.WorkspaceCreateOptions{
Name: tfe.String(b.prefix + "prod"),
},
)
if err != nil {
t.Fatalf("error creating named workspace: %v", err)
}
w.Permissions.CanUpdate = false
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Workspace = "prod"
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "insufficient rights to apply changes") {
t.Fatalf("expected a permissions error, got: %v", run.Err)
}
}
func TestRemote_applyWithVCS(t *testing.T) {
b := testBackendNoDefault(t)
// Create a named workspace with a VCS.
_, err := b.client.Workspaces.Create(
context.Background(),
b.organization,
tfe.WorkspaceCreateOptions{
Name: tfe.String(b.prefix + "prod"),
VCSRepo: &tfe.VCSRepoOptions{},
},
)
if err != nil {
t.Fatalf("error creating named workspace: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Workspace = "prod"
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "not allowed for workspaces with a VCS") {
t.Fatalf("expected a VCS error, got: %v", run.Err)
}
}
func TestRemote_applyWithParallelism(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Parallelism = 3
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "parallelism values are currently not supported") {
t.Fatalf("expected a parallelism error, got: %v", run.Err)
}
}
func TestRemote_applyWithPlan(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Plan = &terraform.Plan{}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "saved plan is currently not supported") {
t.Fatalf("expected a saved plan error, got: %v", run.Err)
}
}
func TestRemote_applyWithoutRefresh(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.PlanRefresh = false
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "refresh is currently not supported") {
t.Fatalf("expected a refresh error, got: %v", run.Err)
}
}
func TestRemote_applyWithTarget(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Targets = []string{"null_resource.foo"}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "targeting is currently not supported") {
t.Fatalf("expected a targeting error, got: %v", run.Err)
}
}
func TestRemote_applyWithVariables(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Variables = map[string]interface{}{"foo": "bar"}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "variables are currently not supported") {
t.Fatalf("expected a variables error, got: %v", run.Err)
}
}
func TestRemote_applyNoConfig(t *testing.T) {
b := testBackendDefault(t)
op := testOperationApply()
op.Module = nil
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "configuration files found") {
t.Fatalf("expected configuration files error, got: %v", run.Err)
}
}
func TestRemote_applyNoChanges(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-no-changes")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") {
t.Fatalf("expected no changes in plan summery: %s", output)
}
}
func TestRemote_applyNoApprove(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "no",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "Apply discarded") {
t.Fatalf("expected an apply discarded error, got: %v", run.Err)
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
}
func TestRemote_applyAutoApprove(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "no",
})
op := testOperationApply()
op.AutoApprove = true
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) != 1 {
t.Fatalf("expected an unused answer, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyWithAutoApply(t *testing.T) {
b := testBackendNoDefault(t)
// Create a named workspace that auto applies.
_, err := b.client.Workspaces.Create(
context.Background(),
b.organization,
tfe.WorkspaceCreateOptions{
AutoApply: tfe.Bool(true),
Name: tfe.String(b.prefix + "prod"),
},
)
if err != nil {
t.Fatalf("error creating named workspace: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = "prod"
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) != 1 {
t.Fatalf("expected an unused answer, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyLockTimeout(t *testing.T) {
b := testBackendDefault(t)
ctx := context.Background()
// Retrieve the workspace used to run this operation in.
w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace)
if err != nil {
t.Fatalf("error retrieving workspace: %v", err)
}
// Create a new configuration version.
c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{})
if err != nil {
t.Fatalf("error creating configuration version: %v", err)
}
// Create a pending run to block this run.
_, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{
ConfigurationVersion: c,
Workspace: w,
})
if err != nil {
t.Fatalf("error creating pending run: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
input := testInput(t, map[string]string{
"cancel": "yes",
"approve": "yes",
})
op := testOperationApply()
op.StateLockTimeout = 5 * time.Second
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
_, err = b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, syscall.SIGINT)
select {
case <-sigint:
// Stop redirecting SIGINT signals.
signal.Stop(sigint)
case <-time.After(10 * time.Second):
t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds")
}
if len(input.answers) != 2 {
t.Fatalf("expected unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "Lock timeout exceeded") {
t.Fatalf("missing lock timout error in output: %s", output)
}
if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("unexpected plan summery in output: %s", output)
}
if strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("unexpected apply summery in output: %s", output)
}
}
func TestRemote_applyDestroy(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-destroy")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Destroy = true
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyDestroyNoConfig(t *testing.T) {
b := testBackendDefault(t)
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Destroy = true
op.Module = nil
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("unexpected apply error: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
}
func TestRemote_applyPolicyPass(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-policy-passed")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: true") {
t.Fatalf("missing polic check result in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyPolicyHardFail(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-policy-hard-failed")
defer modCleanup()
input := testInput(t, map[string]string{
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "hard failed") {
t.Fatalf("expected a policy check error, got: %v", run.Err)
}
if len(input.answers) != 1 {
t.Fatalf("expected an unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
if strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("unexpected apply summery in output: %s", output)
}
}
func TestRemote_applyPolicySoftFail(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-policy-soft-failed")
defer modCleanup()
input := testInput(t, map[string]string{
"override": "override",
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) > 0 {
t.Fatalf("expected no unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyPolicySoftFailAutoApprove(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-policy-soft-failed")
defer modCleanup()
input := testInput(t, map[string]string{
"override": "override",
})
op := testOperationApply()
op.AutoApprove = true
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an apply error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "soft failed") {
t.Fatalf("expected a policy check error, got: %v", run.Err)
}
if len(input.answers) != 1 {
t.Fatalf("expected an unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
if strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("unexpected apply summery in output: %s", output)
}
}
func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) {
b := testBackendDefault(t)
// Create a named workspace that auto applies.
_, err := b.client.Workspaces.Create(
context.Background(),
b.organization,
tfe.WorkspaceCreateOptions{
AutoApply: tfe.Bool(true),
Name: tfe.String(b.prefix + "prod"),
},
)
if err != nil {
t.Fatalf("error creating named workspace: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-policy-soft-failed")
defer modCleanup()
input := testInput(t, map[string]string{
"override": "override",
"approve": "yes",
})
op := testOperationApply()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = "prod"
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
if len(input.answers) != 1 {
t.Fatalf("expected an unused answer, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
t.Fatalf("missing apply summery in output: %s", output)
}
}
func TestRemote_applyWithRemoteError(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-with-error")
defer modCleanup()
op := testOperationApply()
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.ExitCode != 1 {
t.Fatalf("expected exit code 1, got %d", run.ExitCode)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "null_resource.foo: 1 error") {
t.Fatalf("missing apply error in output: %s", output)
}
}

View File

@@ -1,305 +0,0 @@
package remote
import (
"bufio"
"context"
"errors"
"fmt"
"math"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/terraform"
)
// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
backoff := math.Pow(2, float64(iter)/5) * min
if backoff > max {
backoff = max
}
return time.Duration(backoff) * time.Millisecond
}
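// Worked example (editorial aside, not present in the deleted source): with
// the min=1000ms / max=3000ms values passed by waitForRun below, iteration 0
// waits 1s, iteration 5 waits 2s, and iteration 10 computes 4s but is capped
// at the 3s maximum.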
func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
started := time.Now()
updated := started
for i := 0; ; i++ {
select {
case <-stopCtx.Done():
return r, stopCtx.Err()
case <-cancelCtx.Done():
return r, cancelCtx.Err()
case <-time.After(backoff(1000, 3000, i)):
// Timer up, show status
}
// Retrieve the run to get its current status.
r, err := b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("error retrieving run", err)
}
// Return if the run is no longer pending.
if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
if i == 0 && opType == "plan" && b.CLI != nil {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
}
if i > 0 && b.CLI != nil {
// Insert a blank line to separate the ouputs.
b.CLI.Output("")
}
return r, nil
}
// Check if 30 seconds have passed since the last update.
current := time.Now()
if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
updated = current
position := 0
elapsed := ""
// Calculate and set the elapsed time.
if i > 0 {
elapsed = fmt.Sprintf(
" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
}
// Retrieve the workspace used to run this operation in.
w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
if err != nil {
return nil, generalError("error retrieving workspace", err)
}
// If the workspace is locked the run will not be queued and we can
// update the status without making any expensive calls.
if w.Locked && w.CurrentRun != nil {
cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
if err != nil {
return r, generalError("error retrieving current run", err)
}
if cr.Status == tfe.RunPending {
b.CLI.Output(b.Colorize().Color(
"Waiting for the manually locked workspace to be unlocked..." + elapsed))
continue
}
}
// Skip checking the workspace queue when we are the current run.
if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
found := false
options := tfe.RunListOptions{}
runlist:
for {
rl, err := b.client.Runs.List(stopCtx, w.ID, options)
if err != nil {
return r, generalError("error retrieving run list", err)
}
// Loop through all runs to calculate the workspace queue position.
for _, item := range rl.Items {
if !found {
if r.ID == item.ID {
found = true
}
continue
}
// If the run is in a final state, ignore it and continue.
switch item.Status {
case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
continue
case tfe.RunPlanned:
if op.Type == backend.OperationTypePlan {
continue
}
}
// Increase the workspace queue position.
position++
// Stop searching when we reached the current run.
if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
break runlist
}
}
// Exit the loop when we've seen all pages.
if rl.CurrentPage >= rl.TotalPages {
break
}
// Update the page number to get the next page.
options.PageNumber = rl.NextPage
}
if position > 0 {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for %d run(s) to finish before being queued...%s",
position,
elapsed,
)))
continue
}
}
options := tfe.RunQueueOptions{}
search:
for {
rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
if err != nil {
return r, generalError("error retrieving queue", err)
}
// Search through all queued items to find our run.
for _, item := range rq.Items {
if r.ID == item.ID {
position = item.PositionInQueue
break search
}
}
// Exit the loop when we've seen all pages.
if rq.CurrentPage >= rq.TotalPages {
break
}
// Update the page number to get the next page.
options.PageNumber = rq.NextPage
}
if position > 0 {
c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
if err != nil {
return r, generalError("error retrieving capacity", err)
}
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for %d queued run(s) to finish before starting...%s",
position-c.Running,
elapsed,
)))
continue
}
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for the %s to start...%s", opType, elapsed)))
}
}
}
func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
if b.CLI != nil {
b.CLI.Output("\n------------------------------------------------------------------------\n")
}
for i, pc := range r.PolicyChecks {
logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID)
if err != nil {
return generalError("error retrieving policy check logs", err)
}
scanner := bufio.NewScanner(logs)
// Retrieve the policy check to get its current status.
pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID)
if err != nil {
return generalError("error retrieving policy check", err)
}
var msgPrefix string
switch pc.Scope {
case tfe.PolicyScopeOrganization:
msgPrefix = "Organization policy check"
case tfe.PolicyScopeWorkspace:
msgPrefix = "Workspace policy check"
default:
msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope)
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
}
for scanner.Scan() {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(scanner.Text()))
}
}
if err := scanner.Err(); err != nil {
return generalError("error reading logs", err)
}
switch pc.Status {
case tfe.PolicyPasses:
if (op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil {
b.CLI.Output("\n------------------------------------------------------------------------")
}
continue
case tfe.PolicyErrored:
return fmt.Errorf(msgPrefix + " errored.")
case tfe.PolicyHardFailed:
return fmt.Errorf(msgPrefix + " hard failed.")
case tfe.PolicySoftFailed:
if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil ||
op.AutoApprove || !pc.Actions.IsOverridable || !pc.Permissions.CanOverride {
return fmt.Errorf(msgPrefix + " soft failed.")
}
default:
return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status)
}
opts := &terraform.InputOpts{
Id: "override",
Query: "\nDo you want to override the soft failed policy check?",
Description: "Only 'override' will be accepted to override.",
}
if err = b.confirm(stopCtx, op, opts, r, "override"); err != nil {
return err
}
if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {
return generalError("error overriding policy check", err)
}
if b.CLI != nil {
b.CLI.Output("------------------------------------------------------------------------")
}
}
return nil
}
func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error {
v, err := op.UIIn.Input(opts)
if err != nil {
return fmt.Errorf("Error asking %s: %v", opts.Id, err)
}
if v != keyword {
// Retrieve the run again to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return generalError("error retrieving run", err)
}
// Make sure we discard the run if possible.
if r.Actions.IsDiscardable {
err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
if err != nil {
if op.Destroy {
return generalError("error disarding destroy", err)
}
return generalError("error disarding apply", err)
}
}
// Even if the run was discarded successfully, we still
// return an error as the apply command was cancelled.
if op.Destroy {
return errors.New("Destroy discarded.")
}
return errors.New("Apply discarded.")
}
return nil
}

View File

@ -1,998 +0,0 @@
package remote
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strings"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/terraform"
)
type mockClient struct {
Applies *mockApplies
ConfigurationVersions *mockConfigurationVersions
Organizations *mockOrganizations
Plans *mockPlans
PolicyChecks *mockPolicyChecks
Runs *mockRuns
StateVersions *mockStateVersions
Workspaces *mockWorkspaces
}
func newMockClient() *mockClient {
c := &mockClient{}
c.Applies = newMockApplies(c)
c.ConfigurationVersions = newMockConfigurationVersions(c)
c.Organizations = newMockOrganizations(c)
c.Plans = newMockPlans(c)
c.PolicyChecks = newMockPolicyChecks(c)
c.Runs = newMockRuns(c)
c.StateVersions = newMockStateVersions(c)
c.Workspaces = newMockWorkspaces(c)
return c
}
type mockApplies struct {
client *mockClient
applies map[string]*tfe.Apply
logs map[string]string
}
func newMockApplies(client *mockClient) *mockApplies {
return &mockApplies{
client: client,
applies: make(map[string]*tfe.Apply),
logs: make(map[string]string),
}
}
// create is a helper function to create a mock apply that uses the configured
// working directory to find the logfile.
func (m *mockApplies) create(cvID, workspaceID string) (*tfe.Apply, error) {
c, ok := m.client.ConfigurationVersions.configVersions[cvID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if c.Speculative {
// Speculative means it's plan-only, so we don't create an Apply.
return nil, nil
}
id := generateID("apply-")
url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id)
a := &tfe.Apply{
ID: id,
LogReadURL: url,
Status: tfe.ApplyPending,
}
w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if w.AutoApply {
a.Status = tfe.ApplyRunning
}
m.logs[url] = filepath.Join(
m.client.ConfigurationVersions.uploadPaths[cvID],
w.WorkingDirectory,
"apply.log",
)
m.applies[a.ID] = a
return a, nil
}
func (m *mockApplies) Read(ctx context.Context, applyID string) (*tfe.Apply, error) {
a, ok := m.applies[applyID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
// Together with the mockLogReader this allows testing queued runs.
if a.Status == tfe.ApplyRunning {
a.Status = tfe.ApplyFinished
}
return a, nil
}
func (m *mockApplies) Logs(ctx context.Context, applyID string) (io.Reader, error) {
a, err := m.Read(ctx, applyID)
if err != nil {
return nil, err
}
logfile, ok := m.logs[a.LogReadURL]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if _, err := os.Stat(logfile); os.IsNotExist(err) {
return bytes.NewBufferString("logfile does not exist"), nil
}
logs, err := ioutil.ReadFile(logfile)
if err != nil {
return nil, err
}
done := func() (bool, error) {
a, err := m.Read(ctx, applyID)
if err != nil {
return false, err
}
if a.Status != tfe.ApplyFinished {
return false, nil
}
return true, nil
}
return &mockLogReader{
done: done,
logs: bytes.NewBuffer(logs),
}, nil
}
type mockConfigurationVersions struct {
client *mockClient
configVersions map[string]*tfe.ConfigurationVersion
uploadPaths map[string]string
uploadURLs map[string]*tfe.ConfigurationVersion
}
func newMockConfigurationVersions(client *mockClient) *mockConfigurationVersions {
return &mockConfigurationVersions{
client: client,
configVersions: make(map[string]*tfe.ConfigurationVersion),
uploadPaths: make(map[string]string),
uploadURLs: make(map[string]*tfe.ConfigurationVersion),
}
}
func (m *mockConfigurationVersions) List(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionListOptions) (*tfe.ConfigurationVersionList, error) {
cvl := &tfe.ConfigurationVersionList{}
for _, cv := range m.configVersions {
cvl.Items = append(cvl.Items, cv)
}
cvl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(cvl.Items),
}
return cvl, nil
}
func (m *mockConfigurationVersions) Create(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionCreateOptions) (*tfe.ConfigurationVersion, error) {
id := generateID("cv-")
url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id)
cv := &tfe.ConfigurationVersion{
ID: id,
Status: tfe.ConfigurationPending,
UploadURL: url,
}
m.configVersions[cv.ID] = cv
m.uploadURLs[url] = cv
return cv, nil
}
func (m *mockConfigurationVersions) Read(ctx context.Context, cvID string) (*tfe.ConfigurationVersion, error) {
cv, ok := m.configVersions[cvID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return cv, nil
}
func (m *mockConfigurationVersions) Upload(ctx context.Context, url, path string) error {
cv, ok := m.uploadURLs[url]
if !ok {
return errors.New("404 not found")
}
m.uploadPaths[cv.ID] = path
cv.Status = tfe.ConfigurationUploaded
return nil
}
// mockInput is a mock implementation of terraform.UIInput.
type mockInput struct {
answers map[string]string
}
func (m *mockInput) Input(opts *terraform.InputOpts) (string, error) {
v, ok := m.answers[opts.Id]
if !ok {
return "", fmt.Errorf("unexpected input request in test: %s", opts.Id)
}
delete(m.answers, opts.Id)
return v, nil
}
type mockOrganizations struct {
client *mockClient
organizations map[string]*tfe.Organization
}
func newMockOrganizations(client *mockClient) *mockOrganizations {
return &mockOrganizations{
client: client,
organizations: make(map[string]*tfe.Organization),
}
}
func (m *mockOrganizations) List(ctx context.Context, options tfe.OrganizationListOptions) (*tfe.OrganizationList, error) {
orgl := &tfe.OrganizationList{}
for _, org := range m.organizations {
orgl.Items = append(orgl.Items, org)
}
orgl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(orgl.Items),
}
return orgl, nil
}
// mockLogReader is a mock logreader that enables testing queued runs.
type mockLogReader struct {
done func() (bool, error)
logs *bytes.Buffer
}
func (m *mockLogReader) Read(l []byte) (int, error) {
for {
if written, err := m.read(l); err != io.ErrNoProgress {
return written, err
}
time.Sleep(500 * time.Millisecond)
}
}
func (m *mockLogReader) read(l []byte) (int, error) {
done, err := m.done()
if err != nil {
return 0, err
}
if !done {
return 0, io.ErrNoProgress
}
return m.logs.Read(l)
}
func (m *mockOrganizations) Create(ctx context.Context, options tfe.OrganizationCreateOptions) (*tfe.Organization, error) {
org := &tfe.Organization{Name: *options.Name}
m.organizations[org.Name] = org
return org, nil
}
func (m *mockOrganizations) Read(ctx context.Context, name string) (*tfe.Organization, error) {
org, ok := m.organizations[name]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return org, nil
}
func (m *mockOrganizations) Update(ctx context.Context, name string, options tfe.OrganizationUpdateOptions) (*tfe.Organization, error) {
org, ok := m.organizations[name]
if !ok {
return nil, tfe.ErrResourceNotFound
}
org.Name = *options.Name
return org, nil
}
func (m *mockOrganizations) Delete(ctx context.Context, name string) error {
delete(m.organizations, name)
return nil
}
func (m *mockOrganizations) Capacity(ctx context.Context, name string) (*tfe.Capacity, error) {
var pending, running int
for _, r := range m.client.Runs.runs {
if r.Status == tfe.RunPending {
pending++
continue
}
running++
}
return &tfe.Capacity{Pending: pending, Running: running}, nil
}
func (m *mockOrganizations) RunQueue(ctx context.Context, name string, options tfe.RunQueueOptions) (*tfe.RunQueue, error) {
rq := &tfe.RunQueue{}
for _, r := range m.client.Runs.runs {
rq.Items = append(rq.Items, r)
}
rq.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(rq.Items),
}
return rq, nil
}
type mockPlans struct {
client *mockClient
logs map[string]string
plans map[string]*tfe.Plan
}
func newMockPlans(client *mockClient) *mockPlans {
return &mockPlans{
client: client,
logs: make(map[string]string),
plans: make(map[string]*tfe.Plan),
}
}
// create is a helper function to create a mock plan that uses the configured
// working directory to find the logfile.
func (m *mockPlans) create(cvID, workspaceID string) (*tfe.Plan, error) {
id := generateID("plan-")
url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id)
p := &tfe.Plan{
ID: id,
LogReadURL: url,
Status: tfe.PlanPending,
}
w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
m.logs[url] = filepath.Join(
m.client.ConfigurationVersions.uploadPaths[cvID],
w.WorkingDirectory,
"plan.log",
)
m.plans[p.ID] = p
return p, nil
}
func (m *mockPlans) Read(ctx context.Context, planID string) (*tfe.Plan, error) {
p, ok := m.plans[planID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
// Together with the mockLogReader this allows testing queued runs.
if p.Status == tfe.PlanRunning {
p.Status = tfe.PlanFinished
}
return p, nil
}
func (m *mockPlans) Logs(ctx context.Context, planID string) (io.Reader, error) {
p, err := m.Read(ctx, planID)
if err != nil {
return nil, err
}
logfile, ok := m.logs[p.LogReadURL]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if _, err := os.Stat(logfile); os.IsNotExist(err) {
return bytes.NewBufferString("logfile does not exist"), nil
}
logs, err := ioutil.ReadFile(logfile)
if err != nil {
return nil, err
}
done := func() (bool, error) {
p, err := m.Read(ctx, planID)
if err != nil {
return false, err
}
if p.Status != tfe.PlanFinished {
return false, nil
}
return true, nil
}
return &mockLogReader{
done: done,
logs: bytes.NewBuffer(logs),
}, nil
}
type mockPolicyChecks struct {
client *mockClient
checks map[string]*tfe.PolicyCheck
logs map[string]string
}
func newMockPolicyChecks(client *mockClient) *mockPolicyChecks {
return &mockPolicyChecks{
client: client,
checks: make(map[string]*tfe.PolicyCheck),
logs: make(map[string]string),
}
}
// create is a helper function to create a mock policy check that uses the
// configured working directory to find the logfile.
func (m *mockPolicyChecks) create(cvID, workspaceID string) (*tfe.PolicyCheck, error) {
id := generateID("pc-")
pc := &tfe.PolicyCheck{
ID: id,
Actions: &tfe.PolicyActions{},
Permissions: &tfe.PolicyPermissions{},
Scope: tfe.PolicyScopeOrganization,
Status: tfe.PolicyPending,
}
w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
logfile := filepath.Join(
m.client.ConfigurationVersions.uploadPaths[cvID],
w.WorkingDirectory,
"policy.log",
)
if _, err := os.Stat(logfile); os.IsNotExist(err) {
return nil, nil
}
m.logs[pc.ID] = logfile
m.checks[pc.ID] = pc
return pc, nil
}
func (m *mockPolicyChecks) List(ctx context.Context, runID string, options tfe.PolicyCheckListOptions) (*tfe.PolicyCheckList, error) {
_, ok := m.client.Runs.runs[runID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
pcl := &tfe.PolicyCheckList{}
for _, pc := range m.checks {
pcl.Items = append(pcl.Items, pc)
}
pcl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(pcl.Items),
}
return pcl, nil
}
func (m *mockPolicyChecks) Read(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) {
pc, ok := m.checks[policyCheckID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
logfile, ok := m.logs[pc.ID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if _, err := os.Stat(logfile); os.IsNotExist(err) {
return nil, fmt.Errorf("logfile does not exist")
}
logs, err := ioutil.ReadFile(logfile)
if err != nil {
return nil, err
}
switch {
case bytes.Contains(logs, []byte("Sentinel Result: true")):
pc.Status = tfe.PolicyPasses
case bytes.Contains(logs, []byte("Sentinel Result: false")):
switch {
case bytes.Contains(logs, []byte("hard-mandatory")):
pc.Status = tfe.PolicyHardFailed
case bytes.Contains(logs, []byte("soft-mandatory")):
pc.Actions.IsOverridable = true
pc.Permissions.CanOverride = true
pc.Status = tfe.PolicySoftFailed
}
default:
// As this is an unexpected state, we say the policy errored.
pc.Status = tfe.PolicyErrored
}
return pc, nil
}
func (m *mockPolicyChecks) Override(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) {
pc, ok := m.checks[policyCheckID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
pc.Status = tfe.PolicyOverridden
return pc, nil
}
func (m *mockPolicyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) {
pc, ok := m.checks[policyCheckID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
logfile, ok := m.logs[pc.ID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if _, err := os.Stat(logfile); os.IsNotExist(err) {
return bytes.NewBufferString("logfile does not exist"), nil
}
logs, err := ioutil.ReadFile(logfile)
if err != nil {
return nil, err
}
switch {
case bytes.Contains(logs, []byte("Sentinel Result: true")):
pc.Status = tfe.PolicyPasses
case bytes.Contains(logs, []byte("Sentinel Result: false")):
switch {
case bytes.Contains(logs, []byte("hard-mandatory")):
pc.Status = tfe.PolicyHardFailed
case bytes.Contains(logs, []byte("soft-mandatory")):
pc.Actions.IsOverridable = true
pc.Permissions.CanOverride = true
pc.Status = tfe.PolicySoftFailed
}
default:
// As this is an unexpected state, we say the policy errored.
pc.Status = tfe.PolicyErrored
}
return bytes.NewBuffer(logs), nil
}
type mockRuns struct {
client *mockClient
runs map[string]*tfe.Run
workspaces map[string][]*tfe.Run
}
func newMockRuns(client *mockClient) *mockRuns {
return &mockRuns{
client: client,
runs: make(map[string]*tfe.Run),
workspaces: make(map[string][]*tfe.Run),
}
}
func (m *mockRuns) List(ctx context.Context, workspaceID string, options tfe.RunListOptions) (*tfe.RunList, error) {
w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
rl := &tfe.RunList{}
for _, r := range m.workspaces[w.ID] {
rl.Items = append(rl.Items, r)
}
rl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(rl.Items),
}
return rl, nil
}
func (m *mockRuns) Create(ctx context.Context, options tfe.RunCreateOptions) (*tfe.Run, error) {
a, err := m.client.Applies.create(options.ConfigurationVersion.ID, options.Workspace.ID)
if err != nil {
return nil, err
}
p, err := m.client.Plans.create(options.ConfigurationVersion.ID, options.Workspace.ID)
if err != nil {
return nil, err
}
pc, err := m.client.PolicyChecks.create(options.ConfigurationVersion.ID, options.Workspace.ID)
if err != nil {
return nil, err
}
r := &tfe.Run{
ID: generateID("run-"),
Actions: &tfe.RunActions{IsCancelable: true},
Apply: a,
HasChanges: false,
Permissions: &tfe.RunPermissions{},
Plan: p,
Status: tfe.RunPending,
}
if pc != nil {
r.PolicyChecks = []*tfe.PolicyCheck{pc}
}
if options.IsDestroy != nil {
r.IsDestroy = *options.IsDestroy
}
w, ok := m.client.Workspaces.workspaceIDs[options.Workspace.ID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if w.CurrentRun == nil {
w.CurrentRun = r
}
m.runs[r.ID] = r
m.workspaces[options.Workspace.ID] = append(m.workspaces[options.Workspace.ID], r)
return r, nil
}
func (m *mockRuns) Read(ctx context.Context, runID string) (*tfe.Run, error) {
r, ok := m.runs[runID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
pending := false
for _, r := range m.runs {
if r.ID != runID && r.Status == tfe.RunPending {
pending = true
break
}
}
if !pending && r.Status == tfe.RunPending {
// Only update the status if there are no other pending runs.
r.Status = tfe.RunPlanning
r.Plan.Status = tfe.PlanRunning
}
logs, _ := ioutil.ReadFile(m.client.Plans.logs[r.Plan.LogReadURL])
if r.Plan.Status == tfe.PlanFinished {
if r.IsDestroy || bytes.Contains(logs, []byte("1 to add, 0 to change, 0 to destroy")) {
r.Actions.IsCancelable = false
r.Actions.IsConfirmable = true
r.HasChanges = true
r.Permissions.CanApply = true
}
if bytes.Contains(logs, []byte("null_resource.foo: 1 error")) {
r.Actions.IsCancelable = false
r.HasChanges = false
r.Status = tfe.RunErrored
}
}
return r, nil
}
func (m *mockRuns) Apply(ctx context.Context, runID string, options tfe.RunApplyOptions) error {
r, ok := m.runs[runID]
if !ok {
return tfe.ErrResourceNotFound
}
if r.Status != tfe.RunPending {
// Only update the status if the run is not pending anymore.
r.Status = tfe.RunApplying
r.Apply.Status = tfe.ApplyRunning
}
return nil
}
func (m *mockRuns) Cancel(ctx context.Context, runID string, options tfe.RunCancelOptions) error {
panic("not implemented")
}
func (m *mockRuns) ForceCancel(ctx context.Context, runID string, options tfe.RunForceCancelOptions) error {
panic("not implemented")
}
func (m *mockRuns) Discard(ctx context.Context, runID string, options tfe.RunDiscardOptions) error {
panic("not implemented")
}
type mockStateVersions struct {
client *mockClient
states map[string][]byte
stateVersions map[string]*tfe.StateVersion
workspaces map[string][]string
}
func newMockStateVersions(client *mockClient) *mockStateVersions {
return &mockStateVersions{
client: client,
states: make(map[string][]byte),
stateVersions: make(map[string]*tfe.StateVersion),
workspaces: make(map[string][]string),
}
}
func (m *mockStateVersions) List(ctx context.Context, options tfe.StateVersionListOptions) (*tfe.StateVersionList, error) {
svl := &tfe.StateVersionList{}
for _, sv := range m.stateVersions {
svl.Items = append(svl.Items, sv)
}
svl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 1,
PreviousPage: 1,
TotalPages: 1,
TotalCount: len(svl.Items),
}
return svl, nil
}
func (m *mockStateVersions) Create(ctx context.Context, workspaceID string, options tfe.StateVersionCreateOptions) (*tfe.StateVersion, error) {
id := generateID("sv-")
runID := os.Getenv("TFE_RUN_ID")
url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id)
if runID != "" && (options.Run == nil || runID != options.Run.ID) {
return nil, fmt.Errorf("option.Run.ID does not contain the ID exported by TFE_RUN_ID")
}
sv := &tfe.StateVersion{
ID: id,
DownloadURL: url,
Serial: *options.Serial,
}
state, err := base64.StdEncoding.DecodeString(*options.State)
if err != nil {
return nil, err
}
m.states[sv.DownloadURL] = state
m.stateVersions[sv.ID] = sv
m.workspaces[workspaceID] = append(m.workspaces[workspaceID], sv.ID)
return sv, nil
}
func (m *mockStateVersions) Read(ctx context.Context, svID string) (*tfe.StateVersion, error) {
sv, ok := m.stateVersions[svID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return sv, nil
}
func (m *mockStateVersions) Current(ctx context.Context, workspaceID string) (*tfe.StateVersion, error) {
w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
svs, ok := m.workspaces[w.ID]
if !ok || len(svs) == 0 {
return nil, tfe.ErrResourceNotFound
}
sv, ok := m.stateVersions[svs[len(svs)-1]]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return sv, nil
}
func (m *mockStateVersions) Download(ctx context.Context, url string) ([]byte, error) {
state, ok := m.states[url]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return state, nil
}
type mockWorkspaces struct {
client *mockClient
workspaceIDs map[string]*tfe.Workspace
workspaceNames map[string]*tfe.Workspace
}
func newMockWorkspaces(client *mockClient) *mockWorkspaces {
return &mockWorkspaces{
client: client,
workspaceIDs: make(map[string]*tfe.Workspace),
workspaceNames: make(map[string]*tfe.Workspace),
}
}
func (m *mockWorkspaces) List(ctx context.Context, organization string, options tfe.WorkspaceListOptions) (*tfe.WorkspaceList, error) {
dummyWorkspaces := 10
wl := &tfe.WorkspaceList{}
// Get the prefix from the search options.
prefix := ""
if options.Search != nil {
prefix = *options.Search
}
// Get all the workspaces that match the prefix.
var ws []*tfe.Workspace
for _, w := range m.workspaceIDs {
if strings.HasPrefix(w.Name, prefix) {
ws = append(ws, w)
}
}
// Return an empty result if we have no matches.
if len(ws) == 0 {
wl.Pagination = &tfe.Pagination{
CurrentPage: 1,
}
return wl, nil
}
// Return dummy workspaces for the first page to test pagination.
if options.PageNumber <= 1 {
for i := 0; i < dummyWorkspaces; i++ {
wl.Items = append(wl.Items, &tfe.Workspace{
ID: generateID("ws-"),
Name: fmt.Sprintf("dummy-workspace-%d", i),
})
}
wl.Pagination = &tfe.Pagination{
CurrentPage: 1,
NextPage: 2,
TotalPages: 2,
TotalCount: len(wl.Items) + len(ws),
}
return wl, nil
}
// Return the actual workspaces that matched as the second page.
wl.Items = ws
wl.Pagination = &tfe.Pagination{
CurrentPage: 2,
PreviousPage: 1,
TotalPages: 2,
TotalCount: len(wl.Items) + dummyWorkspaces,
}
return wl, nil
}
func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) {
w := &tfe.Workspace{
ID: generateID("ws-"),
Name: *options.Name,
Permissions: &tfe.WorkspacePermissions{
CanQueueRun: true,
CanUpdate: true,
},
}
if options.AutoApply != nil {
w.AutoApply = *options.AutoApply
}
if options.VCSRepo != nil {
w.VCSRepo = &tfe.VCSRepo{}
}
m.workspaceIDs[w.ID] = w
m.workspaceNames[w.Name] = w
return w, nil
}
func (m *mockWorkspaces) Read(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) {
w, ok := m.workspaceNames[workspace]
if !ok {
return nil, tfe.ErrResourceNotFound
}
return w, nil
}
func (m *mockWorkspaces) Update(ctx context.Context, organization, workspace string, options tfe.WorkspaceUpdateOptions) (*tfe.Workspace, error) {
w, ok := m.workspaceNames[workspace]
if !ok {
return nil, tfe.ErrResourceNotFound
}
if options.Name != nil {
w.Name = *options.Name
}
if options.TerraformVersion != nil {
w.TerraformVersion = *options.TerraformVersion
}
if options.WorkingDirectory != nil {
w.WorkingDirectory = *options.WorkingDirectory
}
delete(m.workspaceNames, workspace)
m.workspaceNames[w.Name] = w
return w, nil
}
func (m *mockWorkspaces) Delete(ctx context.Context, organization, workspace string) error {
if w, ok := m.workspaceNames[workspace]; ok {
delete(m.workspaceIDs, w.ID)
}
delete(m.workspaceNames, workspace)
return nil
}
func (m *mockWorkspaces) Lock(ctx context.Context, workspaceID string, options tfe.WorkspaceLockOptions) (*tfe.Workspace, error) {
w, ok := m.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
w.Locked = true
return w, nil
}
func (m *mockWorkspaces) Unlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) {
w, ok := m.workspaceIDs[workspaceID]
if !ok {
return nil, tfe.ErrResourceNotFound
}
w.Locked = false
return w, nil
}
func (m *mockWorkspaces) AssignSSHKey(ctx context.Context, workspaceID string, options tfe.WorkspaceAssignSSHKeyOptions) (*tfe.Workspace, error) {
panic("not implemented")
}
func (m *mockWorkspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*tfe.Workspace, error) {
panic("not implemented")
}
const alphanumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func generateID(s string) string {
b := make([]byte, 16)
for i := range b {
b[i] = alphanumeric[rand.Intn(len(alphanumeric))]
}
return s + string(b)
}
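// NOTE: the following test is an illustrative sketch only and is not part of
// the original file. It shows how the mocks above compose to simulate the
// basic run lifecycle: create a workspace and configuration version, then
// queue a run and observe that it starts out pending. The test name is an
// assumption, and it relies on the standard "testing" package being imported.
func TestMockClient_runLifecycle(t *testing.T) {
c := newMockClient()
ctx := context.Background()
// Create a workspace and a configuration version, mirroring what the
// remote backend does before queueing a run.
w, err := c.Workspaces.Create(ctx, "hashicorp", tfe.WorkspaceCreateOptions{
Name: tfe.String("prod"),
})
if err != nil {
t.Fatalf("error creating workspace: %v", err)
}
cv, err := c.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{})
if err != nil {
t.Fatalf("error creating configuration version: %v", err)
}
// Queue a run; with no other pending runs it should be created pending.
r, err := c.Runs.Create(ctx, tfe.RunCreateOptions{
ConfigurationVersion: cv,
Workspace: w,
})
if err != nil {
t.Fatalf("error creating run: %v", err)
}
if r.Status != tfe.RunPending {
t.Fatalf("expected a pending run, got: %s", r.Status)
}
}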

View File

@ -1,319 +0,0 @@
package remote
import (
"bufio"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"syscall"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
)
func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) {
log.Printf("[INFO] backend/remote: starting Plan operation")
// Retrieve the workspace used to run this operation in.
w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace)
if err != nil {
return nil, generalError("error retrieving workspace", err)
}
if !w.Permissions.CanQueueRun {
return nil, fmt.Errorf(strings.TrimSpace(fmt.Sprintf(planErrNoQueueRunRights)))
}
if op.ModuleDepth != defaultModuleDepth {
return nil, fmt.Errorf(strings.TrimSpace(planErrModuleDepthNotSupported))
}
if op.Parallelism != defaultParallelism {
return nil, fmt.Errorf(strings.TrimSpace(planErrParallelismNotSupported))
}
if op.Plan != nil {
return nil, fmt.Errorf(strings.TrimSpace(planErrPlanNotSupported))
}
if op.PlanOutPath != "" {
return nil, fmt.Errorf(strings.TrimSpace(planErrOutPathNotSupported))
}
if !op.PlanRefresh {
return nil, fmt.Errorf(strings.TrimSpace(planErrNoRefreshNotSupported))
}
if op.Targets != nil {
return nil, fmt.Errorf(strings.TrimSpace(planErrTargetsNotSupported))
}
if op.Variables != nil {
return nil, fmt.Errorf(strings.TrimSpace(
fmt.Sprintf(planErrVariablesNotSupported, b.hostname, b.organization, op.Workspace)))
}
if (op.Module == nil || op.Module.Config().Dir == "") && !op.Destroy {
return nil, fmt.Errorf(strings.TrimSpace(planErrNoConfig))
}
return b.plan(stopCtx, cancelCtx, op, w)
}
func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) {
configOptions := tfe.ConfigurationVersionCreateOptions{
AutoQueueRuns: tfe.Bool(false),
Speculative: tfe.Bool(op.Type == backend.OperationTypePlan),
}
cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions)
if err != nil {
return nil, generalError("error creating configuration version", err)
}
var configDir string
if op.Module != nil && op.Module.Config().Dir != "" {
// Make sure to take the working directory into account by removing
// the working directory from the current path. This will result in
// a path that points to the expected root of the workspace.
configDir = filepath.Clean(strings.TrimSuffix(
filepath.Clean(op.Module.Config().Dir),
filepath.Clean(w.WorkingDirectory),
))
} else {
// We did a check earlier to make sure we either have a config dir,
// or the plan is run with -destroy. So this else clause will only
// be executed when we are destroying and don't need the config.
configDir, err = ioutil.TempDir("", "tf")
if err != nil {
return nil, generalError("error creating temporary directory", err)
}
defer os.RemoveAll(configDir)
// Make sure the configured working directory exists.
err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700)
if err != nil {
return nil, generalError(
"error creating temporary working directory", err)
}
}
err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir)
if err != nil {
return nil, generalError("error uploading configuration files", err)
}
uploaded := false
for i := 0; i < 60 && !uploaded; i++ {
select {
case <-stopCtx.Done():
return nil, context.Canceled
case <-cancelCtx.Done():
return nil, context.Canceled
case <-time.After(500 * time.Millisecond):
cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID)
if err != nil {
return nil, generalError("error retrieving configuration version", err)
}
if cv.Status == tfe.ConfigurationUploaded {
uploaded = true
}
}
}
if !uploaded {
return nil, generalError(
"error uploading configuration files", errors.New("operation timed out"))
}
runOptions := tfe.RunCreateOptions{
IsDestroy: tfe.Bool(op.Destroy),
Message: tfe.String("Queued manually using Terraform"),
ConfigurationVersion: cv,
Workspace: w,
}
r, err := b.client.Runs.Create(stopCtx, runOptions)
if err != nil {
return r, generalError("error creating run", err)
}
// When the lock timeout is set, start a goroutine that interrupts
// the operation if the run is still pending when the timeout expires.
if op.StateLockTimeout > 0 {
go func() {
select {
case <-stopCtx.Done():
return
case <-cancelCtx.Done():
return
case <-time.After(op.StateLockTimeout):
// Retrieve the run to get its current status.
r, err := b.client.Runs.Read(cancelCtx, r.ID)
if err != nil {
log.Printf("[ERROR] error reading run: %v", err)
return
}
if r.Status == tfe.RunPending && r.Actions.IsCancelable {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr)))
}
// We abuse the auto approve flag to indicate that we do not
// want to ask if the remote operation should be canceled.
op.AutoApprove = true
p, err := os.FindProcess(os.Getpid())
if err != nil {
log.Printf("[ERROR] error searching process ID: %v", err)
return
}
p.Signal(syscall.SIGINT)
}
}
}()
}
if b.CLI != nil {
header := planDefaultHeader
if op.Type == backend.OperationTypeApply {
header = applyDefaultHeader
}
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf(
header, b.hostname, b.organization, op.Workspace, r.ID)) + "\n"))
}
r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w)
if err != nil {
return r, err
}
logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID)
if err != nil {
return r, generalError("error retrieving logs", err)
}
scanner := bufio.NewScanner(logs)
for scanner.Scan() {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(scanner.Text()))
}
}
if err := scanner.Err(); err != nil {
return r, generalError("error reading logs", err)
}
// Retrieve the run to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("error retrieving run", err)
}
// Return if there are no changes or the run errored. We return
// without an error, even if the run errored, as the error is
// already displayed by the output of the remote run.
if !r.HasChanges || r.Status == tfe.RunErrored {
return r, nil
}
// Check any configured sentinel policies.
if len(r.PolicyChecks) > 0 {
err = b.checkPolicy(stopCtx, cancelCtx, op, r)
if err != nil {
return r, err
}
}
return r, nil
}
const planErrNoQueueRunRights = `
Insufficient rights to generate a plan!
[reset][yellow]The provided credentials have insufficient rights to generate a plan. In order
to generate plans, at least plan permissions on the workspace are required.[reset]
`
const planErrModuleDepthNotSupported = `
Custom module depths are currently not supported!
The "remote" backend does not support setting a custom module
depth at this time.
`
const planErrParallelismNotSupported = `
Custom parallelism values are currently not supported!
The "remote" backend does not support setting a custom parallelism
value at this time.
`
const planErrPlanNotSupported = `
Displaying a saved plan is currently not supported!
The "remote" backend currently requires configuration to be present and
does not accept an existing saved plan as an argument at this time.
`
const planErrOutPathNotSupported = `
Saving a generated plan is currently not supported!
The "remote" backend does not support saving the generated execution
plan locally at this time.
`
const planErrNoRefreshNotSupported = `
Planning without refresh is currently not supported!
Currently the "remote" backend will always do an in-memory refresh of
the Terraform state prior to generating the plan.
`
const planErrTargetsNotSupported = `
Resource targeting is currently not supported!
The "remote" backend does not support resource targeting at this time.
`
const planErrVariablesNotSupported = `
Run variables are currently not supported!
The "remote" backend does not support setting run variables at this time.
Currently the only way to pass variables to the remote backend is by
creating a '*.auto.tfvars' variables file. This file will automatically
be loaded by the "remote" backend when the workspace is configured to use
Terraform v0.10.0 or later.
Additionally you can also set variables on the workspace in the web UI:
https://%s/app/%s/%s/variables
`
const planErrNoConfig = `
No configuration files found!
Plan requires configuration to be present. Planning without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run plan with the "-destroy"
flag or create a single empty configuration file. Otherwise, please create
a Terraform configuration file in the path being executed and try again.
`
const planDefaultHeader = `
[reset][yellow]Running plan in the remote backend. Output will stream here. Pressing Ctrl-C
will stop streaming the logs, but will not stop the plan running remotely.
To view this run in a browser, visit:
https://%s/app/%s/%s/runs/%s[reset]
`
// The newline in this error is to make it look good in the CLI!
const lockTimeoutErr = `
[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation.
[reset]
`

View File

@ -1,597 +0,0 @@
package remote
import (
"context"
"os"
"os/signal"
"strings"
"syscall"
"testing"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
)
func testOperationPlan() *backend.Operation {
return &backend.Operation{
ModuleDepth: defaultModuleDepth,
Parallelism: defaultParallelism,
PlanRefresh: true,
Type: backend.OperationTypePlan,
}
}
func TestRemote_planBasic(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatal("expected a non-empty plan")
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
}
func TestRemote_planWithoutPermissions(t *testing.T) {
b := testBackendNoDefault(t)
// Create a named workspace without permissions.
w, err := b.client.Workspaces.Create(
context.Background(),
b.organization,
tfe.WorkspaceCreateOptions{
Name: tfe.String(b.prefix + "prod"),
},
)
if err != nil {
t.Fatalf("error creating named workspace: %v", err)
}
w.Permissions.CanQueueRun = false
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Workspace = "prod"
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "insufficient rights to generate a plan") {
t.Fatalf("expected a permissions error, got: %v", run.Err)
}
}
func TestRemote_planWithModuleDepth(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.ModuleDepth = 1
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "module depths are currently not supported") {
t.Fatalf("expected a module depth error, got: %v", run.Err)
}
}
func TestRemote_planWithParallelism(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Parallelism = 3
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "parallelism values are currently not supported") {
t.Fatalf("expected a parallelism error, got: %v", run.Err)
}
}
func TestRemote_planWithPlan(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Plan = &terraform.Plan{}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "saved plan is currently not supported") {
t.Fatalf("expected a saved plan error, got: %v", run.Err)
}
}
func TestRemote_planWithPath(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.PlanOutPath = "./test-fixtures/plan"
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "generated plan is currently not supported") {
t.Fatalf("expected a generated plan error, got: %v", run.Err)
}
}
func TestRemote_planWithoutRefresh(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.PlanRefresh = false
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "refresh is currently not supported") {
t.Fatalf("expected a refresh error, got: %v", run.Err)
}
}
func TestRemote_planWithTarget(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Targets = []string{"null_resource.foo"}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "targeting is currently not supported") {
t.Fatalf("expected a targeting error, got: %v", run.Err)
}
}
func TestRemote_planWithVariables(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Variables = map[string]interface{}{"foo": "bar"}
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected an plan error, got: %v", run.Err)
}
if !strings.Contains(run.Err.Error(), "variables are currently not supported") {
t.Fatalf("expected a variables error, got: %v", run.Err)
}
}
func TestRemote_planNoConfig(t *testing.T) {
b := testBackendDefault(t)
op := testOperationPlan()
op.Module = nil
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "configuration files found") {
t.Fatalf("expected configuration files error, got: %v", run.Err)
}
}
func TestRemote_planLockTimeout(t *testing.T) {
b := testBackendDefault(t)
ctx := context.Background()
// Retrieve the workspace used to run this operation in.
w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace)
if err != nil {
t.Fatalf("error retrieving workspace: %v", err)
}
// Create a new configuration version.
c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{})
if err != nil {
t.Fatalf("error creating configuration version: %v", err)
}
// Create a pending run to block this run.
_, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{
ConfigurationVersion: c,
Workspace: w,
})
if err != nil {
t.Fatalf("error creating pending run: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
input := testInput(t, map[string]string{
"cancel": "yes",
"approve": "yes",
})
op := testOperationPlan()
op.StateLockTimeout = 5 * time.Second
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
_, err = b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, syscall.SIGINT)
select {
case <-sigint:
// Stop redirecting SIGINT signals.
signal.Stop(sigint)
case <-time.After(10 * time.Second):
t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds")
}
if len(input.answers) != 2 {
t.Fatalf("expected unused answers, got: %v", input.answers)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "Lock timeout exceeded") {
t.Fatalf("missing lock timout error in output: %s", output)
}
if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("unexpected plan summery in output: %s", output)
}
}
func TestRemote_planDestroy(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Destroy = true
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("unexpected plan error: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
}
func TestRemote_planDestroyNoConfig(t *testing.T) {
b := testBackendDefault(t)
op := testOperationPlan()
op.Destroy = true
op.Module = nil
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("unexpected plan error: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
}
func TestRemote_planWithWorkingDirectory(t *testing.T) {
b := testBackendDefault(t)
options := tfe.WorkspaceUpdateOptions{
WorkingDirectory: tfe.String("terraform"),
}
// Configure the workspace to use a custom working directory.
_, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options)
if err != nil {
t.Fatalf("error configuring working directory: %v", err)
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-with-working-directory/terraform")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
}
func TestRemote_planPolicyPass(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-passed")
defer modCleanup()
input := testInput(t, map[string]string{})
op := testOperationPlan()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.PlanEmpty {
t.Fatalf("expected a non-empty plan")
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: true") {
t.Fatalf("missing polic check result in output: %s", output)
}
}
func TestRemote_planPolicyHardFail(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-hard-failed")
defer modCleanup()
input := testInput(t, map[string]string{})
op := testOperationPlan()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "hard failed") {
t.Fatalf("expected a policy check error, got: %v", run.Err)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
}
func TestRemote_planPolicySoftFail(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-soft-failed")
defer modCleanup()
input := testInput(t, map[string]string{})
op := testOperationPlan()
op.Module = mod
op.UIIn = input
op.UIOut = b.CLI
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err == nil {
t.Fatalf("expected a plan error, got: %v", run.Err)
}
if !run.PlanEmpty {
t.Fatalf("expected plan to be empty")
}
if !strings.Contains(run.Err.Error(), "soft failed") {
t.Fatalf("expected a policy check error, got: %v", run.Err)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
t.Fatalf("missing plan summery in output: %s", output)
}
if !strings.Contains(output, "Sentinel Result: false") {
t.Fatalf("missing policy check result in output: %s", output)
}
}
func TestRemote_planWithRemoteError(t *testing.T) {
b := testBackendDefault(t)
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-with-error")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.Workspace = backend.DefaultStateName
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("error starting operation: %v", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("error running operation: %v", run.Err)
}
if run.ExitCode != 1 {
t.Fatalf("expected exit code 1, got %d", run.ExitCode)
}
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, "null_resource.foo: 1 error") {
t.Fatalf("missing plan error in output: %s", output)
}
}

View File

@ -1,181 +0,0 @@
package remote
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"fmt"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
type remoteClient struct {
client *tfe.Client
lockInfo *state.LockInfo
organization string
runID string
workspace string
}
// Get the remote state.
func (r *remoteClient) Get() (*remote.Payload, error) {
ctx := context.Background()
// Retrieve the workspace for which to get the current state.
w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace)
if err != nil {
if err == tfe.ErrResourceNotFound {
// If no state exists, then return nil.
return nil, nil
}
return nil, fmt.Errorf("Error retrieving workspace: %v", err)
}
sv, err := r.client.StateVersions.Current(ctx, w.ID)
if err != nil {
if err == tfe.ErrResourceNotFound {
// If no state exists, then return nil.
return nil, nil
}
return nil, fmt.Errorf("Error retrieving remote state: %v", err)
}
state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL)
if err != nil {
return nil, fmt.Errorf("Error downloading remote state: %v", err)
}
// If the state is empty, then return nil.
if len(state) == 0 {
return nil, nil
}
// Get the MD5 checksum of the state.
sum := md5.Sum(state)
return &remote.Payload{
Data: state,
MD5: sum[:],
}, nil
}
// Put the remote state.
func (r *remoteClient) Put(state []byte) error {
ctx := context.Background()
// Retrieve the workspace for which to create a new state.
w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace)
if err != nil {
return fmt.Errorf("Error retrieving workspace: %v", err)
}
// Read the raw state into a Terraform state.
tfState, err := terraform.ReadState(bytes.NewReader(state))
if err != nil {
return fmt.Errorf("Error reading state: %s", err)
}
options := tfe.StateVersionCreateOptions{
Lineage: tfe.String(tfState.Lineage),
Serial: tfe.Int64(tfState.Serial),
MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))),
State: tfe.String(base64.StdEncoding.EncodeToString(state)),
}
// If we have a run ID, make sure to add it to the options
// so the state will be properly associated with the run.
if r.runID != "" {
options.Run = &tfe.Run{ID: r.runID}
}
// Create the new state.
_, err = r.client.StateVersions.Create(ctx, w.ID, options)
if err != nil {
return fmt.Errorf("Error creating remote state: %v", err)
}
return nil
}
// Delete the remote state.
func (r *remoteClient) Delete() error {
err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace)
if err != nil && err != tfe.ErrResourceNotFound {
return fmt.Errorf("Error deleting workspace %s: %v", r.workspace, err)
}
return nil
}
// Lock the remote state.
func (r *remoteClient) Lock(info *state.LockInfo) (string, error) {
ctx := context.Background()
lockErr := &state.LockError{Info: r.lockInfo}
// Retrieve the workspace to lock.
w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace)
if err != nil {
lockErr.Err = err
return "", lockErr
}
// Check if the workspace is already locked.
if w.Locked {
lockErr.Err = fmt.Errorf(
"remote state already\nlocked (lock ID: \"%s/%s\")", r.organization, r.workspace)
return "", lockErr
}
// Lock the workspace.
w, err = r.client.Workspaces.Lock(ctx, w.ID, tfe.WorkspaceLockOptions{
Reason: tfe.String("Locked by Terraform"),
})
if err != nil {
lockErr.Err = err
return "", lockErr
}
r.lockInfo = info
return r.lockInfo.ID, nil
}
// Unlock the remote state.
func (r *remoteClient) Unlock(id string) error {
ctx := context.Background()
lockErr := &state.LockError{Info: r.lockInfo}
// Verify the expected lock ID.
if r.lockInfo != nil && r.lockInfo.ID != id {
lockErr.Err = fmt.Errorf("lock ID does not match existing lock")
return lockErr
}
// Verify the optional force-unlock lock ID.
if r.lockInfo == nil && r.organization+"/"+r.workspace != id {
lockErr.Err = fmt.Errorf("lock ID does not match existing lock")
return lockErr
}
// Retrieve the workspace to unlock.
w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace)
if err != nil {
lockErr.Err = err
return lockErr
}
// Unlock the workspace.
w, err = r.client.Workspaces.Unlock(ctx, w.ID)
if err != nil {
lockErr.Err = err
return lockErr
}
return nil
}

View File

@ -1,58 +0,0 @@
package remote
import (
"bytes"
"os"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(remoteClient)
}
func TestRemoteClient(t *testing.T) {
client := testRemoteClient(t)
remote.TestClient(t, client)
}
func TestRemoteClient_stateLock(t *testing.T) {
b := testBackendDefault(t)
s1, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
s2, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}
func TestRemoteClient_withRunID(t *testing.T) {
// Set the TFE_RUN_ID environment variable before creating the client!
if err := os.Setenv("TFE_RUN_ID", generateID("run-")); err != nil {
t.Fatalf("error setting env var TFE_RUN_ID: %v", err)
}
// Create a new test client.
client := testRemoteClient(t)
// Create a new empty state.
state := bytes.NewBuffer(nil)
if err := terraform.WriteState(terraform.NewState(), state); err != nil {
t.Fatalf("expected no error, got: %v", err)
}
// Store the new state to verify (this will be done
// by the mock that is used) that the run ID is set.
if err := client.Put(state.Bytes()); err != nil {
t.Fatalf("expected no error, got %v", err)
}
}

View File

@ -1,240 +0,0 @@
package remote
import (
"errors"
"reflect"
"strings"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/terraform"
)
func TestRemote(t *testing.T) {
var _ backend.Enhanced = New(nil)
var _ backend.CLI = New(nil)
}
func TestRemote_backendDefault(t *testing.T) {
b := testBackendDefault(t)
backend.TestBackendStates(t, b)
backend.TestBackendStateLocks(t, b, b)
backend.TestBackendStateForceUnlock(t, b, b)
}
func TestRemote_backendNoDefault(t *testing.T) {
b := testBackendNoDefault(t)
backend.TestBackendStates(t, b)
}
func TestRemote_config(t *testing.T) {
cases := map[string]struct {
config map[string]interface{}
err error
}{
"with_a_name": {
config: map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"name": "prod",
},
},
},
err: nil,
},
"with_a_prefix": {
config: map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"prefix": "my-app-",
},
},
},
err: nil,
},
"without_either_a_name_and_a_prefix": {
config: map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{},
},
},
err: errors.New("either workspace 'name' or 'prefix' is required"),
},
"with_both_a_name_and_a_prefix": {
config: map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"name": "prod",
"prefix": "my-app-",
},
},
},
err: errors.New("only one of workspace 'name' or 'prefix' is allowed"),
},
"with_an_unknown_host": {
config: map[string]interface{}{
"hostname": "nonexisting.local",
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"name": "prod",
},
},
},
err: errors.New("host nonexisting.local does not provide a remote backend API"),
},
}
for name, tc := range cases {
s := testServer(t)
b := New(testDisco(s))
// Get the proper config structure
rc, err := config.NewRawConfig(tc.config)
if err != nil {
t.Fatalf("%s: error creating raw config: %v", name, err)
}
conf := terraform.NewResourceConfig(rc)
// Validate
warns, errs := b.Validate(conf)
if len(warns) > 0 {
t.Fatalf("%s: validation warnings: %v", name, warns)
}
if len(errs) > 0 {
t.Fatalf("%s: validation errors: %v", name, errs)
}
// Configure
err = b.Configure(conf)
if err != tc.err && err != nil && tc.err != nil && err.Error() != tc.err.Error() {
t.Fatalf("%s: expected error %q, got: %q", name, tc.err, err)
}
}
}
func TestRemote_nonexistingOrganization(t *testing.T) {
msg := "does not exist"
b := testBackendNoDefault(t)
b.organization = "nonexisting"
if _, err := b.State("prod"); err == nil || !strings.Contains(err.Error(), msg) {
t.Fatalf("expected %q error, got: %v", msg, err)
}
if err := b.DeleteState("prod"); err == nil || !strings.Contains(err.Error(), msg) {
t.Fatalf("expected %q error, got: %v", msg, err)
}
if _, err := b.States(); err == nil || !strings.Contains(err.Error(), msg) {
t.Fatalf("expected %q error, got: %v", msg, err)
}
}
func TestRemote_addAndRemoveStatesDefault(t *testing.T) {
b := testBackendDefault(t)
if _, err := b.States(); err != backend.ErrNamedStatesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrNamedStatesNotSupported, err)
}
if _, err := b.State(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
if _, err := b.State("prod"); err != backend.ErrNamedStatesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrNamedStatesNotSupported, err)
}
if err := b.DeleteState(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
if err := b.DeleteState("prod"); err != backend.ErrNamedStatesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrNamedStatesNotSupported, err)
}
}
func TestRemote_addAndRemoveStatesNoDefault(t *testing.T) {
b := testBackendNoDefault(t)
states, err := b.States()
if err != nil {
t.Fatal(err)
}
expectedStates := []string(nil)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected states %#+v, got %#+v", expectedStates, states)
}
if _, err := b.State(backend.DefaultStateName); err != backend.ErrDefaultStateNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrDefaultStateNotSupported, err)
}
expectedA := "test_A"
if _, err := b.State(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = append(expectedStates, expectedA)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %#+v, got %#+v", expectedStates, states)
}
expectedB := "test_B"
if _, err := b.State(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = append(expectedStates, expectedB)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %#+v, got %#+v", expectedStates, states)
}
if err := b.DeleteState(backend.DefaultStateName); err != backend.ErrDefaultStateNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrDefaultStateNotSupported, err)
}
if err := b.DeleteState(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = []string{expectedB}
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %#+v got %#+v", expectedStates, states)
}
if err := b.DeleteState(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = []string(nil)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %#+v, got %#+v", expectedStates, states)
}
}

View File

@ -1,13 +0,0 @@
package remote
import (
"github.com/hashicorp/terraform/backend"
)
// CLIInit implements backend.CLI
func (b *Remote) CLIInit(opts *backend.CLIOpts) error {
b.CLI = opts.CLI
b.CLIColor = opts.CLIColor
b.ContextOpts = opts.ContextOpts
return nil
}

View File

@ -1,47 +0,0 @@
package remote
import (
"regexp"
"github.com/mitchellh/colorstring"
)
// colorsRe is used to find ANSI escaped color codes.
var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m")
// Colorer is the interface that must be implemented to colorize strings.
type Colorer interface {
Color(v string) string
}
// Colorize is used to print output when the -no-color flag is used. It will
// strip all ANSI escaped color codes which are set while the operation was
// executed in Terraform Enterprise.
//
// When Terraform Enterprise supports run specific variables, this code can be
// removed as we can then pass the CLI flag to the backend and prevent the color
// codes from being written to the output.
type Colorize struct {
cliColor *colorstring.Colorize
}
// Color will strip all ANSI escaped color codes and return an uncolored string.
func (c *Colorize) Color(v string) string {
return colorsRe.ReplaceAllString(c.cliColor.Color(v), "")
}
// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so is useful
// as a helper to wrap any potentially colored strings.
func (b *Remote) Colorize() Colorer {
if b.CLIColor != nil && !b.CLIColor.Disable {
return b.CLIColor
}
if b.CLIColor != nil {
return &Colorize{cliColor: b.CLIColor}
}
return &Colorize{cliColor: &colorstring.Colorize{
Colors: colorstring.DefaultColors,
Disable: true,
}}
}
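
For reference (not part of the diff), the stripping behaviour these comments describe can be reproduced in isolation with the same pattern used by colorsRe:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as colorsRe above: ESC, '[', one to three digits, then 'm'
// (an ANSI SGR color code).
var ansiColor = regexp.MustCompile("\033\\[\\d{1,3}m")

func main() {
	colored := "\033[32mApply complete!\033[0m Resources: 1 added, 0 changed, 0 destroyed."
	// With the escape sequences removed, the string is safe for -no-color output.
	fmt.Println(ansiColor.ReplaceAllString(colored, ""))
}
```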

View File

@ -1,4 +0,0 @@
null_resource.hello: Destroying... (ID: 8657651096157629581)
null_resource.hello: Destruction complete after 0s
Apply complete! Resources: 0 added, 0 changed, 1 destroyed.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,22 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
null_resource.hello: Refreshing state... (ID: 8657651096157629581)
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
- destroy
Terraform will perform the following actions:
- null_resource.hello
Plan: 0 to add, 0 to change, 1 to destroy.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,17 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
null_resource.hello: Refreshing state... (ID: 8657651096157629581)
------------------------------------------------------------------------
No changes. Infrastructure is up-to-date.
This means that Terraform did not detect any differences between your
configuration and real physical resources that exist. As a result, no
actions need to be performed.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: false
Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (hard-mandatory)
Result: false
FALSE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1,4 +0,0 @@
null_resource.hello: Creating...
null_resource.hello: Creation complete after 0s (ID: 8657651096157629581)
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: true
This result means that Sentinel policies returned true and the protected
behavior is allowed by Sentinel policies.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (soft-mandatory)
Result: true
TRUE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1,4 +0,0 @@
null_resource.hello: Creating...
null_resource.hello: Creation complete after 0s (ID: 8657651096157629581)
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: false
Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (soft-mandatory)
Result: false
FALSE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1,5 +0,0 @@
resource "null_resource" "foo" {
triggers {
random = "${guid()}"
}
}

View File

@ -1,10 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Error: null_resource.foo: 1 error(s) occurred:
* null_resource.foo: 1:3: unknown function called: guid in:
${guid()}

View File

@ -1,4 +0,0 @@
null_resource.hello: Creating...
null_resource.hello: Creation complete after 0s (ID: 8657651096157629581)
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: false
Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (hard-mandatory)
Result: false
FALSE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: true
This result means that Sentinel policies returned true and the protected
behavior is allowed by Sentinel policies.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (soft-mandatory)
Result: true
TRUE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,12 +0,0 @@
Sentinel Result: false
Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.
1 policies evaluated.
## Policy 1: Passthrough.sentinel (soft-mandatory)
Result: false
FALSE - Passthrough.sentinel:1:1 - Rule "main"

View File

@ -1,5 +0,0 @@
resource "null_resource" "foo" {
triggers {
random = "${guid()}"
}
}

View File

@ -1,10 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Error: null_resource.foo: 1 error(s) occurred:
* null_resource.foo: 1:3: unknown function called: guid in:
${guid()}

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1 +0,0 @@
resource "null_resource" "foo" {}

View File

@ -1,21 +0,0 @@
Terraform v0.11.7
Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ null_resource.foo
id: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.

View File

@ -1,137 +0,0 @@
package remote
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/mitchellh/cli"
)
const (
testCred = "test-auth-token"
)
var (
tfeHost = svchost.Hostname(defaultHostname)
credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
tfeHost: {"token": testCred},
})
)
func testInput(t *testing.T, answers map[string]string) *mockInput {
return &mockInput{answers: answers}
}
func testBackendDefault(t *testing.T) *Remote {
c := map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"name": "prod",
},
},
}
return testBackend(t, c)
}
func testBackendNoDefault(t *testing.T) *Remote {
c := map[string]interface{}{
"organization": "hashicorp",
"workspaces": []interface{}{
map[string]interface{}{
"prefix": "my-app-",
},
},
}
return testBackend(t, c)
}
func testRemoteClient(t *testing.T) remote.Client {
b := testBackendDefault(t)
raw, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("error: %v", err)
}
s := raw.(*remote.State)
return s.Client
}
func testBackend(t *testing.T, c map[string]interface{}) *Remote {
s := testServer(t)
b := New(testDisco(s))
// Configure the backend so the client is created.
backend.TestBackendConfig(t, b, c)
// Get a new mock client.
mc := newMockClient()
// Replace the services we use with our mock services.
b.CLI = cli.NewMockUi()
b.client.Applies = mc.Applies
b.client.ConfigurationVersions = mc.ConfigurationVersions
b.client.Organizations = mc.Organizations
b.client.Plans = mc.Plans
b.client.PolicyChecks = mc.PolicyChecks
b.client.Runs = mc.Runs
b.client.StateVersions = mc.StateVersions
b.client.Workspaces = mc.Workspaces
ctx := context.Background()
// Create the organization.
_, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{
Name: tfe.String(b.organization),
})
if err != nil {
t.Fatalf("error: %v", err)
}
// Create the default workspace if required.
if b.workspace != "" {
_, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{
Name: tfe.String(b.workspace),
})
if err != nil {
t.Fatalf("error: %v", err)
}
}
return b
}
// testServer returns a *httptest.Server used for local testing.
func testServer(t *testing.T) *httptest.Server {
mux := http.NewServeMux()
// Respond to service discovery calls.
mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
io.WriteString(w, `{"tfe.v2":"/api/v2/"}`)
})
return httptest.NewServer(mux)
}
// testDisco returns a *disco.Disco mapping app.terraform.io and
// localhost to a local test server.
func testDisco(s *httptest.Server) *disco.Disco {
services := map[string]interface{}{
"tfe.v2": fmt.Sprintf("%s/api/v2/", s.URL),
}
d := disco.NewWithCredentialsSource(credsSrc)
d.ForceHostServices(svchost.Hostname(defaultHostname), services)
d.ForceHostServices(svchost.Hostname("localhost"), services)
return d
}

View File

@ -155,7 +155,7 @@ func TestState_basic(t *testing.T) {
t.Fatalf("unexpected errors: %s", diags.Err())
}
if !got.RawEquals(test.Want) {
if !test.Want.RawEquals(got) {
t.Errorf("wrong result\nconfig: %sgot: %swant: %s", dump.Value(config), dump.Value(got), dump.Value(test.Want))
}
})

View File

@ -3,6 +3,7 @@ package terraform
import (
"testing"
backendinit "github.com/hashicorp/terraform/backend/init"
"github.com/hashicorp/terraform/providers"
)
@ -14,6 +15,7 @@ func init() {
testAccProviders = map[string]*Provider{
"terraform": testAccProvider,
}
backendinit.Init(nil)
}
func TestProvider_impl(t *testing.T) {

View File

@ -22,6 +22,7 @@ import (
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
backendinit "github.com/hashicorp/terraform/backend/init"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configload"
"github.com/hashicorp/terraform/configs/configschema"
@ -46,9 +47,6 @@ var testingDir string
func init() {
test = true
// Initialize the backends
backendInit.Init(nil)
// Expand the fixture dir on init because we change the working
// directory in some tests.
var err error
@ -75,6 +73,9 @@ func TestMain(m *testing.M) {
log.SetOutput(ioutil.Discard)
}
// Make sure backend init is initialized, since our tests tend to assume it.
backendinit.Init(nil)
os.Exit(m.Run())
}

View File

@ -156,8 +156,7 @@ func (c *InitCommand) Run(args []string) int {
if empty, err := config.IsEmptyDir(path); err != nil {
diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err))
return 1
}
if empty {
} else if empty {
c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty)))
return 0
}
@ -276,12 +275,14 @@ func (c *InitCommand) Run(args []string) int {
if back != nil {
sMgr, err := back.StateMgr(c.Workspace())
if err != nil {
c.Ui.Error(fmt.Sprintf("Error loading state: %s", err))
c.Ui.Error(fmt.Sprintf(
"Error loading state: %s", err))
return 1
}
if err := sMgr.RefreshState(); err != nil {
c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err))
c.Ui.Error(fmt.Sprintf(
"Error refreshing state: %s", err))
return 1
}

View File

@ -52,6 +52,10 @@ type Meta struct {
// "terraform-native' services running at a specific user-facing hostname.
Services *disco.Disco
// Credentials provides access to credentials for "terraform-native"
// services, which are accessed by a service hostname.
Credentials auth.CredentialsSource
// RunningInAutomation indicates that commands are being run by an
// automated system rather than directly at a command prompt.
//

View File

@ -122,7 +122,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics
}
// Build the local backend
local := backendLocal.NewWithBackend(b)
local := &backendlocal.Local{Backend: b}
if err := local.CLIInit(cliOpts); err != nil {
// Local backend isn't allowed to fail. It would be a bug.
panic(err)
@ -238,7 +238,7 @@ func (m *Meta) backendCLIOpts() *backend.CLIOpts {
// for some checks that require a remote backend.
func (m *Meta) IsLocalBackend(b backend.Backend) bool {
// Is it a local backend?
bLocal, ok := b.(*backendLocal.Local)
bLocal, ok := b.(*backendlocal.Local)
// If it is, does it not have an alternate state backend?
if ok {
@ -610,7 +610,10 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta
return nil, diags
}
if len(localStates) > 0 {
// If the local state is not empty, we need to potentially do a
// state migration to the new backend (with user permission), unless the
// destination is also "local"
if localS := localState.State(); !localS.Empty() {
// Perform the migration
err = m.backendMigrateState(&backendMigrateOpts{
OneType: "local",
@ -628,8 +631,8 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta
// can get us here too. Don't delete our state if the old and new paths
// are the same.
erase := true
if newLocalB, ok := b.(*backendLocal.Local); ok {
if localB, ok := localB.(*backendLocal.Local); ok {
if newLocalB, ok := b.(*backendlocal.Local); ok {
if localB, ok := localB.(*backendlocal.Local); ok {
if newLocalB.StatePath == localB.StatePath {
erase = false
}
@ -794,7 +797,7 @@ func (m *Meta) backend_C_r_S_unchanged(c *configs.Backend, cHash int, sMgr *stat
}
// Get the backend
f := backendInit.Backend(s.Backend.Type)
f := backendinit.Backend(s.Backend.Type)
if f == nil {
diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type))
return nil, diags
@ -858,7 +861,7 @@ func (m *Meta) backendInitFromConfig(c *configs.Backend) (backend.Backend, cty.V
var diags tfdiags.Diagnostics
// Get the backend
f := backendInit.Backend(c.Type)
f := backendinit.Backend(c.Type)
if f == nil {
diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type))
return nil, cty.NilVal, diags
@ -898,7 +901,7 @@ func (m *Meta) backendInitFromSaved(s *terraform.BackendState) (backend.Backend,
var diags tfdiags.Diagnostics
// Get the backend
f := backendInit.Backend(s.Type)
f := backendinit.Backend(s.Type)
if f == nil {
diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Type))
return nil, diags
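
The same lookup pattern appears in each of these hunks: resolve the registered factory for a backend type name, fail with a diagnostic when the type is unknown, otherwise construct the backend. A minimal sketch of that flow, outside the diff, with the package name and error text assumed rather than taken from the source:

```go
package command

import (
	"fmt"

	"github.com/hashicorp/terraform/backend"
	backendinit "github.com/hashicorp/terraform/backend/init"
	"github.com/hashicorp/terraform/tfdiags"
)

// instantiateBackend is a hypothetical helper mirroring the call sites above:
// look up the factory registered for the given type name, report a diagnostic
// if the type is unknown, and otherwise construct the backend.
func instantiateBackend(typeName string) (backend.Backend, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	f := backendinit.Backend(typeName)
	if f == nil {
		diags = diags.Append(fmt.Errorf("unknown backend type %q", typeName))
		return nil, diags
	}
	return f(), diags
}
```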

View File

@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/hashicorp/terraform/states"
@ -20,17 +19,6 @@ import (
"github.com/hashicorp/terraform/terraform"
)
type backendMigrateOpts struct {
OneType, TwoType string
One, Two backend.Backend
// Fields below are set internally when migrate is called
oneEnv string // source env
twoEnv string // dest env
force bool // if true, won't ask for confirmation
}
// backendMigrateState handles migrating (copying) state from one backend
// to another. This function handles asking the user for confirmation
// as well as the copy itself.
@ -172,56 +160,7 @@ func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error {
}
}
// It's possible that the currently selected workspace is not migrated,
// so we call selectWorkspace to ensure a valid workspace is selected.
return m.selectWorkspace(opts.Two)
}
// selectWorkspace gets a list of migrated workspaces and then checks
// if the currently selected workspace is valid. If not, it will ask
// the user to select a workspace from the list.
func (m *Meta) selectWorkspace(b backend.Backend) error {
workspaces, err := b.States()
if err != nil {
return fmt.Errorf("Failed to get migrated workspaces: %s", err)
}
if len(workspaces) == 0 {
return fmt.Errorf(errBackendNoMigratedWorkspaces)
}
// Get the currently selected workspace.
workspace := m.Workspace()
// Check if any of the migrated workspaces match the selected workspace
// and create a numbered list with migrated workspaces.
var list strings.Builder
for i, w := range workspaces {
if w == workspace {
return nil
}
fmt.Fprintf(&list, "%d. %s\n", i+1, w)
}
// If the selected workspace is not migrated, ask the user to select
// a workspace from the list of migrated workspaces.
v, err := m.UIInput().Input(&terraform.InputOpts{
Id: "select-workspace",
Query: fmt.Sprintf(
"[reset][bold][yellow]The currently selected workspace (%s) is not migrated.[reset]",
workspace),
Description: fmt.Sprintf(
strings.TrimSpace(inputBackendSelectWorkspace), list.String()),
})
if err != nil {
return fmt.Errorf("Error asking to select workspace: %s", err)
}
idx, err := strconv.Atoi(v)
if err != nil || (idx < 1 || idx > len(workspaces)) {
return fmt.Errorf("Error selecting workspace: input not a valid number")
}
return m.SetWorkspace(workspaces[idx-1])
return nil
}
// Multi-state to single state.
@ -442,6 +381,17 @@ func (m *Meta) backendMigrateNonEmptyConfirm(
return m.confirm(inputOpts)
}
type backendMigrateOpts struct {
OneType, TwoType string
One, Two backend.Backend
// Fields below are set internally when migrate is called
oneEnv string // source env
twoEnv string // dest env
force bool // if true, won't ask for confirmation
}
const errMigrateLoadStates = `
Error inspecting states in the %q backend:
%s
@ -464,8 +414,8 @@ above error and try again.
`
const errMigrateMulti = `
Error migrating the workspace %q from the previous %q backend
to the newly configured %q backend:
Error migrating the workspace %q from the previous %q backend to the newly
configured %q backend:
%s
Terraform copies workspaces in alphabetical order. Any workspaces
@ -478,22 +428,13 @@ This will attempt to copy (with permission) all workspaces again.
`
const errBackendStateCopy = `
Error copying state from the previous %q backend to the newly configured
%q backend:
Error copying state from the previous %q backend to the newly configured %q backend:
%s
The state in the previous backend remains intact and unmodified. Please resolve
the error above and try again.
`
const errBackendNoMigratedWorkspaces = `
No workspaces are migrated. Use the "terraform workspace" command to create
and select a new workspace.
If the backend already contains existing workspaces, you may need to update
the workspace name or prefix in the backend configuration.
`
const inputBackendMigrateEmpty = `
Pre-existing state was found while migrating the previous %q backend to the
newly configured %q backend. No existing state was found in the newly
@ -525,9 +466,9 @@ up, or cancel altogether, answer "no" and Terraform will abort.
`
const inputBackendMigrateMultiToMulti = `
Both the existing %[1]q backend and the newly configured %[2]q backend
support workspaces. When migrating between backends, Terraform will copy
all workspaces (with the same names). THIS WILL OVERWRITE any conflicting
Both the existing %[1]q backend and the newly configured %[2]q backend support
workspaces. When migrating between backends, Terraform will copy all
workspaces (with the same names). THIS WILL OVERWRITE any conflicting
states in the destination.
Terraform initialization doesn't currently migrate only select workspaces.
@ -537,15 +478,3 @@ pull and push those states.
If you answer "yes", Terraform will migrate all states. If you answer
"no", Terraform will abort.
`
const inputBackendNewWorkspaceName = `
Please provide a new workspace name (e.g. dev, test) that will be used
to migrate the existing default workspace.
`
const inputBackendSelectWorkspace = `
This is expected behavior when the selected workspace did not have an
existing non-empty state. Please enter a number to select a workspace:
%s
`

View File

@ -12,8 +12,8 @@ import (
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/backend"
backendInit "github.com/hashicorp/terraform/backend/init"
backendLocal "github.com/hashicorp/terraform/backend/local"
backendinit "github.com/hashicorp/terraform/backend/init"
backendlocal "github.com/hashicorp/terraform/backend/local"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/helper/copy"
"github.com/hashicorp/terraform/plans"
@ -745,8 +745,8 @@ func TestMetaBackend_reconfigureChange(t *testing.T) {
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
defer backendInit.Set("local-single", nil)
backendinit.Set("local-single", backendlocal.TestNewLocalSingle)
defer backendinit.Set("local-single", nil)
// Setup the meta
m := testMetaBackend(t, nil)
@ -844,11 +844,12 @@ func TestMetaBackend_configuredChangeCopy_singleState(t *testing.T) {
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
defer backendInit.Set("local-single", nil)
backendinit.Set("local-single", backendlocal.TestNewLocalSingle)
defer backendinit.Set("local-single", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-to-new": "yes",
"backend-migrate-copy-to-empty": "yes",
})()
@ -899,11 +900,12 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleDefault(t *testing.T) {
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
defer backendInit.Set("local-single", nil)
backendinit.Set("local-single", backendlocal.TestNewLocalSingle)
defer backendinit.Set("local-single", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-to-new": "yes",
"backend-migrate-copy-to-empty": "yes",
})()
@ -953,11 +955,12 @@ func TestMetaBackend_configuredChangeCopy_multiToSingle(t *testing.T) {
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
defer backendInit.Set("local-single", nil)
backendinit.Set("local-single", backendlocal.TestNewLocalSingle)
defer backendinit.Set("local-single", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-to-new": "yes",
"backend-migrate-multistate-to-single": "yes",
"backend-migrate-copy-to-empty": "yes",
})()
@ -998,7 +1001,7 @@ func TestMetaBackend_configuredChangeCopy_multiToSingle(t *testing.T) {
}
// Verify existing workspaces exist
envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename)
envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
@ -1019,11 +1022,12 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleCurrentEnv(t *testing.T)
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
defer backendInit.Set("local-single", nil)
backendinit.Set("local-single", backendlocal.TestNewLocalSingle)
defer backendinit.Set("local-single", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-to-new": "yes",
"backend-migrate-multistate-to-single": "yes",
"backend-migrate-copy-to-empty": "yes",
})()
@ -1069,7 +1073,7 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleCurrentEnv(t *testing.T)
}
// Verify existing workspaces exist
envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename)
envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
@ -1086,6 +1090,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) {
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-to-new": "yes",
"backend-migrate-multistate-to-multistate": "yes",
})()
@ -1153,7 +1158,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) {
{
// Verify existing workspaces exist
envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename)
envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
@ -1161,159 +1166,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) {
{
// Verify new workspaces exist
envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
}
}
// Changing a configured backend that supports multi-state to a
// backend that also supports multi-state, but doesn't allow a
// default state while the default state is non-empty.
func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithDefault(t *testing.T) {
// Create a temporary working directory that is empty
td := tempDir(t)
copy.CopyDir(testFixturePath("backend-change-multi-to-no-default-with-default"), td)
defer os.RemoveAll(td)
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault)
defer backendInit.Set("local-no-default", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-multistate-to-multistate": "yes",
"new-state-name": "env1",
})()
// Setup the meta
m := testMetaBackend(t, nil)
// Get the backend
b, err := m.Backend(&BackendOpts{Init: true})
if err != nil {
t.Fatalf("bad: %s", err)
}
// Check resulting states
states, err := b.States()
if err != nil {
t.Fatalf("bad: %s", err)
}
sort.Strings(states)
expected := []string{"env1", "env2"}
if !reflect.DeepEqual(states, expected) {
t.Fatalf("bad: %#v", states)
}
{
// Check the renamed default state
s, err := b.State("env1")
if err != nil {
t.Fatalf("bad: %s", err)
}
if err := s.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
state := s.State()
if state == nil {
t.Fatal("state should not be nil")
}
if state.Lineage != "backend-change-env1" {
t.Fatalf("bad: %#v", state)
}
}
{
// Verify existing workspaces exist
envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
}
{
// Verify new workspaces exist
envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
}
}
// Changing a configured backend that supports multi-state to a
// backend that also supports multi-state, but doesn't allow a
// default state while the default state is empty.
func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *testing.T) {
// Create a temporary working directory that is empty
td := tempDir(t)
copy.CopyDir(testFixturePath("backend-change-multi-to-no-default-without-default"), td)
defer os.RemoveAll(td)
defer testChdir(t, td)()
// Register the single-state backend
backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault)
defer backendInit.Set("local-no-default", nil)
// Ask input
defer testInputMap(t, map[string]string{
"backend-migrate-multistate-to-multistate": "yes",
"select-workspace": "1",
})()
// Setup the meta
m := testMetaBackend(t, nil)
// Get the backend
b, err := m.Backend(&BackendOpts{Init: true})
if err != nil {
t.Fatalf("bad: %s", err)
}
// Check resulting states
states, err := b.States()
if err != nil {
t.Fatalf("bad: %s", err)
}
sort.Strings(states)
expected := []string{"env2"}
if !reflect.DeepEqual(states, expected) {
t.Fatalf("bad: %#v", states)
}
{
// Check the named state
s, err := b.State("env2")
if err != nil {
t.Fatalf("bad: %s", err)
}
if err := s.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
state := s.State()
if state == nil {
t.Fatal("state should not be nil")
}
if state.Lineage != "backend-change-env2" {
t.Fatalf("bad: %#v", state)
}
}
{
// Verify existing workspaces exist
envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}
}
{
// Verify new workspaces exist
envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename)
envPath := filepath.Join("envdir-new", "env2", backendlocal.DefaultStateFilename)
if _, err := os.Stat(envPath); err != nil {
t.Fatal("env should exist")
}

View File

@ -320,7 +320,6 @@ func (m *Meta) initConfigLoader() (*configload.Loader, error) {
loader, err := configload.NewLoader(&configload.Config{
ModulesDir: m.modulesDir(),
Services: m.Services,
Creds: m.Credentials,
})
if err != nil {
return nil, err

View File

@ -5,7 +5,7 @@ import (
"fmt"
"time"
backendLocal "github.com/hashicorp/terraform/backend/local"
backendlocal "github.com/hashicorp/terraform/backend/local"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/states/statemgr"
"github.com/hashicorp/terraform/terraform"

View File

@ -218,40 +218,6 @@ if (err == nil) {
}
```
#### Username password authenticate
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
oauthConfig,
applicationID,
username,
password,
resource,
callbacks...)
if (err == nil) {
token := spt.Token
}
```
#### Authorization code authenticate
``` Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
oauthConfig,
applicationID,
clientSecret,
authorizationCode,
redirectURI,
resource,
callbacks...)
err = spt.Refresh()
if (err == nil) {
token := spt.Token
}
```
### Command Line Tool
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.

View File

@ -1,19 +1,5 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/url"
@ -32,24 +18,8 @@ type OAuthConfig struct {
DeviceCodeEndpoint url.URL
}
// IsZero returns true if the OAuthConfig object is zero-initialized.
func (oac OAuthConfig) IsZero() bool {
return oac == OAuthConfig{}
}
func validateStringParam(param, name string) error {
if len(param) == 0 {
return fmt.Errorf("parameter '" + name + "' cannot be empty")
}
return nil
}
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
// it's legal for tenantID to be empty so don't validate it
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {

View File

@ -1,19 +1,5 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This file is largely based on rjw57/oauth2device's code, with the following differences:
* scope -> resource, and only allow a single one

View File

@ -1,20 +0,0 @@
// +build !windows
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
var msiPath = "/var/lib/waagent/ManagedIdentity-Settings"

View File

@ -1,25 +0,0 @@
// +build windows
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"os"
"strings"
)
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/")

View File

@ -1,19 +1,5 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"

View File

@ -1,19 +1,5 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
)

View File

@ -1,19 +1,5 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"crypto/rand"
"crypto/rsa"
@ -27,15 +13,14 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/dgrijalva/jwt-go"
)
const (
defaultRefresh = 5 * time.Minute
tokenBaseDate = "1970-01-01T00:00:00Z"
// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
OAuthGrantTypeDeviceCode = "device_code"
@ -43,30 +28,27 @@ const (
// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
OAuthGrantTypeClientCredentials = "client_credentials"
// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
OAuthGrantTypeUserPass = "password"
// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
OAuthGrantTypeRefreshToken = "refresh_token"
// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
OAuthGrantTypeAuthorizationCode = "authorization_code"
// managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint)
managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings"
// metadataHeader is the header required by MSI extension
metadataHeader = "Metadata"
)
var expirationBase time.Time
func init() {
expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate)
}
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
type OAuthTokenProvider interface {
OAuthToken() string
}
// TokenRefreshError is an interface used by errors returned during token refresh.
type TokenRefreshError interface {
error
Response() *http.Response
}
// Refresher is an interface for token refresh functionality
type Refresher interface {
Refresh() error
@ -91,21 +73,13 @@ type Token struct {
Type string `json:"token_type"`
}
// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
return t == Token{}
}
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn)
if err != nil {
s = -3600
}
expiration := date.NewUnixTimeFromSeconds(float64(s))
return time.Time(expiration).UTC()
return expirationBase.Add(time.Duration(s) * time.Second).UTC()
}
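
Both versions of Expires shown in this hunk treat ExpiresOn as a Unix timestamp in seconds; a small illustration (not part of the diff, helper name hypothetical):

```go
package example

import "time"

// expiresAt mirrors the arithmetic above: ExpiresOn carries seconds since the
// Unix epoch, so a value of 3600 means 1970-01-01T01:00:00Z, and -3600 (the
// fallback used when parsing fails) is one hour before the epoch, i.e. already
// expired.
func expiresAt(expiresOnSeconds int64) time.Time {
	return time.Unix(expiresOnSeconds, 0).UTC()
}
```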
// IsExpired returns true if the Token is expired, false otherwise.
@ -163,36 +137,10 @@ type ServicePrincipalCertificateSecret struct {
type ServicePrincipalMSISecret struct {
}
// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
type ServicePrincipalUsernamePasswordSecret struct {
Username string
Password string
}
// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
type ServicePrincipalAuthorizationCodeSecret struct {
ClientSecret string
AuthorizationCode string
RedirectURI string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("code", secret.AuthorizationCode)
v.Set("client_secret", secret.ClientSecret)
v.Set("redirect_uri", secret.RedirectURI)
return nil
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("username", secret.Username)
v.Set("password", secret.Password)
return nil
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// MSI extension requires the authority field to be set to the real tenant authority endpoint
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("authority", spt.oauthConfig.AuthorityEndpoint.String())
return nil
}
@ -245,46 +193,25 @@ func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *Se
type ServicePrincipalToken struct {
Token
secret ServicePrincipalSecret
oauthConfig OAuthConfig
clientID string
resource string
autoRefresh bool
autoRefreshLock *sync.Mutex
refreshWithin time.Duration
sender Sender
secret ServicePrincipalSecret
oauthConfig OAuthConfig
clientID string
resource string
autoRefresh bool
refreshWithin time.Duration
sender Sender
refreshCallbacks []TokenRefreshCallback
}
func validateOAuthConfig(oac OAuthConfig) error {
if oac.IsZero() {
return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
}
return nil
}
// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(id, "id"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if secret == nil {
return nil, fmt.Errorf("parameter 'secret' cannot be nil")
}
spt := &ServicePrincipalToken{
oauthConfig: oauthConfig,
secret: secret,
clientID: id,
resource: resource,
autoRefresh: true,
autoRefreshLock: &sync.Mutex{},
refreshWithin: defaultRefresh,
sender: &http.Client{},
refreshCallbacks: callbacks,
@ -294,18 +221,6 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if token.IsZero() {
return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
}
spt, err := NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
@ -324,18 +239,6 @@ func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID s
// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
// credentials scoped to the named resource.
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(secret, "secret"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
@ -347,23 +250,8 @@ func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret s
)
}
// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes.
// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes.
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if certificate == nil {
return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
}
if privateKey == nil {
return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
}
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
@ -376,175 +264,57 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s
)
}
// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(username, "username"); err != nil {
return nil, err
}
if err := validateStringParam(password, "password"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalUsernamePasswordSecret{
Username: username,
Password: password,
},
callbacks...,
)
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
func NewServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(oauthConfig, resource, managedIdentitySettingsPath, callbacks...)
}
// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
return nil, err
}
if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
return nil, err
}
if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalAuthorizationCodeSecret{
ClientSecret: clientSecret,
AuthorizationCode: authorizationCode,
RedirectURI: redirectURI,
},
callbacks...,
)
}
// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
func GetMSIVMEndpoint() (string, error) {
return getMSIVMEndpoint(msiPath)
}
func getMSIVMEndpoint(path string) (string, error) {
func newServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource, settingsPath string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
// Read MSI settings
bytes, err := ioutil.ReadFile(path)
bytes, err := ioutil.ReadFile(settingsPath)
if err != nil {
return "", err
return nil, err
}
msiSettings := struct {
URL string `json:"url"`
}{}
err = json.Unmarshal(bytes, &msiSettings)
if err != nil {
return "", err
}
return msiSettings.URL, nil
}
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
}
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
}
func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if userAssignedID != nil {
if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
return nil, err
}
}
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
// We leave the authority as-is so MSI can POST it with the token request
msiEndpointURL, err := url.Parse(msiSettings.URL)
if err != nil {
return nil, err
}
oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "")
msiTokenEndpointURL, err := msiEndpointURL.Parse("/oauth2/token")
if err != nil {
return nil, err
}
oauthConfig.TokenEndpoint = *msiTokenEndpointURL
spt := &ServicePrincipalToken{
oauthConfig: *oauthConfig,
oauthConfig: oauthConfig,
secret: &ServicePrincipalMSISecret{},
resource: resource,
autoRefresh: true,
autoRefreshLock: &sync.Mutex{},
refreshWithin: defaultRefresh,
sender: &http.Client{},
refreshCallbacks: callbacks,
}
if userAssignedID != nil {
spt.clientID = *userAssignedID
}
return spt, nil
}
// internal type that implements TokenRefreshError
type tokenRefreshError struct {
message string
resp *http.Response
}
// Error implements the error interface which is part of the TokenRefreshError interface.
func (tre tokenRefreshError) Error() string {
return tre.message
}
// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
func (tre tokenRefreshError) Response() *http.Response {
return tre.resp
}
func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
return tokenRefreshError{message: message, resp: resp}
}
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
// RefreshWithin) and autoRefresh flag is on.
func (spt *ServicePrincipalToken) EnsureFresh() error {
if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) {
// take the lock then check to see if the token was already refreshed
spt.autoRefreshLock.Lock()
defer spt.autoRefreshLock.Unlock()
if spt.WillExpireIn(spt.refreshWithin) {
return spt.Refresh()
}
return spt.Refresh()
}
return nil
}
@ -563,28 +333,15 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
}
// Refresh obtains a fresh token for the Service Principal.
// This method is not safe for concurrent use and should be synchronized.
func (spt *ServicePrincipalToken) Refresh() error {
return spt.refreshInternal(spt.resource)
}
// RefreshExchange refreshes the token, but for a different resource.
// This method is not safe for concurrent use and should be synchronized.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
return spt.refreshInternal(resource)
}
func (spt *ServicePrincipalToken) getGrantType() string {
switch spt.secret.(type) {
case *ServicePrincipalUsernamePasswordSecret:
return OAuthGrantTypeUserPass
case *ServicePrincipalAuthorizationCodeSecret:
return OAuthGrantTypeAuthorizationCode
default:
return OAuthGrantTypeClientCredentials
}
}
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
v := url.Values{}
v.Set("client_id", spt.clientID)
@ -594,7 +351,7 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
v.Set("grant_type", OAuthGrantTypeRefreshToken)
v.Set("refresh_token", spt.RefreshToken)
} else {
v.Set("grant_type", spt.getGrantType())
v.Set("grant_type", OAuthGrantTypeClientCredentials)
err := spt.secret.SetAuthenticationValues(spt, &v)
if err != nil {
return err
@ -617,17 +374,12 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
if err != nil {
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
if err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode), resp)
}
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'", resp.StatusCode)
}
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
}
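As an aside on the refresh error handling above, here is a rough usage sketch against the adal API shown in this file; the helper name refreshOrExplain and the logging are illustrative only, not part of the library.
package example

import (
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

// refreshOrExplain is a hypothetical helper: it refreshes the token and, when
// the refresh fails, uses the TokenRefreshError interface defined above to
// recover the raw HTTP response for logging.
func refreshOrExplain(spt *adal.ServicePrincipalToken) error {
	err := spt.Refresh()
	if err == nil {
		return nil
	}
	if tre, ok := err.(adal.TokenRefreshError); ok && tre.Response() != nil {
		// The token endpoint answered, but with a non-200 status code.
		log.Printf("token refresh rejected with status %d", tre.Response().StatusCode)
	} else {
		// Transport-level failure; no HTTP response is available.
		log.Printf("token refresh failed before a response was received: %v", err)
	}
	return err
}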
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
@ -24,12 +10,9 @@ import (
)
const (
bearerChallengeHeader = "Www-Authenticate"
bearer = "Bearer"
tenantID = "tenantID"
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
bingAPISdkHeader = "X-BingApis-SDK-Client"
golangBingAPISdkHeaderValue = "Go-SDK"
bearerChallengeHeader = "Www-Authenticate"
bearer = "Bearer"
tenantID = "tenantID"
)
// Authorizer is the interface that provides a PrepareDecorator used to supply request
@ -47,53 +30,6 @@ func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
return WithNothing()
}
// APIKeyAuthorizer implements API Key authorization.
type APIKeyAuthorizer struct {
headers map[string]interface{}
queryParameters map[string]interface{}
}
// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
return NewAPIKeyAuthorizer(headers, nil)
}
// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
return NewAPIKeyAuthorizer(nil, queryParameters)
}
// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with the specified headers and query parameters.
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}
// WithAuthorization returns a PrepareDecorator that adds the authorizer's HTTP headers and query parameters to the request.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
}
}
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
type CognitiveServicesAuthorizer struct {
subscriptionKey string
}
// NewCognitiveServicesAuthorizer returns a CognitiveServicesAuthorizer for the given subscription key.
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
}
// WithAuthorization returns a PrepareDecorator that adds the subscription key and Bing SDK headers to the request.
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
headers := make(map[string]interface{})
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
// BearerAuthorizer implements the bearer authorization
type BearerAuthorizer struct {
tokenProvider adal.OAuthTokenProvider
@ -119,11 +55,7 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
if ok {
err := refresher.EnsureFresh()
if err != nil {
var resp *http.Response
if tokError, ok := err.(adal.TokenRefreshError); ok {
resp = tokError.Response()
}
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil,
"Failed to refresh the Token for request to %s", r.URL)
}
}
@ -233,22 +165,3 @@ func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
return bc, err
}
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
type EventGridKeyAuthorizer struct {
topicKey string
}
// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
// with the specified topic key.
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
return EventGridKeyAuthorizer{topicKey: topicKey}
}
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
headers := map[string]interface{}{
"aeg-sas-key": egta.topicKey,
}
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
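For orientation, a minimal sketch of how the API key authorizers removed above are applied through a Preparer chain; the header name, base URL, and helper name newKeyedRequest are placeholders for the example.
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// newKeyedRequest builds a GET request that carries an API key header via
// APIKeyAuthorizer's WithAuthorization PrepareDecorator.
func newKeyedRequest(key string) (*http.Request, error) {
	authorizer := autorest.NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
		"Example-Api-Key": key, // placeholder header name
	})
	return autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.invalid/"), // placeholder endpoint
		authorizer.WithAuthorization(),
	).Prepare(&http.Request{})
}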
View File
@ -57,20 +57,6 @@ generated clients, see the Client described below.
*/
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
"time"
@ -87,9 +73,6 @@ const (
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
// and false otherwise.
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
if resp == nil {
return false
}
return containsInt(codes, resp.StatusCode)
}
View File
@ -1,23 +1,7 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
@ -39,152 +23,6 @@ const (
operationSucceeded string = "Succeeded"
)
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {
req *http.Request
resp *http.Response
ps pollingState
}
// NewFuture returns a new Future object initialized with the specified request.
func NewFuture(req *http.Request) Future {
return Future{req: req}
}
// Response returns the last HTTP response or nil if there isn't one.
func (f Future) Response() *http.Response {
return f.resp
}
// Status returns the last status message of the operation.
func (f Future) Status() string {
if f.ps.State == "" {
return "Unknown"
}
return f.ps.State
}
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
func (f Future) PollingMethod() PollingMethodType {
return f.ps.PollingMethod
}
// Done queries the service to see if the operation has completed.
func (f *Future) Done(sender autorest.Sender) (bool, error) {
// exit early if this future has terminated
if f.ps.hasTerminated() {
return true, f.errorInfo()
}
resp, err := sender.Do(f.req)
f.resp = resp
if err != nil || !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
return false, err
}
err = updatePollingState(resp, &f.ps)
if err != nil {
return false, err
}
if f.ps.hasTerminated() {
return true, f.errorInfo()
}
f.req, err = newPollingRequest(f.ps)
return false, err
}
// GetPollingDelay returns a duration the application should wait before checking
// the status of the asynchronous request and true; this value is returned from
// the service via the Retry-After response header. If the header wasn't returned
// then the function returns the zero-value time.Duration and false.
func (f Future) GetPollingDelay() (time.Duration, bool) {
if f.resp == nil {
return 0, false
}
retry := f.resp.Header.Get(autorest.HeaderRetryAfter)
if retry == "" {
return 0, false
}
d, err := time.ParseDuration(retry + "s")
if err != nil {
panic(err)
}
return d, true
}
// WaitForCompletion will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error {
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
defer cancel()
done, err := f.Done(client)
for attempts := 0; !done; done, err = f.Done(client) {
if attempts >= client.RetryAttempts {
return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded")
}
// we want delayAttempt to be zero in the non-error case so
// that DelayForBackoff doesn't perform exponential back-off
var delayAttempt int
var delay time.Duration
if err == nil {
// check for Retry-After delay, if not present use the client's polling delay
var ok bool
delay, ok = f.GetPollingDelay()
if !ok {
delay = client.PollingDelay
}
} else {
// there was an error polling for status so perform exponential
// back-off based on the number of attempts using the client's retry
// duration. update attempts after delayAttempt to avoid off-by-one.
delayAttempt = attempts
delay = client.RetryDuration
attempts++
}
// wait until the delay elapses or the context is cancelled
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
if !delayElapsed {
return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled")
}
}
return err
}
// if the operation failed the polling state will contain
// error information and implements the error interface
func (f *Future) errorInfo() error {
if !f.ps.hasSucceeded() {
return f.ps
}
return nil
}
// MarshalJSON implements the json.Marshaler interface.
func (f Future) MarshalJSON() ([]byte, error) {
return json.Marshal(&f.ps)
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (f *Future) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &f.ps)
if err != nil {
return err
}
f.req, err = newPollingRequest(f.ps)
return err
}
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
// long-running operation. It will delay between requests for the duration specified in the
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
@ -196,7 +34,8 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
if err != nil {
return resp, err
}
if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
return resp, nil
}
@ -213,11 +52,10 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
break
}
r, err = newPollingRequest(ps)
r, err = newPollingRequest(resp, ps)
if err != nil {
return resp, err
}
r.Cancel = resp.Request.Cancel
delay = autorest.GetRetryAfter(resp, delay)
resp, err = autorest.SendWithSender(s, r,
@ -234,15 +72,20 @@ func getAsyncOperation(resp *http.Response) string {
}
func hasSucceeded(state string) bool {
return strings.EqualFold(state, operationSucceeded)
return state == operationSucceeded
}
func hasTerminated(state string) bool {
return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded)
switch state {
case operationCanceled, operationFailed, operationSucceeded:
return true
default:
return false
}
}
func hasFailed(state string) bool {
return strings.EqualFold(state, operationFailed)
return state == operationFailed
}
type provisioningTracker interface {
@ -303,42 +146,36 @@ func (ps provisioningStatus) hasProvisioningError() bool {
return ps.ProvisioningError != ServiceError{}
}
// PollingMethodType defines a type used for enumerating polling mechanisms.
type PollingMethodType string
type pollingResponseFormat string
const (
// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
PollingAsyncOperation PollingMethodType = "AsyncOperation"
// PollingLocation indicates the polling method uses the Location header.
PollingLocation PollingMethodType = "Location"
// PollingUnknown indicates an unknown polling method and is the default value.
PollingUnknown PollingMethodType = ""
usesOperationResponse pollingResponseFormat = "OperationResponse"
usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
formatIsUnknown pollingResponseFormat = ""
)
type pollingState struct {
PollingMethod PollingMethodType `json:"pollingMethod"`
URI string `json:"uri"`
State string `json:"state"`
Code string `json:"code"`
Message string `json:"message"`
responseFormat pollingResponseFormat
uri string
state string
code string
message string
}
func (ps pollingState) hasSucceeded() bool {
return hasSucceeded(ps.State)
return hasSucceeded(ps.state)
}
func (ps pollingState) hasTerminated() bool {
return hasTerminated(ps.State)
return hasTerminated(ps.state)
}
func (ps pollingState) hasFailed() bool {
return hasFailed(ps.State)
return hasFailed(ps.state)
}
func (ps pollingState) Error() string {
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.State, ps.Code, ps.Message)
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
}
// updatePollingState maps the operation status -- retrieved from either a provisioningState
@ -353,7 +190,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
// -- The first response will always be a provisioningStatus response; only the polling requests,
// depending on the header returned, may be something otherwise.
var pt provisioningTracker
if ps.PollingMethod == PollingAsyncOperation {
if ps.responseFormat == usesOperationResponse {
pt = &operationResource{}
} else {
pt = &provisioningStatus{}
@ -361,30 +198,30 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
// If this is the first request (that is, the polling response shape is unknown), determine how
// to poll and what to expect
if ps.PollingMethod == PollingUnknown {
if ps.responseFormat == formatIsUnknown {
req := resp.Request
if req == nil {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
}
// Prefer the Azure-AsyncOperation header
ps.URI = getAsyncOperation(resp)
if ps.URI != "" {
ps.PollingMethod = PollingAsyncOperation
ps.uri = getAsyncOperation(resp)
if ps.uri != "" {
ps.responseFormat = usesOperationResponse
} else {
ps.PollingMethod = PollingLocation
ps.responseFormat = usesProvisioningStatus
}
// Else, use the Location header
if ps.URI == "" {
ps.URI = autorest.GetLocation(resp)
if ps.uri == "" {
ps.uri = autorest.GetLocation(resp)
}
// Lastly, requests against an existing resource, use the last request URI
if ps.URI == "" {
if ps.uri == "" {
m := strings.ToUpper(req.Method)
if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
ps.URI = req.URL.String()
ps.uri = req.URL.String()
}
}
}
@ -405,23 +242,23 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
// -- Unknown states are per-service inprogress states
// -- Otherwise, infer state from HTTP status code
if pt.hasTerminated() {
ps.State = pt.state()
ps.state = pt.state()
} else if pt.state() != "" {
ps.State = operationInProgress
ps.state = operationInProgress
} else {
switch resp.StatusCode {
case http.StatusAccepted:
ps.State = operationInProgress
ps.state = operationInProgress
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
ps.State = operationSucceeded
ps.state = operationSucceeded
default:
ps.State = operationFailed
ps.state = operationFailed
}
}
if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" {
if ps.state == operationInProgress && ps.uri == "" {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
}
@ -430,49 +267,36 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
// -- Response
// -- Otherwise, Unknown
if ps.hasFailed() {
if ps.PollingMethod == PollingAsyncOperation {
if ps.responseFormat == usesOperationResponse {
or := pt.(*operationResource)
ps.Code = or.OperationError.Code
ps.Message = or.OperationError.Message
ps.code = or.OperationError.Code
ps.message = or.OperationError.Message
} else {
p := pt.(*provisioningStatus)
if p.hasProvisioningError() {
ps.Code = p.ProvisioningError.Code
ps.Message = p.ProvisioningError.Message
ps.code = p.ProvisioningError.Code
ps.message = p.ProvisioningError.Message
} else {
ps.Code = "Unknown"
ps.Message = "None"
ps.code = "Unknown"
ps.message = "None"
}
}
}
return nil
}
func newPollingRequest(ps pollingState) (*http.Request, error) {
reqPoll, err := autorest.Prepare(&http.Request{},
func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
req := resp.Request
if req == nil {
return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
}
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
autorest.AsGet(),
autorest.WithBaseURL(ps.URI))
autorest.WithBaseURL(ps.uri))
if err != nil {
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI)
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
}
return reqPoll, nil
}
// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
type AsyncOpIncompleteError struct {
// FutureType is the name of the type composed of a azure.Future.
FutureType string
}
// Error returns an error message including the originating type name of the error.
func (e AsyncOpIncompleteError) Error() string {
return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
}
// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
return AsyncOpIncompleteError{
FutureType: futureType,
}
}
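To make the removed Future type above easier to follow, a short sketch of the intended call pattern; waitForOperation is a hypothetical helper and req is assumed to be the request that started the long-running operation.
package example

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitForOperation wraps the initial request in a Future and polls until the
// operation terminates, the context is cancelled, or the client's polling
// duration is exceeded.
func waitForOperation(ctx context.Context, client autorest.Client, req *http.Request) error {
	future := azure.NewFuture(req)
	return future.WaitForCompletion(ctx, client)
}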
View File
@ -5,20 +5,6 @@ See the included examples for more detail.
*/
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
@ -179,13 +165,7 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
} else if e.ServiceError == nil {
// Check if error is unwrapped ServiceError
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
e.ServiceError = &ServiceError{
Code: "Unknown",
Message: "Unknown service error",
}
}
e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
}
e.RequestID = ExtractRequestID(resp)
View File
@ -1,31 +1,10 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
)
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
// to be used while populating the Azure Environment.
const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
var environments = map[string]Environment{
"AZURECHINACLOUD": ChinaCloud,
"AZUREGERMANCLOUD": GermanCloud,
@ -44,7 +23,6 @@ type Environment struct {
GalleryEndpoint string `json:"galleryEndpoint"`
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
GraphEndpoint string `json:"graphEndpoint"`
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
@ -67,12 +45,11 @@ var (
GalleryEndpoint: "https://gallery.azure.com/",
KeyVaultEndpoint: "https://vault.azure.net/",
GraphEndpoint: "https://graph.windows.net/",
ServiceBusEndpoint: "https://servicebus.windows.net/",
StorageEndpointSuffix: "core.windows.net",
SQLDatabaseDNSSuffix: "database.windows.net",
TrafficManagerDNSSuffix: "trafficmanager.net",
KeyVaultDNSSuffix: "vault.azure.net",
ServiceBusEndpointSuffix: "servicebus.windows.net",
ServiceBusEndpointSuffix: "servicebus.azure.com",
ServiceManagementVMDNSSuffix: "cloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
ContainerRegistryDNSSuffix: "azurecr.io",
@ -85,11 +62,10 @@ var (
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.us/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
GraphEndpoint: "https://graph.windows.net/",
ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/",
GraphEndpoint: "https://graph.usgovcloudapi.net/",
StorageEndpointSuffix: "core.usgovcloudapi.net",
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
@ -111,12 +87,11 @@ var (
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
KeyVaultEndpoint: "https://vault.azure.cn/",
GraphEndpoint: "https://graph.chinacloudapi.cn/",
ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/",
StorageEndpointSuffix: "core.chinacloudapi.cn",
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
TrafficManagerDNSSuffix: "trafficmanager.cn",
KeyVaultDNSSuffix: "vault.azure.cn",
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
ContainerRegistryDNSSuffix: "azurecr.io",
@ -133,7 +108,6 @@ var (
GalleryEndpoint: "https://gallery.cloudapi.de/",
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
GraphEndpoint: "https://graph.cloudapi.de/",
ServiceBusEndpoint: "https://servicebus.cloudapi.de/",
StorageEndpointSuffix: "core.cloudapi.de",
SQLDatabaseDNSSuffix: "database.cloudapi.de",
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
@ -145,37 +119,12 @@ var (
}
)
// EnvironmentFromName returns an Environment based on the common name specified.
// EnvironmentFromName returns an Environment based on the common name specified
func EnvironmentFromName(name string) (Environment, error) {
// IMPORTANT
// As per @radhikagupta5:
// This is technical debt, fundamentally here because Kubernetes is not currently accepting
// contributions to the providers. Once that is an option, the provider should be updated to
// directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
// from this method based on the name that is provided to us.
if strings.EqualFold(name, "AZURESTACKCLOUD") {
return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
}
name = strings.ToUpper(name)
env, ok := environments[name]
if !ok {
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
}
return env, nil
}
// EnvironmentFromFile loads an Environment from a configuration file available on disk.
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
// endpoints.
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
fileContents, err := ioutil.ReadFile(location)
if err != nil {
return
}
err = json.Unmarshal(fileContents, &unmarshaled)
return
}
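A brief sketch of the lookup above; resourceManagerFor is a hypothetical helper, and the special name AZURESTACKCLOUD falls through to EnvironmentFromFile via the AZURE_ENVIRONMENT_FILEPATH variable.
package example

import (
	"github.com/Azure/go-autorest/autorest/azure"
)

// resourceManagerFor resolves a cloud by its common name (for example
// "AZURECHINACLOUD") and returns its Resource Manager endpoint.
func resourceManagerFor(name string) (string, error) {
	env, err := azure.EnvironmentFromName(name)
	if err != nil {
		return "", err
	}
	return env.ResourceManagerEndpoint, nil
}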
View File
@ -1,203 +0,0 @@
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
)
// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
// It also handles request retries
func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
rr := autorest.NewRetriableRequest(r)
for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
err = rr.Prepare()
if err != nil {
return resp, err
}
resp, err = autorest.SendWithSender(s, rr.Request(),
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return resp, err
}
if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
return resp, err
}
var re RequestError
err = autorest.Respond(
resp,
autorest.ByUnmarshallingJSON(&re),
)
if err != nil {
return resp, err
}
err = re
if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
regErr := register(client, r, re)
if regErr != nil {
return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
}
}
}
return resp, fmt.Errorf("failed request: %s", err)
})
}
}
func getProvider(re RequestError) (string, error) {
if re.ServiceError != nil {
if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 {
detail := (*re.ServiceError.Details)[0].(map[string]interface{})
return detail["target"].(string), nil
}
}
return "", errors.New("provider was not found in the response")
}
func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
subID := getSubscription(originalReq.URL.Path)
if subID == "" {
return errors.New("missing parameter subscriptionID to register resource provider")
}
providerName, err := getProvider(re)
if err != nil {
return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
}
newURL := url.URL{
Scheme: originalReq.URL.Scheme,
Host: originalReq.URL.Host,
}
// taken from the resources SDK
// with almost identical code, these sections are easier to maintain
// It is also not a good idea to import the SDK here
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
pathParameters := map[string]interface{}{
"resourceProviderNamespace": autorest.Encode("path", providerName),
"subscriptionId": autorest.Encode("path", subID),
}
const APIVersion = "2016-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(newURL.String()),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
autorest.WithQueryParameters(queryParameters),
)
req, err := preparer.Prepare(&http.Request{})
if err != nil {
return err
}
req.Cancel = originalReq.Cancel
resp, err := autorest.SendWithSender(client, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return err
}
type Provider struct {
RegistrationState *string `json:"registrationState,omitempty"`
}
var provider Provider
err = autorest.Respond(
resp,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&provider),
autorest.ByClosing(),
)
if err != nil {
return err
}
// poll for registered provisioning state
now := time.Now()
for err == nil && time.Since(now) < client.PollingDuration {
// taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(newURL.String()),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
autorest.WithQueryParameters(queryParameters),
)
req, err = preparer.Prepare(&http.Request{})
if err != nil {
return err
}
req.Cancel = originalReq.Cancel
resp, err := autorest.SendWithSender(client, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return err
}
err = autorest.Respond(
resp,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&provider),
autorest.ByClosing(),
)
if err != nil {
return err
}
if provider.RegistrationState != nil &&
*provider.RegistrationState == "Registered" {
break
}
delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel)
if !delayed {
autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel)
}
}
if !(time.Since(now) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration")
}
return err
}
func getSubscription(path string) string {
parts := strings.Split(path, "/")
for i, v := range parts {
if v == "subscriptions" && (i+1) < len(parts) {
return parts[i+1]
}
}
return ""
}
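As a usage note for the decorator above, a minimal sketch; sendWithAutoRegistration is a hypothetical helper and the client is assumed to already carry an authorizer and retry settings.
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// sendWithAutoRegistration sends the request and, if the service reports
// MissingSubscriptionRegistration, registers the resource provider and
// retries, as implemented by DoRetryWithRegistration above.
func sendWithAutoRegistration(client autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client),
	)
}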
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
@ -35,9 +21,6 @@ const (
// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
DefaultRetryAttempts = 3
// DefaultRetryDuration is the duration to wait between retries.
DefaultRetryDuration = 30 * time.Second
)
var (
@ -50,8 +33,7 @@ var (
Version(),
)
// StatusCodesForRetry are a defined group of status code for which the client will retry
StatusCodesForRetry = []int{
statusCodesForRetry = []int{
http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500
@ -166,9 +148,6 @@ type Client struct {
UserAgent string
Jar http.CookieJar
// Set to true to skip attempted registration of resource providers (false by default).
SkipResourceProviderRegistration bool
}
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
@ -178,10 +157,9 @@ func NewClientWithUserAgent(ua string) Client {
PollingDelay: DefaultPollingDelay,
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: DefaultRetryDuration,
RetryDuration: 30 * time.Second,
UserAgent: defaultUserAgent,
}
c.Sender = c.sender()
c.AddToUserAgent(ua)
return c
}
@ -207,17 +185,12 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
c.WithInspection(),
c.WithAuthorization())
if err != nil {
var resp *http.Response
if detErr, ok := err.(DetailedError); ok {
// if the authorization failed (e.g. invalid credentials) there will
// be a response associated with the error, be sure to return it.
resp = detErr.Response
}
return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
}
resp, err := SendWithSender(c.sender(), r)
Respond(resp, c.ByInspecting())
resp, err := SendWithSender(c.sender(), r,
DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...))
Respond(resp,
c.ByInspecting())
return resp, err
}
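For completeness, a small sketch of the caller side of Client.Do; getViaClient and the URL handling are illustrative only.
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// getViaClient issues a GET through Client.Do, which runs the client's
// inspection and authorization decorators before sending the request.
func getViaClient(c autorest.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return c.Do(req)
}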
View File
@ -5,20 +5,6 @@ time.Time types. And both convert to time.Time through a ToTime method.
*/
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"time"
View File
@ -1,19 +1,5 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"regexp"
"time"
View File
@ -1,19 +1,5 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"time"
View File
@ -1,19 +1,5 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/binary"
View File
@ -1,19 +1,5 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"strings"
"time"
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
@ -27,9 +13,8 @@ import (
)
const (
mimeTypeJSON = "application/json"
mimeTypeOctetStream = "application/octet-stream"
mimeTypeFormPost = "application/x-www-form-urlencoded"
mimeTypeJSON = "application/json"
mimeTypeFormPost = "application/x-www-form-urlencoded"
headerAuthorization = "Authorization"
headerContentType = "Content-Type"
@ -113,28 +98,6 @@ func WithHeader(header string, value string) PrepareDecorator {
}
}
// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before
// adding them.
func WithHeaders(headers map[string]interface{}) PrepareDecorator {
h := ensureValueStrings(headers)
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.Header == nil {
r.Header = make(http.Header)
}
for name, value := range h {
r.Header.Set(http.CanonicalHeaderKey(name), value)
}
}
return r, err
})
}
}
// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the supplied token.
func WithBearerAuthorization(token string) PrepareDecorator {
@ -165,11 +128,6 @@ func AsJSON() PrepareDecorator {
return AsContentType(mimeTypeJSON)
}
// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
func AsOctetStream() PrepareDecorator {
return AsContentType(mimeTypeOctetStream)
}
// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
// decorator does not validate that the passed method string is a known HTTP method.
func WithMethod(method string) PrepareDecorator {
@ -243,11 +201,6 @@ func WithFormData(v url.Values) PrepareDecorator {
r, err := p.Prepare(r)
if err == nil {
s := v.Encode()
if r.Header == nil {
r.Header = make(http.Header)
}
r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
r.ContentLength = int64(len(s))
r.Body = ioutil.NopCloser(strings.NewReader(s))
}
@ -463,16 +416,11 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
if r.URL == nil {
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
}
v := r.URL.Query()
for key, value := range parameters {
d, err := url.QueryUnescape(value)
if err != nil {
return r, err
}
v.Add(key, d)
v.Add(key, value)
}
r.URL.RawQuery = v.Encode()
r.URL.RawQuery = createQuery(v)
}
return r, err
})
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"io"
View File
@ -1,31 +1,17 @@
// +build !go1.8
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package autorest
import (
"bytes"
"io/ioutil"
"net/http"
)
// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
req *http.Request
br *bytes.Reader
req *http.Request
br *bytes.Reader
reset bool
}
// Prepare signals that the request is about to be sent.
@ -33,17 +19,21 @@ func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
// the underlying transport will always close the request body
if rr.req.Body != nil {
if rr.br != nil {
_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
rr.req.Body = ioutil.NopCloser(rr.br)
}
if err != nil {
return err
if rr.reset {
if rr.br != nil {
_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
}
rr.reset = false
if err != nil {
return err
}
}
if rr.br == nil {
// fall back to making a copy (only do this once)
err = rr.prepareFromByteReader()
}
// indicates that the request body needs to be reset
rr.reset = true
}
return err
}
View File
@ -1,33 +1,19 @@
// +build go1.8
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package autorest
import (
"bytes"
"io"
"io/ioutil"
"net/http"
)
// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
req *http.Request
rc io.ReadCloser
br *bytes.Reader
req *http.Request
rc io.ReadCloser
br *bytes.Reader
reset bool
}
// Prepare signals that the request is about to be sent.
@ -35,14 +21,16 @@ func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
// the underlying transport will always close the request body
if rr.req.Body != nil {
if rr.rc != nil {
rr.req.Body = rr.rc
} else if rr.br != nil {
_, err = rr.br.Seek(0, io.SeekStart)
rr.req.Body = ioutil.NopCloser(rr.br)
}
if err != nil {
return err
if rr.reset {
if rr.rc != nil {
rr.req.Body = rr.rc
} else if rr.br != nil {
_, err = rr.br.Seek(0, io.SeekStart)
}
rr.reset = false
if err != nil {
return err
}
}
if rr.req.GetBody != nil {
// this will allow us to preserve the body without having to
@ -55,6 +43,8 @@ func (rr *RetriableRequest) Prepare() (err error) {
// fall back to making a copy (only do this once)
err = rr.prepareFromByteReader()
}
// indicates that the request body needs to be reset
rr.reset = true
}
return err
}
View File
@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"log"
@ -215,26 +201,19 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
rr := NewRetriableRequest(r)
// Increment to add the first call (attempts denotes number of retries)
attempts++
for attempt := 0; attempt < attempts; {
for attempt := 0; attempt < attempts; attempt++ {
err = rr.Prepare()
if err != nil {
return resp, err
}
resp, err = s.Do(rr.Request())
// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
if err != nil || !ResponseHasStatusCode(resp, codes...) {
return resp, err
}
delayed := DelayWithRetryAfter(resp, r.Cancel)
if !delayed {
DelayForBackoff(backoff, attempt, r.Cancel)
}
// don't count a 429 against the number of attempts
// so that we continue to retry until it succeeds
if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
attempt++
}
}
return resp, err
})
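To show the decorator above from the caller's side, a short sketch; sendWithStatusRetries and the retry counts are illustrative.
package example

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// sendWithStatusRetries retries the request on common throttling and
// server-error codes, backing off between attempts.
func sendWithStatusRetries(s autorest.Sender, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(s, req,
		autorest.DoRetryForStatusCodes(3, 30*time.Second,
			http.StatusRequestTimeout,
			http.StatusTooManyRequests,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable,
			http.StatusGatewayTimeout,
		),
	)
}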
@ -244,9 +223,6 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
// responses with status code 429
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
if resp == nil {
return false
}
retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
select {
View File
@ -3,20 +3,6 @@ Package to provides helpers to ease working with pointer values of marshalled st
*/
package to
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// String returns a string value for the passed string pointer. It returns the empty string if the
// pointer is nil.
func String(s *string) string {
View File
@ -1,31 +1,15 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"sort"
"strings"
"github.com/Azure/go-autorest/autorest/adal"
)
// EncodedAs is a series of constants specifying various data encodings
@ -139,38 +123,13 @@ func MapToValues(m map[string]interface{}) url.Values {
return v
}
// AsStringSlice converts interface{} to []string. It expects the passed parameter
// to be a slice or array whose elements have an underlying type of string.
func AsStringSlice(s interface{}) ([]string, error) {
v := reflect.ValueOf(s)
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
}
stringSlice := make([]string, 0, v.Len())
for i := 0; i < v.Len(); i++ {
stringSlice = append(stringSlice, v.Index(i).String())
}
return stringSlice, nil
}
// String method converts interface v to string. If interface is a list, it
// joins list elements using the separator. Note that only sep[0] will be used for
// joining if any separator is specified.
// joins list elements using separator.
func String(v interface{}, sep ...string) string {
if len(sep) == 0 {
return ensureValueString(v)
if len(sep) > 0 {
return ensureValueString(strings.Join(v.([]string), sep[0]))
}
stringSlice, ok := v.([]string)
if ok == false {
var err error
stringSlice, err = AsStringSlice(v)
if err != nil {
panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
}
}
return ensureValueString(strings.Join(stringSlice, sep[0]))
return ensureValueString(v)
}
// Encode method encodes url path and query parameters.
@ -194,25 +153,26 @@ func queryEscape(s string) string {
return url.QueryEscape(s)
}
// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
// This is mainly useful for long-running operations that use the Azure-AsyncOperation
// header, so we change the initial PUT into a GET to retrieve the final result.
func ChangeToGet(req *http.Request) *http.Request {
req.Method = "GET"
req.Body = nil
req.ContentLength = 0
req.Header.Del("Content-Length")
return req
}
// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
// interface. If err is a DetailedError it will walk the chain of Original errors.
func IsTokenRefreshError(err error) bool {
if _, ok := err.(adal.TokenRefreshError); ok {
return true
// This method is the same as the Encode() method of the "net/url" Go package,
// except it does not encode the query parameters because they
// already come encoded. It formats the values map in query format (bar=foo&a=b).
func createQuery(v url.Values) string {
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
if de, ok := err.(DetailedError); ok {
return IsTokenRefreshError(de.Original)
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := url.QueryEscape(k) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(v)
}
}
return false
return buf.String()
}
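// exampleCreateQuery is an illustrative sketch (not part of the upstream
// autorest source) contrasting createQuery with url.Values.Encode: Encode
// re-escapes values that already arrive encoded, while createQuery writes
// them through unchanged. The value used here is hypothetical.
func exampleCreateQuery() {
	v := url.Values{"redirect_uri": []string{"https%3A%2F%2Fexample.com"}}
	fmt.Println(createQuery(v)) // redirect_uri=https%3A%2F%2Fexample.com
	fmt.Println(v.Encode())     // redirect_uri=https%253A%252F%252Fexample.com
}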

View File

@ -3,20 +3,6 @@ Package validation provides methods for validating parameter value using reflect
*/
package validation
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"reflect"
@ -105,12 +91,15 @@ func validateStruct(x reflect.Value, v Constraint, name ...string) error {
return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target))
}
return Validate([]Validation{
if err := Validate([]Validation{
{
TargetValue: getInterfaceValue(f),
Constraints: []Constraint{v},
},
})
}); err != nil {
return err
}
return nil
}
func validatePtr(x reflect.Value, v Constraint) error {

View File

@ -1,19 +1,5 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
@ -22,9 +8,9 @@ import (
)
const (
major = 9
minor = 8
patch = 1
major = 8
minor = 0
patch = 0
tag = ""
)

View File

@ -1,27 +0,0 @@
Copyright (c) 2013 Google. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,320 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package query implements encoding of structs into URL query parameters.
//
// As a simple example:
//
// type Options struct {
// Query string `url:"q"`
// ShowAll bool `url:"all"`
// Page int `url:"page"`
// }
//
// opt := Options{ "foo", true, 2 }
// v, _ := query.Values(opt)
// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
//
// The exact mapping between Go values and url.Values is described in the
// documentation for the Values() function.
package query
import (
"bytes"
"fmt"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
var timeType = reflect.TypeOf(time.Time{})
var encoderType = reflect.TypeOf(new(Encoder)).Elem()
// Encoder is an interface implemented by any type that wishes to encode
// itself into URL values in a non-standard way.
type Encoder interface {
EncodeValues(key string, v *url.Values) error
}
// Values returns the url.Values encoding of v.
//
// Values expects to be passed a struct, and traverses it recursively using the
// following encoding rules.
//
// Each exported struct field is encoded as a URL parameter unless
//
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option
//
// The empty values are false, 0, any nil pointer or interface value, any array,
// slice, map, or string of length zero, and any time.Time that returns true
// for IsZero().
//
// The URL parameter name defaults to the struct field name but can be
// specified in the struct field's tag value. The "url" key in the struct
// field's tag value is the key name, followed by an optional comma and
// options. For example:
//
// // Field is ignored by this package.
// Field int `url:"-"`
//
// // Field appears as URL parameter "myName".
// Field int `url:"myName"`
//
// // Field appears as URL parameter "myName" and the field is omitted if
// // its value is empty
// Field int `url:"myName,omitempty"`
//
// // Field appears as URL parameter "Field" (the default), but the field
// // is skipped if empty. Note the leading comma.
// Field int `url:",omitempty"`
//
// For encoding individual field values, the following type-dependent rules
// apply:
//
// Boolean values default to encoding as the strings "true" or "false".
// Including the "int" option signals that the field should be encoded as the
// strings "1" or "0".
//
// time.Time values default to encoding as RFC3339 timestamps. Including the
// "unix" option signals that the field should be encoded as a Unix time (see
// time.Unix())
//
// Slice and Array values default to encoding as multiple URL values of the
// same name. Including the "comma" option signals that the field should be
// encoded as a single comma-delimited value. Including the "space" option
// similarly encodes the value as a single space-delimited string. Including
// the "semicolon" option will encode the value as a semicolon-delimited string.
// Including the "brackets" option signals that the multiple URL values should
// have "[]" appended to the value name. "numbered" will append a number to
// the end of each incidence of the value name, example:
// name0=value0&name1=value1, etc.
//
// Anonymous struct fields are usually encoded as if their inner exported
// fields were fields in the outer struct, subject to the standard Go
// visibility rules. An anonymous struct field with a name given in its URL
// tag is treated as having that name, rather than being anonymous.
//
// Non-nil pointer values are encoded as the value pointed to.
//
// Nested structs are encoded including parent fields in value names for
// scoping. e.g:
//
// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
//
// All other values are encoded using their default string representation.
//
// Multiple fields that encode to the same URL parameter name will be included
// as multiple URL values of the same name.
func Values(v interface{}) (url.Values, error) {
values := make(url.Values)
val := reflect.ValueOf(v)
for val.Kind() == reflect.Ptr {
if val.IsNil() {
return values, nil
}
val = val.Elem()
}
if v == nil {
return values, nil
}
if val.Kind() != reflect.Struct {
return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
}
err := reflectValue(values, val, "")
return values, err
}
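// exampleValuesUsage is an illustrative sketch (not part of the upstream
// go-querystring source) showing how the tag options documented on Values
// combine in practice; the struct and values here are hypothetical.
func exampleValuesUsage() {
	type ListOptions struct {
		Labels  []string  `url:"labels,comma,omitempty"` // joined with commas
		Page    int       `url:"page,omitempty"`         // omitted when zero
		Verbose bool      `url:"verbose,int"`            // encoded as "1"/"0"
		Since   time.Time `url:"since,unix,omitempty"`   // zero time, so omitted
	}
	opts := ListOptions{Labels: []string{"bug", "ui"}, Page: 2, Verbose: true}
	v, _ := Values(opts)
	fmt.Println(v.Encode()) // labels=bug%2Cui&page=2&verbose=1
}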
// reflectValue populates the values parameter from the struct fields in val.
// Embedded structs are followed recursively (using the rules defined in the
// Values function documentation) breadth-first.
func reflectValue(values url.Values, val reflect.Value, scope string) error {
var embedded []reflect.Value
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
sf := typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
sv := val.Field(i)
tag := sf.Tag.Get("url")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if name == "" {
if sf.Anonymous && sv.Kind() == reflect.Struct {
// save embedded struct for later processing
embedded = append(embedded, sv)
continue
}
name = sf.Name
}
if scope != "" {
name = scope + "[" + name + "]"
}
if opts.Contains("omitempty") && isEmptyValue(sv) {
continue
}
if sv.Type().Implements(encoderType) {
if !reflect.Indirect(sv).IsValid() {
sv = reflect.New(sv.Type().Elem())
}
m := sv.Interface().(Encoder)
if err := m.EncodeValues(name, &values); err != nil {
return err
}
continue
}
if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
var del byte
if opts.Contains("comma") {
del = ','
} else if opts.Contains("space") {
del = ' '
} else if opts.Contains("semicolon") {
del = ';'
} else if opts.Contains("brackets") {
name = name + "[]"
}
if del != 0 {
s := new(bytes.Buffer)
first := true
for i := 0; i < sv.Len(); i++ {
if first {
first = false
} else {
s.WriteByte(del)
}
s.WriteString(valueString(sv.Index(i), opts))
}
values.Add(name, s.String())
} else {
for i := 0; i < sv.Len(); i++ {
k := name
if opts.Contains("numbered") {
k = fmt.Sprintf("%s%d", name, i)
}
values.Add(k, valueString(sv.Index(i), opts))
}
}
continue
}
for sv.Kind() == reflect.Ptr {
if sv.IsNil() {
break
}
sv = sv.Elem()
}
if sv.Type() == timeType {
values.Add(name, valueString(sv, opts))
continue
}
if sv.Kind() == reflect.Struct {
reflectValue(values, sv, name)
continue
}
values.Add(name, valueString(sv, opts))
}
for _, f := range embedded {
if err := reflectValue(values, f, scope); err != nil {
return err
}
}
return nil
}
// valueString returns the string representation of a value.
func valueString(v reflect.Value, opts tagOptions) string {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
return ""
}
v = v.Elem()
}
if v.Kind() == reflect.Bool && opts.Contains("int") {
if v.Bool() {
return "1"
}
return "0"
}
if v.Type() == timeType {
t := v.Interface().(time.Time)
if opts.Contains("unix") {
return strconv.FormatInt(t.Unix(), 10)
}
return t.Format(time.RFC3339)
}
return fmt.Sprint(v.Interface())
}
// isEmptyValue checks if a value should be considered empty for the purposes
// of omitting fields with the "omitempty" option.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
if v.Type() == timeType {
return v.Interface().(time.Time).IsZero()
}
return false
}
// tagOptions is the string following a comma in a struct field's "url" tag, or
// the empty string. It does not include the leading comma.
type tagOptions []string
// parseTag splits a struct field's url tag into its name and comma-separated
// options.
func parseTag(tag string) (string, tagOptions) {
s := strings.Split(tag, ",")
return s[0], s[1:]
}
// Contains checks whether the tagOptions contains the specified option.
func (o tagOptions) Contains(option string) bool {
for _, s := range o {
if s == option {
return true
}
}
return false
}

View File

@ -1,373 +0,0 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -1,70 +0,0 @@
# go-slug
[![Build Status](https://travis-ci.org/hashicorp/go-slug.svg?branch=master)](https://travis-ci.org/hashicorp/go-slug)
[![GitHub license](https://img.shields.io/github/license/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/blob/master/LICENSE)
[![GoDoc](https://godoc.org/github.com/hashicorp/go-slug?status.svg)](https://godoc.org/github.com/hashicorp/go-slug)
[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-slug)](https://goreportcard.com/report/github.com/hashicorp/go-slug)
[![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/issues)
Package `go-slug` offers functions for packing and unpacking Terraform Enterprise
compatible slugs. Slugs are gzip compressed tar files containing Terraform configuration files.
## Installation
Installation can be done with a normal `go get`:
```
go get -u github.com/hashicorp/go-slug
```
## Documentation
For the complete usage of `go-slug`, see the full [package docs](https://godoc.org/github.com/hashicorp/go-slug).
## Example
Packing or unpacking a slug is pretty straightforward, as shown in the
following example:
```go
package main
import (
"bytes"
"ioutil"
"log"
"os"
slug "github.com/hashicorp/go-slug"
)
func main() {
// First create a buffer for storing the slug.
	buf := bytes.NewBuffer(nil)
// Then call the Pack function with a directory path containing the
// configuration files and an io.Writer to write the slug to.
	if _, err := slug.Pack("test-fixtures/archive-dir", buf); err != nil {
log.Fatal(err)
}
// Create a directory to unpack the slug contents into.
dst, err := ioutil.TempDir("", "slug")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(dst)
// Unpacking a slug is done by calling the Unpack function with an
// io.Reader to read the slug from and a directory path of an existing
// directory to store the unpacked configuration files.
	if err := slug.Unpack(buf, dst); err != nil {
log.Fatal(err)
}
}
```
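Because `Pack` writes to any `io.Writer`, the slug does not have to be held in
memory; the buffer in the example above can be swapped for a file. A minimal
sketch (the file name here is only a placeholder):

```go
f, err := os.Create("config-slug.tar.gz")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// Pack streams the compressed archive directly into the file.
if _, err := slug.Pack("test-fixtures/archive-dir", f); err != nil {
	log.Fatal(err)
}
```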
## Issues and Contributing
If you find a problem with this package, please report an issue. Contributions
are welcome: fork this repository and submit a pull request.

Some files were not shown because too many files have changed in this diff.