Initial commit of 'cloud' package

The cloud package implements a new integration for
Terraform Cloud/Enterprise. The purpose of this integration is to better
support TFC users: it sheds some overly generic UX and architecture,
enables behavior changes that would otherwise be backwards incompatible
in the remote backend, and pays down technical debt - all of which are
vestiges from before Terraform Cloud existed.

This initial commit is largely a port of the existing 'remote'
backend, which will serve as an underlying implementation detail rather
than a typical user-level backend. Re-implementing the literal backend
interface is orthogonal to the purpose of this integration, and the
integration can always be migrated away from it later.

As this backend is considered an implementation detail, it will not be
registered as a declarable backend. Within these changes it is
registered anyway, for ease of initial development and a clean diff.
Author: Chris Arcand
Date:   2021-08-12 14:30:24 -05:00
Parent: ded4f1a0fd
Commit: ea8ad0b15a
36 changed files with 8537 additions and 1 deletion


@@ -27,6 +27,7 @@ import (
backendPg "github.com/hashicorp/terraform/internal/backend/remote-state/pg"
backendS3 "github.com/hashicorp/terraform/internal/backend/remote-state/s3"
backendSwift "github.com/hashicorp/terraform/internal/backend/remote-state/swift"
backendCloud "github.com/hashicorp/terraform/internal/cloud"
)
// backends is the list of available backends. This is a global variable
@@ -49,7 +50,6 @@ func Init(services *disco.Disco) {
defer backendsLock.Unlock()
backends = map[string]backend.InitFn{
// Enhanced backends.
"local": func() backend.Backend { return backendLocal.New() },
"remote": func() backend.Backend { return backendRemote.New(services) },
@@ -70,6 +70,10 @@ func Init(services *disco.Disco) {
"s3": func() backend.Backend { return backendS3.New() },
"swift": func() backend.Backend { return backendSwift.New() },
// Terraform Cloud 'backend'
// This is an implementation detail only, used for the cloud package
"cloud": func() backend.Backend { return backendCloud.New(services) },
// Deprecated backends.
"azure": func() backend.Backend {
return deprecateBackend(

internal/cloud/backend.go (new file, 1049 lines)

File diff suppressed because it is too large


@@ -0,0 +1,301 @@
package cloud
import (
"bufio"
"context"
"fmt"
"io"
"log"
tfe "github.com/hashicorp/go-tfe"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
func (b *Cloud) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) {
log.Printf("[INFO] cloud: starting Apply operation")
var diags tfdiags.Diagnostics
// We should remove the `CanUpdate` part of this test, but for now
// (to remain compatible with tfe.v2.1) we'll leave it in here.
if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Insufficient rights to apply changes",
"The provided credentials have insufficient rights to apply changes. In order "+
"to apply changes at least write permissions on the workspace are required.",
))
return nil, diags.Err()
}
if w.VCSRepo != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Apply not allowed for workspaces with a VCS connection",
"A workspace that is connected to a VCS requires the VCS-driven workflow "+
"to ensure that the VCS remains the single source of truth.",
))
return nil, diags.Err()
}
if op.Parallelism != defaultParallelism {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Custom parallelism values are currently not supported",
`Terraform Cloud does not support setting a custom parallelism `+
`value at this time.`,
))
}
if op.PlanFile != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Applying a saved plan is currently not supported",
`Terraform Cloud currently requires configuration to be present and `+
`does not accept an existing saved plan as an argument at this time.`,
))
}
if b.hasExplicitVariableValues(op) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Run variables are currently not supported",
fmt.Sprintf(
"Terraform Cloud does not support setting run variables from command line arguments at this time. "+
"Currently the only to way to pass variables is by "+
"creating a '*.auto.tfvars' variables file. This file will automatically "+
"be loaded when the workspace is configured to use "+
"Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+
"the workspace in the web UI:\nhttps://%s/app/%s/%s/variables",
b.hostname, b.organization, op.Workspace,
),
))
}
if !op.HasConfig() && op.PlanMode != plans.DestroyMode {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"No configuration files found",
`Apply requires configuration to be present. Applying without a configuration `+
`would mark everything for destruction, which is normally not what is desired. `+
`If you would like to destroy everything, please run 'terraform destroy' which `+
`does not require any configuration files.`,
))
}
// For API versions prior to 2.3, RemoteAPIVersion will return an empty string,
// so if there's an error when parsing the RemoteAPIVersion, it's handled as
// equivalent to an API version < 2.3.
currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion())
if !op.PlanRefresh {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Planning without refresh is not supported",
fmt.Sprintf(
`The host %s does not support the -refresh=false option for `+
`remote plans.`,
b.hostname,
),
))
}
}
if op.PlanMode == plans.RefreshOnlyMode {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Refresh-only mode is not supported",
fmt.Sprintf(
`The host %s does not support -refresh-only mode for `+
`remote plans.`,
b.hostname,
),
))
}
}
if len(op.ForceReplace) != 0 {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Planning resource replacements is not supported",
fmt.Sprintf(
`The host %s does not support the -replace option for `+
`remote plans.`,
b.hostname,
),
))
}
}
if len(op.Targets) != 0 {
desiredAPIVersion, _ := version.NewVersion("2.3")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Resource targeting is not supported",
fmt.Sprintf(
`The host %s does not support the -target option for `+
`remote plans.`,
b.hostname,
),
))
}
}
// Return if there are any errors.
if diags.HasErrors() {
return nil, diags.Err()
}
// Run the plan phase.
r, err := b.plan(stopCtx, cancelCtx, op, w)
if err != nil {
return r, err
}
// This check is also performed in the plan method to determine if
// the policies should be checked, but we need to check the values
// here again to determine if we are done and should return.
if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {
return r, nil
}
// Retrieve the run to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("Failed to retrieve run", err)
}
// Return if the run cannot be confirmed.
if !w.AutoApply && !r.Actions.IsConfirmable {
return r, nil
}
// Since we already checked the permissions before creating the run
// this should never happen. But it doesn't hurt to keep this in as
// a safeguard for any unexpected situations.
if !w.AutoApply && !r.Permissions.CanApply {
// Make sure we discard the run if possible.
if r.Actions.IsDiscardable {
err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
if err != nil {
switch op.PlanMode {
case plans.DestroyMode:
return r, generalError("Failed to discard destroy", err)
default:
return r, generalError("Failed to discard apply", err)
}
}
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Insufficient rights to approve the pending changes",
fmt.Sprintf("There are pending changes, but the provided credentials have "+
"insufficient rights to approve them. The run will be discarded to prevent "+
"it from blocking the queue waiting for external approval. To queue a run "+
"that can be approved by someone else, please use the 'Queue Plan' button in "+
"the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace),
))
return r, diags.Err()
}
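// Confirmation is only possible when both interactive input and output
// are available and -auto-approve was not given.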
mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove
if !w.AutoApply {
if mustConfirm {
opts := &terraform.InputOpts{Id: "approve"}
if op.PlanMode == plans.DestroyMode {
opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?"
opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
"There is no undo. Only 'yes' will be accepted to confirm."
} else {
opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?"
opts.Description = "Terraform will perform the actions described above.\n" +
"Only 'yes' will be accepted to approve."
}
err = b.confirm(stopCtx, op, opts, r, "yes")
if err != nil && err != errRunApproved {
return r, err
}
}
if err != errRunApproved {
if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil {
return r, generalError("Failed to approve the apply command", err)
}
}
}
// If we don't need to ask for confirmation, insert a blank
// line to separate the outputs.
if w.AutoApply || !mustConfirm {
if b.CLI != nil {
b.CLI.Output("")
}
}
r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w)
if err != nil {
return r, err
}
logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID)
if err != nil {
return r, generalError("Failed to retrieve logs", err)
}
reader := bufio.NewReaderSize(logs, 64*1024)
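// ReadLine returns at most one buffer's worth of data per call; the
// isPrefix loop below stitches longer lines back together.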
if b.CLI != nil {
skip := 0
for next := true; next; {
var l, line []byte
for isPrefix := true; isPrefix; {
l, isPrefix, err = reader.ReadLine()
if err != nil {
if err != io.EOF {
return r, generalError("Failed to read logs", err)
}
next = false
}
line = append(line, l...)
}
// Skip the first 3 lines to prevent duplicate output.
if skip < 3 {
skip++
continue
}
if next || len(line) > 0 {
b.CLI.Output(b.Colorize().Color(string(line)))
}
}
}
return r, nil
}
const applyDefaultHeader = `
[reset][yellow]Running apply in Terraform Cloud. Output will stream here. Pressing Ctrl-C
will cancel the remote apply if it's still pending. If the apply started it
will stop streaming the logs, but will not stop the apply running remotely.[reset]
Preparing the remote apply...
`

File diff suppressed because it is too large


@@ -0,0 +1,20 @@
package cloud
import (
"github.com/hashicorp/terraform/internal/backend"
)
// CLIInit implements backend.CLI
func (b *Cloud) CLIInit(opts *backend.CLIOpts) error {
if cli, ok := b.local.(backend.CLI); ok {
if err := cli.CLIInit(opts); err != nil {
return err
}
}
b.CLI = opts.CLI
b.CLIColor = opts.CLIColor
b.ContextOpts = opts.ContextOpts
return nil
}


@@ -0,0 +1,50 @@
package cloud
import (
"regexp"
"github.com/mitchellh/colorstring"
)
// TODO SvH: This file should be deleted and the type cliColorize should be
// renamed back to Colorize as soon as we can pass -no-color to the backend.
// colorsRe is used to find ANSI escaped color codes.
var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m")
// Colorer is the interface that must be implemented to colorize strings.
type Colorer interface {
Color(v string) string
}
// Colorize is used to print output when the -no-color flag is used. It will
// strip all ANSI escaped color codes which are set while the operation was
// executed in Terraform Enterprise.
//
// When Terraform Enterprise supports run specific variables, this code can be
// removed as we can then pass the CLI flag to the backend and prevent the color
// codes from being written to the output.
type Colorize struct {
cliColor *colorstring.Colorize
}
// Color will strip all ANSI escaped color codes and return an uncolored string.
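// For example, a log line from the remote run containing
// "\033[32mApply complete!\033[0m" is returned as "Apply complete!",
// with the raw escape codes removed.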
func (c *Colorize) Color(v string) string {
return colorsRe.ReplaceAllString(c.cliColor.Color(v), "")
}
// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so is useful
// as a helper to wrap any potentially colored strings.
func (b *Cloud) Colorize() Colorer {
if b.CLIColor != nil && !b.CLIColor.Disable {
return b.CLIColor
}
if b.CLIColor != nil {
return &Colorize{cliColor: b.CLIColor}
}
return &Colorize{cliColor: &colorstring.Colorize{
Colors: colorstring.DefaultColors,
Disable: true,
}}
}


@@ -0,0 +1,574 @@
package cloud
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"math"
"strconv"
"strings"
"time"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/terraform"
)
var (
errApplyDiscarded = errors.New("Apply discarded.")
errDestroyDiscarded = errors.New("Destroy discarded.")
errRunApproved = errors.New("approved using the UI or API")
errRunDiscarded = errors.New("discarded using the UI or API")
errRunOverridden = errors.New("overridden using the UI or API")
)
var (
backoffMin = 1000.0
backoffMax = 3000.0
runPollInterval = 3 * time.Second
)
// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
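// For illustration, with the package defaults (min=1000, max=3000) this
// yields 1000ms at i=0, 2000ms at i=5, and is capped at 3000ms from
// i=8 onward.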
func backoff(min, max float64, iter int) time.Duration {
backoff := math.Pow(2, float64(iter)/5) * min
if backoff > max {
backoff = max
}
return time.Duration(backoff) * time.Millisecond
}
func (b *Cloud) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
started := time.Now()
updated := started
for i := 0; ; i++ {
select {
case <-stopCtx.Done():
return r, stopCtx.Err()
case <-cancelCtx.Done():
return r, cancelCtx.Err()
case <-time.After(backoff(backoffMin, backoffMax, i)):
// Timer up, show status
}
// Retrieve the run to get its current status.
r, err := b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("Failed to retrieve run", err)
}
// Return if the run is no longer pending.
if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
if i == 0 && opType == "plan" && b.CLI != nil {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
}
if i > 0 && b.CLI != nil {
// Insert a blank line to separate the outputs.
b.CLI.Output("")
}
return r, nil
}
// Check if 30 seconds have passed since the last update.
current := time.Now()
if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
updated = current
position := 0
elapsed := ""
// Calculate and set the elapsed time.
if i > 0 {
elapsed = fmt.Sprintf(
" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
}
// Retrieve the workspace used to run this operation in.
w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
if err != nil {
return nil, generalError("Failed to retrieve workspace", err)
}
// If the workspace is locked the run will not be queued and we can
// update the status without making any expensive calls.
if w.Locked && w.CurrentRun != nil {
cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
if err != nil {
return r, generalError("Failed to retrieve current run", err)
}
if cr.Status == tfe.RunPending {
b.CLI.Output(b.Colorize().Color(
"Waiting for the manually locked workspace to be unlocked..." + elapsed))
continue
}
}
// Skip checking the workspace queue when we are the current run.
if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
found := false
options := tfe.RunListOptions{}
runlist:
for {
rl, err := b.client.Runs.List(stopCtx, w.ID, options)
if err != nil {
return r, generalError("Failed to retrieve run list", err)
}
// Loop through all runs to calculate the workspace queue position.
for _, item := range rl.Items {
if !found {
if r.ID == item.ID {
found = true
}
continue
}
// If the run is in a final state, ignore it and continue.
switch item.Status {
case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
continue
case tfe.RunPlanned:
if op.Type == backend.OperationTypePlan {
continue
}
}
// Increase the workspace queue position.
position++
// Stop searching when we reached the current run.
if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
break runlist
}
}
// Exit the loop when we've seen all pages.
if rl.CurrentPage >= rl.TotalPages {
break
}
// Update the page number to get the next page.
options.PageNumber = rl.NextPage
}
if position > 0 {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for %d run(s) to finish before being queued...%s",
position,
elapsed,
)))
continue
}
}
options := tfe.RunQueueOptions{}
search:
for {
rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
if err != nil {
return r, generalError("Failed to retrieve queue", err)
}
// Search through all queued items to find our run.
for _, item := range rq.Items {
if r.ID == item.ID {
position = item.PositionInQueue
break search
}
}
// Exit the loop when we've seen all pages.
if rq.CurrentPage >= rq.TotalPages {
break
}
// Update the page number to get the next page.
options.PageNumber = rq.NextPage
}
if position > 0 {
c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
if err != nil {
return r, generalError("Failed to retrieve capacity", err)
}
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for %d queued run(s) to finish before starting...%s",
position-c.Running,
elapsed,
)))
continue
}
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"Waiting for the %s to start...%s", opType, elapsed)))
}
}
}
// hasExplicitVariableValues is a best-effort check to determine whether the
// user has provided -var or -var-file arguments to a remote operation.
//
// The results may be inaccurate if the configuration is invalid or if
// individual variable values are invalid. That's okay because we only use this
// result to hint the user to set variables a different way. It's always the
// remote system's responsibility to do final validation of the input.
func (b *Cloud) hasExplicitVariableValues(op *backend.Operation) bool {
// Load the configuration using the caller-provided configuration loader.
config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir)
if configDiags.HasErrors() {
// If we can't load the configuration then we'll assume no explicit
// variable values just to let the remote operation start and let
// the remote system return the same set of configuration errors.
return false
}
// We're intentionally ignoring the diagnostics here because validation
// of the variable values is the responsibility of the remote system. Our
// goal here is just to make a best effort count of how many variable
// values are coming from -var or -var-file CLI arguments so that we can
// hint the user that those are not supported for remote operations.
variables, _ := backend.ParseVariableValues(op.Variables, config.Module.Variables)
// Check for explicitly-defined (-var and -var-file) variables, which
// Terraform Cloud currently does not support. All other source types are okay,
// because they are implicit from the execution context anyway and so
// their final values will come from the _remote_ execution context.
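// For example, -var 'foo=bar' arrives as ValueFromCLIArg and
// -var-file=custom.tfvars as ValueFromNamedFile, while auto-loaded files
// such as *.auto.tfvars come in under other source types and pass through.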
for _, v := range variables {
switch v.SourceType {
case terraform.ValueFromCLIArg, terraform.ValueFromNamedFile:
return true
}
}
return false
}
func (b *Cloud) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
if r.CostEstimate == nil {
return nil
}
msgPrefix := "Cost estimation"
started := time.Now()
updated := started
for i := 0; ; i++ {
select {
case <-stopCtx.Done():
return stopCtx.Err()
case <-cancelCtx.Done():
return cancelCtx.Err()
case <-time.After(backoff(backoffMin, backoffMax, i)):
}
// Retrieve the cost estimate to get its current status.
ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID)
if err != nil {
return generalError("Failed to retrieve cost estimate", err)
}
// If the run is canceled or errored, but the cost-estimate still has
// no result, there is nothing further to render.
if ce.Status != tfe.CostEstimateFinished {
if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {
return nil
}
}
// Check i == 0 so as to avoid printing this starting horizontal-rule
// on every retry; it should only be printed on the first (i=0) attempt.
if b.CLI != nil && i == 0 {
b.CLI.Output("\n------------------------------------------------------------------------\n")
}
switch ce.Status {
case tfe.CostEstimateFinished:
delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64)
if err != nil {
return generalError("Unexpected error", err)
}
sign := "+"
if delta < 0 {
sign = "-"
}
deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1)
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount)))
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr)))
if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply {
b.CLI.Output("\n------------------------------------------------------------------------")
}
}
return nil
case tfe.CostEstimatePending, tfe.CostEstimateQueued:
// Check if 30 seconds have passed since the last update.
current := time.Now()
if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
updated = current
elapsed := ""
// Calculate and set the elapsed time.
if i > 0 {
elapsed = fmt.Sprintf(
" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
}
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." + elapsed + "\n"))
}
continue
case tfe.CostEstimateSkippedDueToTargeting:
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
b.CLI.Output("Not available for this plan, because it was created with the -target option.")
b.CLI.Output("\n------------------------------------------------------------------------")
return nil
case tfe.CostEstimateErrored:
b.CLI.Output(msgPrefix + " errored.\n")
b.CLI.Output("\n------------------------------------------------------------------------")
return nil
case tfe.CostEstimateCanceled:
return fmt.Errorf(msgPrefix + " canceled.")
default:
return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status)
}
}
}
func (b *Cloud) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
if b.CLI != nil {
b.CLI.Output("\n------------------------------------------------------------------------\n")
}
for i, pc := range r.PolicyChecks {
// Read the policy check logs. This is a blocking call that will only
// return once the policy check is complete.
logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID)
if err != nil {
return generalError("Failed to retrieve policy check logs", err)
}
reader := bufio.NewReaderSize(logs, 64*1024)
// Retrieve the policy check to get its current status.
pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID)
if err != nil {
return generalError("Failed to retrieve policy check", err)
}
// If the run is canceled or errored, but the policy check still has
// no result, there is nothing further to render.
if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {
switch pc.Status {
case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable:
continue
}
}
var msgPrefix string
switch pc.Scope {
case tfe.PolicyScopeOrganization:
msgPrefix = "Organization policy check"
case tfe.PolicyScopeWorkspace:
msgPrefix = "Workspace policy check"
default:
msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope)
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
}
if b.CLI != nil {
for next := true; next; {
var l, line []byte
for isPrefix := true; isPrefix; {
l, isPrefix, err = reader.ReadLine()
if err != nil {
if err != io.EOF {
return generalError("Failed to read logs", err)
}
next = false
}
line = append(line, l...)
}
if next || len(line) > 0 {
b.CLI.Output(b.Colorize().Color(string(line)))
}
}
}
switch pc.Status {
case tfe.PolicyPasses:
if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil {
b.CLI.Output("\n------------------------------------------------------------------------")
}
continue
case tfe.PolicyErrored:
return fmt.Errorf(msgPrefix + " errored.")
case tfe.PolicyHardFailed:
return fmt.Errorf(msgPrefix + " hard failed.")
case tfe.PolicySoftFailed:
runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID)
if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil ||
!pc.Actions.IsOverridable || !pc.Permissions.CanOverride {
return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl)
}
if op.AutoApprove {
if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {
return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err)
}
} else {
opts := &terraform.InputOpts{
Id: "override",
Query: "\nDo you want to override the soft failed policy check?",
Description: "Only 'override' will be accepted to override.",
}
err = b.confirm(stopCtx, op, opts, r, "override")
if err != nil && err != errRunOverridden {
return fmt.Errorf(
fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl),
)
}
if err != errRunOverridden {
if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {
return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err)
}
} else {
b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl))
}
}
if b.CLI != nil {
b.CLI.Output("------------------------------------------------------------------------")
}
default:
return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status)
}
}
return nil
}
func (b *Cloud) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error {
doneCtx, cancel := context.WithCancel(stopCtx)
result := make(chan error, 2)
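// Two writers may race on this buffered channel: the polling goroutine
// below, which watches for the run being approved or discarded
// out-of-band, and the interactive prompt; the first result wins.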
go func() {
// Make sure we cancel doneCtx before we return
// so the input command is also canceled.
defer cancel()
for {
select {
case <-doneCtx.Done():
return
case <-stopCtx.Done():
return
case <-time.After(runPollInterval):
// Retrieve the run again to get its current status.
r, err := b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
result <- generalError("Failed to retrieve run", err)
return
}
switch keyword {
case "override":
if r.Status != tfe.RunPolicyOverride {
if r.Status == tfe.RunDiscarded {
err = errRunDiscarded
} else {
err = errRunOverridden
}
}
case "yes":
if !r.Actions.IsConfirmable {
if r.Status == tfe.RunDiscarded {
err = errRunDiscarded
} else {
err = errRunApproved
}
}
}
if err != nil {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(
fmt.Sprintf("[reset][yellow]%s[reset]", err.Error())))
}
if err == errRunDiscarded {
err = errApplyDiscarded
if op.PlanMode == plans.DestroyMode {
err = errDestroyDiscarded
}
}
result <- err
return
}
}
}
}()
result <- func() error {
v, err := op.UIIn.Input(doneCtx, opts)
if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled {
return fmt.Errorf("Error asking %s: %v", opts.Id, err)
}
// We return the error of our parent context as we don't
// care about the error of the doneCtx which is only used
// within this function. So if the doneCtx was canceled
// because stopCtx was canceled, this will properly return
// a context.Canceled error and otherwise it returns nil.
if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled {
return stopCtx.Err()
}
// Make sure we cancel the context here so the loop that
// checks for external changes to the run is ended before
// we start to make changes ourselves.
cancel()
if v != keyword {
// Retrieve the run again to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return generalError("Failed to retrieve run", err)
}
// Make sure we discard the run if possible.
if r.Actions.IsDiscardable {
err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
if err != nil {
if op.PlanMode == plans.DestroyMode {
return generalError("Failed to discard destroy", err)
}
return generalError("Failed to discard apply", err)
}
}
// Even if the run was discarded successfully, we still
// return an error as the apply command was canceled.
if op.PlanMode == plans.DestroyMode {
return errDestroyDiscarded
}
return errApplyDiscarded
}
return nil
}()
return <-result
}


@@ -0,0 +1,280 @@
package cloud
import (
"context"
"fmt"
"log"
"strings"
"github.com/hashicorp/errwrap"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// Context implements backend.Enhanced.
func (b *Cloud) Context(op *backend.Operation) (*terraform.Context, statemgr.Full, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
op.StateLocker = op.StateLocker.WithContext(context.Background())
// Get the remote workspace name.
remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace)
// Get the latest state.
log.Printf("[TRACE] cloud: requesting state manager for workspace %q", remoteWorkspaceName)
stateMgr, err := b.StateMgr(op.Workspace)
if err != nil {
diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err))
return nil, nil, diags
}
log.Printf("[TRACE] cloud: requesting state lock for workspace %q", remoteWorkspaceName)
if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() {
return nil, nil, diags
}
defer func() {
// If we're returning with errors, and thus not producing a valid
// context, we'll want to avoid leaving the remote workspace locked.
if diags.HasErrors() {
diags = diags.Append(op.StateLocker.Unlock())
}
}()
log.Printf("[TRACE] cloud: reading remote state for workspace %q", remoteWorkspaceName)
if err := stateMgr.RefreshState(); err != nil {
diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err))
return nil, nil, diags
}
// Initialize our context options
var opts terraform.ContextOpts
if v := b.ContextOpts; v != nil {
opts = *v
}
// Copy set options from the operation
opts.PlanMode = op.PlanMode
opts.Targets = op.Targets
opts.UIInput = op.UIIn
// Load the latest state. If we enter contextFromPlanFile below then the
// state snapshot in the plan file must match this, or else it'll return
// error diagnostics.
log.Printf("[TRACE] cloud: retrieving remote state snapshot for workspace %q", remoteWorkspaceName)
opts.State = stateMgr.State()
log.Printf("[TRACE] cloud: loading configuration for the current working directory")
config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, nil, diags
}
opts.Config = config
// The underlying API expects us to use the opaque workspace id to request
// variables, so we'll need to look that up using our organization name
// and workspace name.
remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace)
if err != nil {
diags = diags.Append(errwrap.Wrapf("Error finding remote workspace: {{err}}", err))
return nil, nil, diags
}
log.Printf("[TRACE] cloud: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID)
tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, tfe.VariableListOptions{})
if err != nil && err != tfe.ErrResourceNotFound {
diags = diags.Append(errwrap.Wrapf("Error loading variables: {{err}}", err))
return nil, nil, diags
}
if op.AllowUnsetVariables {
// If we're not going to use the variables in an operation we'll be
// more lax about them, stubbing out any unset ones as unknown.
// This gives us enough information to produce a consistent context,
// but not enough information to run a real operation (plan, apply, etc)
opts.Variables = stubAllVariables(op.Variables, config.Module.Variables)
} else {
if tfeVariables != nil {
if op.Variables == nil {
op.Variables = make(map[string]backend.UnparsedVariableValue)
}
for _, v := range tfeVariables.Items {
if v.Category == tfe.CategoryTerraform {
op.Variables[v.Key] = &remoteStoredVariableValue{
definition: v,
}
}
}
}
if op.Variables != nil {
variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables)
diags = diags.Append(varDiags)
if diags.HasErrors() {
return nil, nil, diags
}
opts.Variables = variables
}
}
tfCtx, ctxDiags := terraform.NewContext(&opts)
diags = diags.Append(ctxDiags)
log.Printf("[TRACE] cloud: finished building terraform.Context")
return tfCtx, stateMgr, diags
}
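// getRemoteWorkspaceName maps a local workspace name to its remote
// counterpart. For example, with prefix "app-" the local workspace "dev"
// maps to the remote workspace "app-dev", while the default workspace
// maps to the single explicitly configured remote workspace.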
func (b *Cloud) getRemoteWorkspaceName(localWorkspaceName string) string {
switch {
case localWorkspaceName == backend.DefaultStateName:
// The default workspace name is a special case, for when the backend
// is configured to work with an exact remote workspace rather than with
// a remote workspace _prefix_.
return b.workspace
case b.prefix != "" && !strings.HasPrefix(localWorkspaceName, b.prefix):
return b.prefix + localWorkspaceName
default:
return localWorkspaceName
}
}
func (b *Cloud) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) {
remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName)
log.Printf("[TRACE] cloud: looking up workspace for %s/%s", b.organization, remoteWorkspaceName)
remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName)
if err != nil {
return nil, err
}
return remoteWorkspace, nil
}
func (b *Cloud) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) {
remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName)
if err != nil {
return "", err
}
return remoteWorkspace.ID, nil
}
func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) terraform.InputValues {
ret := make(terraform.InputValues, len(decls))
for name, cfg := range decls {
raw, exists := vv[name]
if !exists {
ret[name] = &terraform.InputValue{
Value: cty.UnknownVal(cfg.Type),
SourceType: terraform.ValueFromConfig,
}
continue
}
val, diags := raw.ParseVariableValue(cfg.ParsingMode)
if diags.HasErrors() {
ret[name] = &terraform.InputValue{
Value: cty.UnknownVal(cfg.Type),
SourceType: terraform.ValueFromConfig,
}
continue
}
ret[name] = val
}
return ret
}
// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation
// that translates from the go-tfe representation of stored variables into
// the Terraform Core backend representation of variables.
type remoteStoredVariableValue struct {
definition *tfe.Variable
}
var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil)
func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
var val cty.Value
switch {
case v.definition.Sensitive:
// If it's marked as sensitive then it's not available for use in
// local operations. We'll use an unknown value as a placeholder for
// it so that operations that don't need it might still work, but
// we'll also produce a warning about it to add context for any
// errors that might result here.
val = cty.DynamicVal
if !v.definition.HCL {
// If it's not marked as HCL then we at least know that the
// value must be a string, so we'll set that in case it allows
// us to do some more precise type checking.
val = cty.UnknownVal(cty.String)
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
fmt.Sprintf("Value for var.%s unavailable", v.definition.Key),
fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key),
))
case v.definition.HCL:
// If the variable value is marked as being in HCL syntax, we need to
// parse it the same way as it would be interpreted in a .tfvars
// file because that is how it would get passed to Terraform CLI for
// a remote operation and we want to mimic that result as closely as
// possible.
var exprDiags hcl.Diagnostics
expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "<remote workspace>", hcl.Pos{Line: 1, Column: 1})
if expr != nil {
var moreDiags hcl.Diagnostics
val, moreDiags = expr.Value(nil)
exprDiags = append(exprDiags, moreDiags...)
} else {
// We'll have already put some errors in exprDiags above, so we'll
// just stub out the value here.
val = cty.DynamicVal
}
// We don't have sufficient context to return decent error messages
// for syntax errors in the remote values, so we'll just return a
// generic message instead for now.
// (More complete error messages will still result from true remote
// operations, because they'll run on the remote system where we've
// materialized the values into a tfvars file we can report from.)
if exprDiags.HasErrors() {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
fmt.Sprintf("Invalid expression for var.%s", v.definition.Key),
fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key),
))
}
default:
// A variable value _not_ marked as HCL is always a string, given
// literally.
val = cty.StringVal(v.definition.Value)
}
return &terraform.InputValue{
Value: val,
// We mark these as "from input" with the rationale that entering
// variable values into the Terraform Cloud or Enterprise UI is,
// roughly speaking, a similar idea to entering variable values at
// the interactive CLI prompts. It's not a perfect correspondence,
// but it's closer than the other options.
SourceType: terraform.ValueFromInput,
}, diags
}


@@ -0,0 +1,235 @@
package cloud
import (
"context"
"testing"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/command/clistate"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/initwd"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/terminal"
"github.com/zclconf/go-cty/cty"
)
func TestRemoteStoredVariableValue(t *testing.T) {
tests := map[string]struct {
Def *tfe.Variable
Want cty.Value
WantError string
}{
"string literal": {
&tfe.Variable{
Key: "test",
Value: "foo",
HCL: false,
Sensitive: false,
},
cty.StringVal("foo"),
``,
},
"string HCL": {
&tfe.Variable{
Key: "test",
Value: `"foo"`,
HCL: true,
Sensitive: false,
},
cty.StringVal("foo"),
``,
},
"list HCL": {
&tfe.Variable{
Key: "test",
Value: `[]`,
HCL: true,
Sensitive: false,
},
cty.EmptyTupleVal,
``,
},
"null HCL": {
&tfe.Variable{
Key: "test",
Value: `null`,
HCL: true,
Sensitive: false,
},
cty.NullVal(cty.DynamicPseudoType),
``,
},
"literal sensitive": {
&tfe.Variable{
Key: "test",
HCL: false,
Sensitive: true,
},
cty.UnknownVal(cty.String),
``,
},
"HCL sensitive": {
&tfe.Variable{
Key: "test",
HCL: true,
Sensitive: true,
},
cty.DynamicVal,
``,
},
"HCL computation": {
// This (stored expressions containing computation) is not a case
// we intentionally supported, but it became possible for remote
// operations in Terraform 0.12 (due to Terraform Cloud/Enterprise
// just writing the HCL verbatim into generated `.tfvars` files).
// We support it here for consistency, and we continue to support
// it in both places for backward-compatibility. In practice,
// there's little reason to do computation in a stored variable
// value because references are not supported.
&tfe.Variable{
Key: "test",
Value: `[for v in ["a"] : v]`,
HCL: true,
Sensitive: false,
},
cty.TupleVal([]cty.Value{cty.StringVal("a")}),
``,
},
"HCL syntax error": {
&tfe.Variable{
Key: "test",
Value: `[`,
HCL: true,
Sensitive: false,
},
cty.DynamicVal,
`Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`,
},
"HCL with references": {
&tfe.Variable{
Key: "test",
Value: `foo.bar`,
HCL: true,
Sensitive: false,
},
cty.DynamicVal,
`Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
v := &remoteStoredVariableValue{
definition: test.Def,
}
// This ParseVariableValue implementation ignores the parsing mode,
// so we'll just always parse literal here. (The parsing mode is
// selected by the remote server, not by our local configuration.)
gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral)
if test.WantError != "" {
if !diags.HasErrors() {
t.Fatalf("missing expected error\ngot: <no error>\nwant: %s", test.WantError)
}
errStr := diags.Err().Error()
if errStr != test.WantError {
t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError)
}
} else {
if diags.HasErrors() {
t.Fatalf("unexpected error\ngot: %s\nwant: <no error>", diags.Err().Error())
}
got := gotIV.Value
if !test.Want.RawEquals(got) {
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
}
}
})
}
}
func TestRemoteContextWithVars(t *testing.T) {
catTerraform := tfe.CategoryTerraform
catEnv := tfe.CategoryEnv
tests := map[string]struct {
Opts *tfe.VariableCreateOptions
WantError string
}{
"Terraform variable": {
&tfe.VariableCreateOptions{
Category: &catTerraform,
},
`Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. To use this value, add a "variable" block to the configuration.`,
},
"environment variable": {
&tfe.VariableCreateOptions{
Category: &catEnv,
},
``,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
configDir := "./testdata/empty"
b, bCleanup := testBackendDefault(t)
defer bCleanup()
_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir)
defer configCleanup()
workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
streams, _ := terminal.StreamsForTesting(t)
view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams))
op := &backend.Operation{
ConfigDir: configDir,
ConfigLoader: configLoader,
StateLocker: clistate.NewLocker(0, view),
Workspace: backend.DefaultStateName,
}
v := test.Opts
if v.Key == nil {
key := "key"
v.Key = &key
}
b.client.Variables.Create(context.TODO(), workspaceID, *v)
_, _, diags := b.Context(op)
if test.WantError != "" {
if !diags.HasErrors() {
t.Fatalf("missing expected error\ngot: <no error>\nwant: %s", test.WantError)
}
errStr := diags.Err().Error()
if errStr != test.WantError {
t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError)
}
// When Context() returns an error, it should unlock the state,
// so re-locking it is expected to succeed.
stateMgr, _ := b.StateMgr(backend.DefaultStateName)
if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
t.Fatalf("unexpected error locking state: %s", err.Error())
}
} else {
if diags.HasErrors() {
t.Fatalf("unexpected error\ngot: %s\nwant: <no error>", diags.Err().Error())
}
// When Context() succeeds, this should fail w/ "workspace already locked"
stateMgr, _ := b.StateMgr(backend.DefaultStateName)
if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil {
t.Fatal("unexpected success locking state after Context")
}
}
})
}
}

File diff suppressed because it is too large


@@ -0,0 +1,439 @@
package cloud
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"syscall"
"time"
tfe "github.com/hashicorp/go-tfe"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/tfdiags"
)
var planConfigurationVersionsPollInterval = 500 * time.Millisecond
func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) {
log.Printf("[INFO] cloud: starting Plan operation")
var diags tfdiags.Diagnostics
if !w.Permissions.CanQueueRun {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Insufficient rights to generate a plan",
"The provided credentials have insufficient rights to generate a plan. In order "+
"to generate plans, at least plan permissions on the workspace are required.",
))
return nil, diags.Err()
}
if op.Parallelism != defaultParallelism {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Custom parallelism values are currently not supported",
`Terraform Cloud does not support setting a custom parallelism `+
`value at this time.`,
))
}
if op.PlanFile != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Displaying a saved plan is currently not supported",
`Terraform Cloud currently requires configuration to be present and `+
`does not accept an existing saved plan as an argument at this time.`,
))
}
if op.PlanOutPath != "" {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Saving a generated plan is currently not supported",
`Terraform Cloud does not support saving the generated execution `+
`plan locally at this time.`,
))
}
if b.hasExplicitVariableValues(op) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Run variables are currently not supported",
fmt.Sprintf(
"Terraform Cloud does not support setting run variables from command line arguments at this time. "+
"Currently the only to way to pass variables is by "+
"creating a '*.auto.tfvars' variables file. This file will automatically "+
"be loaded when the workspace is configured to use "+
"Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+
"the workspace in the web UI:\nhttps://%s/app/%s/%s/variables",
b.hostname, b.organization, op.Workspace,
),
))
}
if !op.HasConfig() && op.PlanMode != plans.DestroyMode {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"No configuration files found",
`Plan requires configuration to be present. Planning without a configuration `+
`would mark everything for destruction, which is normally not what is desired. `+
`If you would like to destroy everything, please run plan with the "-destroy" `+
`flag or create a single empty configuration file. Otherwise, please create `+
`a Terraform configuration file in the path being executed and try again.`,
))
}
// For API versions prior to 2.3, RemoteAPIVersion will return an empty string,
// so if there's an error when parsing the RemoteAPIVersion, it's handled as
// equivalent to an API version < 2.3.
currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion())
if len(op.Targets) != 0 {
desiredAPIVersion, _ := version.NewVersion("2.3")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Resource targeting is not supported",
fmt.Sprintf(
`The host %s does not support the -target option for `+
`remote plans.`,
b.hostname,
),
))
}
}
if !op.PlanRefresh {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Planning without refresh is not supported",
fmt.Sprintf(
`The host %s does not support the -refresh=false option for `+
`remote plans.`,
b.hostname,
),
))
}
}
if len(op.ForceReplace) != 0 {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Planning resource replacements is not supported",
fmt.Sprintf(
`The host %s does not support the -replace option for `+
`remote plans.`,
b.hostname,
),
))
}
}
if op.PlanMode == plans.RefreshOnlyMode {
desiredAPIVersion, _ := version.NewVersion("2.4")
if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Refresh-only mode is not supported",
fmt.Sprintf(
`The host %s does not support -refresh-only mode for `+
`remote plans.`,
b.hostname,
),
))
}
}
// Return if there are any errors.
if diags.HasErrors() {
return nil, diags.Err()
}
return b.plan(stopCtx, cancelCtx, op, w)
}
func (b *Cloud) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) {
if b.CLI != nil {
header := planDefaultHeader
if op.Type == backend.OperationTypeApply {
header = applyDefaultHeader
}
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n"))
}
configOptions := tfe.ConfigurationVersionCreateOptions{
AutoQueueRuns: tfe.Bool(false),
Speculative: tfe.Bool(op.Type == backend.OperationTypePlan),
}
cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions)
if err != nil {
return nil, generalError("Failed to create configuration version", err)
}
var configDir string
if op.ConfigDir != "" {
// De-normalize the configuration directory path.
configDir, err = filepath.Abs(op.ConfigDir)
if err != nil {
return nil, generalError(
"Failed to get absolute path of the configuration directory: %v", err)
}
// Make sure to take the working directory into account by removing
// the working directory from the current path. This will result in
// a path that points to the expected root of the workspace.
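// For example, an absolute config path of /repo/env/prod with a
// working directory of "env/prod" yields /repo.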
configDir = filepath.Clean(strings.TrimSuffix(
filepath.Clean(configDir),
filepath.Clean(w.WorkingDirectory),
))
// If the workspace has a subdirectory as its working directory then
// our configDir will be some parent directory of the current working
// directory. Users are likely to find that surprising, so we'll
// produce an explicit message about it to be transparent about what
// we are doing and why.
if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory {
if b.CLI != nil {
b.CLI.Output(fmt.Sprintf(strings.TrimSpace(`
The remote workspace is configured to work with configuration at
%s relative to the target repository.
Terraform will upload the contents of the following directory,
excluding files or directories as defined by a .terraformignore file
at %s/.terraformignore (if it is present),
in order to capture the filesystem context the remote workspace expects:
%s
`), w.WorkingDirectory, configDir, configDir) + "\n")
}
}
} else {
// We did a check earlier to make sure we either have a config dir,
// or the plan is run with -destroy. So this else clause will only
// be executed when we are destroying and doesn't need the config.
configDir, err = ioutil.TempDir("", "tf")
if err != nil {
return nil, generalError("Failed to create temporary directory", err)
}
defer os.RemoveAll(configDir)
// Make sure the configured working directory exists.
err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700)
if err != nil {
return nil, generalError(
"Failed to create temporary working directory", err)
}
}
err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir)
if err != nil {
return nil, generalError("Failed to upload configuration files", err)
}
uploaded := false
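// With the 500ms poll interval, 60 iterations bounds the wait for the
// upload to be processed at roughly 30 seconds.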
for i := 0; i < 60 && !uploaded; i++ {
select {
case <-stopCtx.Done():
return nil, context.Canceled
case <-cancelCtx.Done():
return nil, context.Canceled
case <-time.After(planConfigurationVersionsPollInterval):
cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID)
if err != nil {
return nil, generalError("Failed to retrieve configuration version", err)
}
if cv.Status == tfe.ConfigurationUploaded {
uploaded = true
}
}
}
if !uploaded {
return nil, generalError(
"Failed to upload configuration files", errors.New("operation timed out"))
}
runOptions := tfe.RunCreateOptions{
ConfigurationVersion: cv,
Refresh: tfe.Bool(op.PlanRefresh),
Workspace: w,
}
switch op.PlanMode {
case plans.NormalMode:
// okay, but we don't need to do anything special for this
case plans.RefreshOnlyMode:
runOptions.RefreshOnly = tfe.Bool(true)
case plans.DestroyMode:
runOptions.IsDestroy = tfe.Bool(true)
default:
// Shouldn't get here because we should update this for each new
// plan mode we add, mapping it to the corresponding RunCreateOptions
// field.
return nil, generalError(
"Invalid plan mode",
fmt.Errorf("Terraform Cloud doesn't support %s", op.PlanMode),
)
}
if len(op.Targets) != 0 {
runOptions.TargetAddrs = make([]string, 0, len(op.Targets))
for _, addr := range op.Targets {
runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String())
}
}
if len(op.ForceReplace) != 0 {
runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace))
for _, addr := range op.ForceReplace {
runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String())
}
}
r, err := b.client.Runs.Create(stopCtx, runOptions)
if err != nil {
return r, generalError("Failed to create run", err)
}
// When the lock timeout is set, if the run is still pending and
// cancellable after that period, we attempt to cancel it.
if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 {
go func() {
select {
case <-stopCtx.Done():
return
case <-cancelCtx.Done():
return
case <-time.After(lockTimeout):
// Retrieve the run to get its current status.
r, err := b.client.Runs.Read(cancelCtx, r.ID)
if err != nil {
log.Printf("[ERROR] error reading run: %v", err)
return
}
if r.Status == tfe.RunPending && r.Actions.IsCancelable {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr)))
}
// We abuse the auto-approve flag to indicate that we do not
// want to ask if the remote operation should be canceled.
op.AutoApprove = true
p, err := os.FindProcess(os.Getpid())
if err != nil {
log.Printf("[ERROR] error searching process ID: %v", err)
return
}
p.Signal(syscall.SIGINT)
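// Signaling ourselves reuses the normal interrupt handling path, which
// takes care of canceling the remote run.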
}
}
}()
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf(
runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n"))
}
r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w)
if err != nil {
return r, err
}
logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID)
if err != nil {
return r, generalError("Failed to retrieve logs", err)
}
reader := bufio.NewReaderSize(logs, 64*1024)
if b.CLI != nil {
for next := true; next; {
var l, line []byte
for isPrefix := true; isPrefix; {
l, isPrefix, err = reader.ReadLine()
if err != nil {
if err != io.EOF {
return r, generalError("Failed to read logs", err)
}
next = false
}
line = append(line, l...)
}
if next || len(line) > 0 {
b.CLI.Output(b.Colorize().Color(string(line)))
}
}
}
// Retrieve the run to get its current status.
r, err = b.client.Runs.Read(stopCtx, r.ID)
if err != nil {
return r, generalError("Failed to retrieve run", err)
}
// If the run is canceled or errored, we still continue to the
// cost-estimation and policy check phases to ensure we render any
// results available. In the case of a hard-failed policy check, the
// status of the run will be "errored", but there is still policy
// information which should be shown.
// Show any cost estimation output.
if r.CostEstimate != nil {
err = b.costEstimate(stopCtx, cancelCtx, op, r)
if err != nil {
return r, err
}
}
// Check any configured sentinel policies.
if len(r.PolicyChecks) > 0 {
err = b.checkPolicy(stopCtx, cancelCtx, op, r)
if err != nil {
return r, err
}
}
return r, nil
}
const planDefaultHeader = `
[reset][yellow]Running plan in Terraform Cloud. Output will stream here. Pressing Ctrl-C
will stop streaming the logs, but will not stop the plan running remotely.[reset]
Preparing the remote plan...
`
const runHeader = `
[reset][yellow]To view this run in a browser, visit:
https://%s/app/%s/%s/runs/%s[reset]
`
// The newline in this error is to make it look good in the CLI!
const lockTimeoutErr = `
[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation.
[reset]
`
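A note on the log-streaming loop above: bufio.Reader.ReadLine reports isPrefix == true when a line exceeds the reader's buffer, so the inner loop reassembles the fragments before a line is printed, and EOF flushes any partial line. A minimal self-contained sketch of the same pattern follows; the input text and the deliberately small buffer are illustrative assumptions, not part of this package:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A 16-byte buffer (the bufio minimum) forces ReadLine to split long
	// lines and report isPrefix == true for each fragment.
	reader := bufio.NewReaderSize(strings.NewReader("a fairly long log line\nshort\n"), 16)
	for next := true; next; {
		var l, line []byte
		var err error
		for isPrefix := true; isPrefix; {
			l, isPrefix, err = reader.ReadLine()
			if err != nil {
				if err != io.EOF {
					panic(err) // the backend wraps this with generalError instead
				}
				next = false // EOF: emit any partial line, then stop
			}
			line = append(line, l...)
		}
		if next || len(line) > 0 {
			fmt.Println(string(line))
		}
	}
}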

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,182 @@
package cloud
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"fmt"
tfe "github.com/hashicorp/go-tfe"
"github.com/hashicorp/terraform/internal/states/remote"
"github.com/hashicorp/terraform/internal/states/statefile"
"github.com/hashicorp/terraform/internal/states/statemgr"
)
type remoteClient struct {
client *tfe.Client
lockInfo *statemgr.LockInfo
organization string
runID string
stateUploadErr bool
workspace *tfe.Workspace
forcePush bool
}
// Get the remote state.
func (r *remoteClient) Get() (*remote.Payload, error) {
ctx := context.Background()
sv, err := r.client.StateVersions.Current(ctx, r.workspace.ID)
if err != nil {
if err == tfe.ErrResourceNotFound {
// If no state exists, then return nil.
return nil, nil
}
return nil, fmt.Errorf("Error retrieving state: %v", err)
}
state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL)
if err != nil {
return nil, fmt.Errorf("Error downloading state: %v", err)
}
// If the state is empty, then return nil.
if len(state) == 0 {
return nil, nil
}
// Get the MD5 checksum of the state.
sum := md5.Sum(state)
return &remote.Payload{
Data: state,
MD5: sum[:],
}, nil
}
// Put the remote state.
func (r *remoteClient) Put(state []byte) error {
ctx := context.Background()
// Read the raw state into a Terraform state.
stateFile, err := statefile.Read(bytes.NewReader(state))
if err != nil {
return fmt.Errorf("Error reading state: %s", err)
}
options := tfe.StateVersionCreateOptions{
Lineage: tfe.String(stateFile.Lineage),
Serial: tfe.Int64(int64(stateFile.Serial)),
MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))),
State: tfe.String(base64.StdEncoding.EncodeToString(state)),
Force: tfe.Bool(r.forcePush),
}
// If we have a run ID, make sure to add it to the options
// so the state will be properly associated with the run.
if r.runID != "" {
options.Run = &tfe.Run{ID: r.runID}
}
// Create the new state.
_, err = r.client.StateVersions.Create(ctx, r.workspace.ID, options)
if err != nil {
r.stateUploadErr = true
return fmt.Errorf("Error uploading state: %v", err)
}
return nil
}
// Delete the remote state.
func (r *remoteClient) Delete() error {
err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace.Name)
if err != nil && err != tfe.ErrResourceNotFound {
return fmt.Errorf("Error deleting workspace %s: %v", r.workspace.Name, err)
}
return nil
}
// EnableForcePush allows the remote client to overwrite state
// by implementing remote.ClientForcePusher.
func (r *remoteClient) EnableForcePush() {
r.forcePush = true
}
// Lock the remote state.
func (r *remoteClient) Lock(info *statemgr.LockInfo) (string, error) {
ctx := context.Background()
lockErr := &statemgr.LockError{Info: r.lockInfo}
// Lock the workspace.
_, err := r.client.Workspaces.Lock(ctx, r.workspace.ID, tfe.WorkspaceLockOptions{
Reason: tfe.String("Locked by Terraform"),
})
if err != nil {
if err == tfe.ErrWorkspaceLocked {
lockErr.Info = info
err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, r.organization, r.workspace.Name)
}
lockErr.Err = err
return "", lockErr
}
r.lockInfo = info
return r.lockInfo.ID, nil
}
// Unlock the remote state.
func (r *remoteClient) Unlock(id string) error {
ctx := context.Background()
// We first check if there was an error while uploading the latest
// state. If so, we will not unlock the workspace to prevent any
// changes from being applied until the correct state is uploaded.
if r.stateUploadErr {
return nil
}
lockErr := &statemgr.LockError{Info: r.lockInfo}
// With lock info this should be treated as a normal unlock.
if r.lockInfo != nil {
// Verify the expected lock ID.
if r.lockInfo.ID != id {
lockErr.Err = fmt.Errorf("lock ID does not match existing lock")
return lockErr
}
// Unlock the workspace.
_, err := r.client.Workspaces.Unlock(ctx, r.workspace.ID)
if err != nil {
lockErr.Err = err
return lockErr
}
return nil
}
// Verify the optional force-unlock lock ID.
if r.organization+"/"+r.workspace.Name != id {
lockErr.Err = fmt.Errorf(
"lock ID %q does not match existing lock ID \"%s/%s\"",
id,
r.organization,
r.workspace.Name,
)
return lockErr
}
// Force unlock the workspace.
_, err := r.client.Workspaces.ForceUnlock(ctx, r.workspace.ID)
if err != nil {
lockErr.Err = err
return lockErr
}
return nil
}
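Unlock above serves two kinds of callers: a normal unlock presents the lock ID recorded by Lock, while a force-unlock presents the "organization/workspace" pair (the same pair Lock embeds in its lock-error hint). A sketch of just that branching is below; the helper name and signature are hypothetical, since the real method also calls the TFE API:

package main

import "fmt"

// decideUnlockPath mirrors the branching in remoteClient.Unlock: a stored
// lock ID selects a normal unlock, while with no stored lock info the given
// ID must be the "organization/workspace" pair and selects a force unlock.
// This helper is illustrative only and not part of the cloud package.
func decideUnlockPath(storedLockID, organization, workspace, id string) (string, error) {
	if storedLockID != "" {
		if storedLockID != id {
			return "", fmt.Errorf("lock ID does not match existing lock")
		}
		return "unlock", nil
	}
	if organization+"/"+workspace != id {
		return "", fmt.Errorf("lock ID %q does not match existing lock ID %q",
			id, organization+"/"+workspace)
	}
	return "force-unlock", nil
}

func main() {
	fmt.Println(decideUnlockPath("lock-123", "hashicorp", "prod", "lock-123")) // unlock <nil>
	fmt.Println(decideUnlockPath("", "hashicorp", "prod", "hashicorp/prod"))   // force-unlock <nil>
}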

View File

@ -0,0 +1,59 @@
package cloud
import (
"bytes"
"os"
"testing"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/remote"
"github.com/hashicorp/terraform/internal/states/statefile"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(remoteClient)
}
func TestRemoteClient(t *testing.T) {
client := testRemoteClient(t)
remote.TestClient(t, client)
}
func TestRemoteClient_stateLock(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
s1, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
s2, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}
func TestRemoteClient_withRunID(t *testing.T) {
// Set the TFE_RUN_ID environment variable before creating the client!
if err := os.Setenv("TFE_RUN_ID", generateID("run-")); err != nil {
t.Fatalf("error setting env var TFE_RUN_ID: %v", err)
}
// Create a new test client.
client := testRemoteClient(t)
// Create a new empty state.
sf := statefile.New(states.NewState(), "", 0)
var buf bytes.Buffer
statefile.Write(sf, &buf)
// Store the new state to verify (this will be done
// by the mock that is used) that the run ID is set.
if err := client.Put(buf.Bytes()); err != nil {
t.Fatalf("expected no error, got %v", err)
}
}

View File

@ -0,0 +1,723 @@
package cloud
import (
"context"
"fmt"
"reflect"
"strings"
"testing"
tfe "github.com/hashicorp/go-tfe"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform-svchost/disco"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/tfdiags"
tfversion "github.com/hashicorp/terraform/version"
"github.com/zclconf/go-cty/cty"
backendLocal "github.com/hashicorp/terraform/internal/backend/local"
)
func TestCloud(t *testing.T) {
var _ backend.Enhanced = New(nil)
var _ backend.CLI = New(nil)
}
func TestCloud_backendDefault(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
backend.TestBackendStates(t, b)
backend.TestBackendStateLocks(t, b, b)
backend.TestBackendStateForceUnlock(t, b, b)
}
func TestCloud_backendNoDefault(t *testing.T) {
b, bCleanup := testBackendNoDefault(t)
defer bCleanup()
backend.TestBackendStates(t, b)
}
func TestCloud_config(t *testing.T) {
cases := map[string]struct {
config cty.Value
confErr string
valErr string
}{
"with_a_nonexisting_organization": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("nonexisting"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
confErr: "organization \"nonexisting\" at host app.terraform.io not found",
},
"with_an_unknown_host": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.StringVal("nonexisting.local"),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
confErr: "Failed to request discovery document",
},
// localhost advertises TFE services, but has no token in the credentials
"without_a_token": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.StringVal("localhost"),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
confErr: "terraform login localhost",
},
"with_a_name": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
},
"with_a_prefix": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.NullVal(cty.String),
"prefix": cty.StringVal("my-app-"),
}),
}),
},
"without_either_a_name_and_a_prefix": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.NullVal(cty.String),
"prefix": cty.NullVal(cty.String),
}),
}),
valErr: `Either workspace "name" or "prefix" is required`,
},
"with_both_a_name_and_a_prefix": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.StringVal("my-app-"),
}),
}),
valErr: `Only one of workspace "name" or "prefix" is allowed`,
},
"null config": {
config: cty.NullVal(cty.EmptyObject),
},
}
for name, tc := range cases {
s := testServer(t)
b := New(testDisco(s))
// Validate
_, valDiags := b.PrepareConfig(tc.config)
if (valDiags.Err() != nil || tc.valErr != "") &&
(valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) {
t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err())
}
// Configure
confDiags := b.Configure(tc.config)
if (confDiags.Err() != nil || tc.confErr != "") &&
(confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) {
t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err())
}
}
}
func TestCloud_versionConstraints(t *testing.T) {
cases := map[string]struct {
config cty.Value
prerelease string
version string
result string
}{
"compatible version": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
version: "0.11.1",
},
"version too old": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
version: "0.0.1",
result: "upgrade Terraform to >= 0.1.0",
},
"version too new": {
config: cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
}),
version: "10.0.1",
result: "downgrade Terraform to <= 10.0.0",
},
}
// Save and restore the actual version.
p := tfversion.Prerelease
v := tfversion.Version
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
}()
for name, tc := range cases {
s := testServer(t)
b := New(testDisco(s))
// Set the version for this test.
tfversion.Prerelease = tc.prerelease
tfversion.Version = tc.version
// Validate
_, valDiags := b.PrepareConfig(tc.config)
if valDiags.HasErrors() {
t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err())
}
// Configure
confDiags := b.Configure(tc.config)
if (confDiags.Err() != nil || tc.result != "") &&
(confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.result)) {
t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err())
}
}
}
func TestCloud_localBackend(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
local, ok := b.local.(*backendLocal.Local)
if !ok {
t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local)
}
cloud, ok := local.Backend.(*Cloud)
if !ok {
t.Fatalf("expected local.Backend to be *cloud.Cloud, got: %T", cloud)
}
}
func TestCloud_addAndRemoveWorkspacesDefault(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
if _, err := b.Workspaces(); err != backend.ErrWorkspacesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err)
}
if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
if _, err := b.StateMgr("prod"); err != backend.ErrWorkspacesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err)
}
if err := b.DeleteWorkspace(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
if err := b.DeleteWorkspace("prod"); err != backend.ErrWorkspacesNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err)
}
}
func TestCloud_addAndRemoveWorkspacesNoDefault(t *testing.T) {
b, bCleanup := testBackendNoDefault(t)
defer bCleanup()
states, err := b.Workspaces()
if err != nil {
t.Fatal(err)
}
expectedWorkspaces := []string(nil)
if !reflect.DeepEqual(states, expectedWorkspaces) {
t.Fatalf("expected states %#+v, got %#+v", expectedWorkspaces, states)
}
if _, err := b.StateMgr(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err)
}
expectedA := "test_A"
if _, err := b.StateMgr(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.Workspaces()
if err != nil {
t.Fatal(err)
}
expectedWorkspaces = append(expectedWorkspaces, expectedA)
if !reflect.DeepEqual(states, expectedWorkspaces) {
t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states)
}
expectedB := "test_B"
if _, err := b.StateMgr(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.Workspaces()
if err != nil {
t.Fatal(err)
}
expectedWorkspaces = append(expectedWorkspaces, expectedB)
if !reflect.DeepEqual(states, expectedWorkspaces) {
t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states)
}
if err := b.DeleteWorkspace(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported {
t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err)
}
if err := b.DeleteWorkspace(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.Workspaces()
if err != nil {
t.Fatal(err)
}
expectedWorkspaces = []string{expectedB}
if !reflect.DeepEqual(states, expectedWorkspaces) {
t.Fatalf("expected %#+v got %#+v", expectedWorkspaces, states)
}
if err := b.DeleteWorkspace(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.Workspaces()
if err != nil {
t.Fatal(err)
}
expectedWorkspaces = []string(nil)
if !reflect.DeepEqual(states, expectedWorkspaces) {
t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states)
}
}
func TestCloud_checkConstraints(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
cases := map[string]struct {
constraints *disco.Constraints
prerelease string
version string
result string
}{
"compatible version": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Maximum: "0.11.11",
},
version: "0.11.1",
result: "",
},
"version too old": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Maximum: "0.11.11",
},
version: "0.10.1",
result: "upgrade Terraform to >= 0.11.0",
},
"version too new": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Maximum: "0.11.11",
},
version: "0.12.0",
result: "downgrade Terraform to <= 0.11.11",
},
"version excluded - ordered": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Excluding: []string{"0.11.7", "0.11.8"},
Maximum: "0.11.11",
},
version: "0.11.7",
result: "upgrade Terraform to > 0.11.8",
},
"version excluded - unordered": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Excluding: []string{"0.11.8", "0.11.6"},
Maximum: "0.11.11",
},
version: "0.11.6",
result: "upgrade Terraform to > 0.11.8",
},
"list versions": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Maximum: "0.11.11",
},
version: "0.10.1",
result: "versions >= 0.11.0, <= 0.11.11.",
},
"list exclusion": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Excluding: []string{"0.11.6"},
Maximum: "0.11.11",
},
version: "0.11.6",
result: "excluding version 0.11.6.",
},
"list exclusions": {
constraints: &disco.Constraints{
Minimum: "0.11.0",
Excluding: []string{"0.11.8", "0.11.6"},
Maximum: "0.11.11",
},
version: "0.11.6",
result: "excluding versions 0.11.6, 0.11.8.",
},
}
// Save and restore the actual version.
p := tfversion.Prerelease
v := tfversion.Version
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
}()
for name, tc := range cases {
// Set the version for this test.
tfversion.Prerelease = tc.prerelease
tfversion.Version = tc.version
// Check the constraints.
diags := b.checkConstraints(tc.constraints)
if (diags.Err() != nil || tc.result != "") &&
(diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.result)) {
t.Fatalf("%s: unexpected constraints result: %v", name, diags.Err())
}
}
}
func TestCloud_StateMgr_versionCheck(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
// Some fixed versions for testing with. This logic is a simple string
// comparison, so we don't need many test cases.
v0135 := version.Must(version.NewSemver("0.13.5"))
v0140 := version.Must(version.NewSemver("0.14.0"))
// Save original local version state and restore afterwards
p := tfversion.Prerelease
v := tfversion.Version
s := tfversion.SemVer
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
tfversion.SemVer = s
}()
// For this test, the local Terraform version is set to 0.14.0
tfversion.Prerelease = ""
tfversion.Version = v0140.String()
tfversion.SemVer = v0140
// Update the mock remote workspace Terraform version to match the local
// Terraform version
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
TerraformVersion: tfe.String(v0140.String()),
},
); err != nil {
t.Fatalf("error: %v", err)
}
// This should succeed
if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
// Now change the remote workspace to a different Terraform version
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
TerraformVersion: tfe.String(v0135.String()),
},
); err != nil {
t.Fatalf("error: %v", err)
}
// This should fail
want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"`
if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want {
t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want)
}
}
func TestCloud_StateMgr_versionCheckLatest(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
v0140 := version.Must(version.NewSemver("0.14.0"))
// Save original local version state and restore afterwards
p := tfversion.Prerelease
v := tfversion.Version
s := tfversion.SemVer
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
tfversion.SemVer = s
}()
// For this test, the local Terraform version is set to 0.14.0
tfversion.Prerelease = ""
tfversion.Version = v0140.String()
tfversion.SemVer = v0140
// Update the remote workspace to the pseudo-version "latest"
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
TerraformVersion: tfe.String("latest"),
},
); err != nil {
t.Fatalf("error: %v", err)
}
// This should succeed despite not being a string match
if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
t.Fatalf("expected no error, got %v", err)
}
}
func TestCloud_VerifyWorkspaceTerraformVersion(t *testing.T) {
testCases := []struct {
local string
remote string
operations bool
wantErr bool
}{
{"0.13.5", "0.13.5", true, false},
{"0.14.0", "0.13.5", true, true},
{"0.14.0", "0.13.5", false, false},
{"0.14.0", "0.14.1", true, false},
{"0.14.0", "1.0.99", true, false},
{"0.14.0", "1.1.0", true, true},
{"1.2.0", "1.2.99", true, false},
{"1.2.0", "1.3.0", true, true},
{"0.15.0", "latest", true, false},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
local := version.Must(version.NewSemver(tc.local))
// Save original local version state and restore afterwards
p := tfversion.Prerelease
v := tfversion.Version
s := tfversion.SemVer
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
tfversion.SemVer = s
}()
// Override local version as specified
tfversion.Prerelease = ""
tfversion.Version = local.String()
tfversion.SemVer = local
// Update the mock remote workspace Terraform version to the
// specified remote version
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
Operations: tfe.Bool(tc.operations),
TerraformVersion: tfe.String(tc.remote),
},
); err != nil {
t.Fatalf("error: %v", err)
}
diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
if tc.wantErr {
if len(diags) != 1 {
t.Fatal("expected diag, but none returned")
}
if got := diags.Err().Error(); !strings.Contains(got, "Terraform version mismatch") {
t.Fatalf("unexpected error: %s", got)
}
} else {
if len(diags) != 0 {
t.Fatalf("unexpected diags: %s", diags.Err())
}
}
})
}
}
func TestCloud_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
// Attempting to check the version against a workspace which doesn't exist
// should result in no errors
diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace")
if len(diags) != 0 {
t.Fatalf("unexpected error: %s", diags.Err())
}
// Use a special workspace ID to trigger a 500 error, which should result
// in a failed check
diags = b.VerifyWorkspaceTerraformVersion("network-error")
if len(diags) != 1 {
t.Fatal("expected diag, but none returned")
}
if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") {
t.Fatalf("unexpected error: %s", got)
}
// Update the mock remote workspace Terraform version to an invalid version
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
TerraformVersion: tfe.String("1.0.cheetarah"),
},
); err != nil {
t.Fatalf("error: %v", err)
}
diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
if len(diags) != 1 {
t.Fatal("expected diag, but none returned")
}
if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid Terraform version") {
t.Fatalf("unexpected error: %s", got)
}
}
func TestCloud_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
// If the ignore flag is set, the behaviour changes
b.IgnoreVersionConflict()
// Different local & remote versions to cause an error
local := version.Must(version.NewSemver("0.14.0"))
remote := version.Must(version.NewSemver("0.13.5"))
// Save original local version state and restore afterwards
p := tfversion.Prerelease
v := tfversion.Version
s := tfversion.SemVer
defer func() {
tfversion.Prerelease = p
tfversion.Version = v
tfversion.SemVer = s
}()
// Override local version as specified
tfversion.Prerelease = ""
tfversion.Version = local.String()
tfversion.SemVer = local
// Update the mock remote workspace Terraform version to the
// specified remote version
if _, err := b.client.Workspaces.Update(
context.Background(),
b.organization,
b.workspace,
tfe.WorkspaceUpdateOptions{
TerraformVersion: tfe.String(remote.String()),
},
); err != nil {
t.Fatalf("error: %v", err)
}
diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
if len(diags) != 1 {
t.Fatal("expected diag, but none returned")
}
if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
t.Errorf("wrong severity: got %#v, want %#v", got, want)
}
if got, want := diags[0].Description().Summary, "Terraform version mismatch"; got != want {
t.Errorf("wrong summary: got %s, want %s", got, want)
}
wantDetail := "The local Terraform version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)."
if got := diags[0].Description().Detail; got != wantDetail {
t.Errorf("wrong summary: got %s, want %s", got, wantDetail)
}
}

View File

@ -0,0 +1,25 @@
package cloud
import (
"flag"
"os"
"testing"
"time"
_ "github.com/hashicorp/terraform/internal/logging"
)
func TestMain(m *testing.M) {
flag.Parse()
// Make sure TF_FORCE_LOCAL_BACKEND is unset
os.Unsetenv("TF_FORCE_LOCAL_BACKEND")
// Reduce delays to make tests run faster
backoffMin = 1.0
backoffMax = 1.0
planConfigurationVersionsPollInterval = 1 * time.Millisecond
runPollInterval = 1 * time.Millisecond
os.Exit(m.Run())
}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1,4 @@
variable "foo" {}
variable "bar" {}
resource "null_resource" "foo" {}

View File

@ -0,0 +1,5 @@
resource "null_resource" "foo" {
triggers {
random = "${guid()}"
}
}

1
internal/cloud/testdata/apply/main.tf vendored Normal file
View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

View File

@ -0,0 +1,4 @@
variable "foo" {}
variable "bar" {}
resource "null_resource" "foo" {}

View File

@ -0,0 +1,5 @@
resource "null_resource" "foo" {
triggers {
random = "${guid()}"
}
}

View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

1
internal/cloud/testdata/plan/main.tf vendored Normal file
View File

@ -0,0 +1 @@
resource "null_resource" "foo" {}

299
internal/cloud/testing.go Normal file
View File

@ -0,0 +1,299 @@
package cloud
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"path"
"testing"
tfe "github.com/hashicorp/go-tfe"
svchost "github.com/hashicorp/terraform-svchost"
"github.com/hashicorp/terraform-svchost/auth"
"github.com/hashicorp/terraform-svchost/disco"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/httpclient"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/states/remote"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/hashicorp/terraform/version"
"github.com/mitchellh/cli"
"github.com/zclconf/go-cty/cty"
backendLocal "github.com/hashicorp/terraform/internal/backend/local"
)
const (
testCred = "test-auth-token"
)
var (
tfeHost = svchost.Hostname(defaultHostname)
credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
tfeHost: {"token": testCred},
})
)
func testInput(t *testing.T, answers map[string]string) *mockInput {
return &mockInput{answers: answers}
}
func testBackendDefault(t *testing.T) (*Cloud, func()) {
obj := cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
})
return testBackend(t, obj)
}
func testBackendNoDefault(t *testing.T) (*Cloud, func()) {
obj := cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("hashicorp"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.NullVal(cty.String),
"prefix": cty.StringVal("my-app-"),
}),
})
return testBackend(t, obj)
}
func testBackendNoOperations(t *testing.T) (*Cloud, func()) {
obj := cty.ObjectVal(map[string]cty.Value{
"hostname": cty.NullVal(cty.String),
"organization": cty.StringVal("no-operations"),
"token": cty.NullVal(cty.String),
"workspaces": cty.ObjectVal(map[string]cty.Value{
"name": cty.StringVal("prod"),
"prefix": cty.NullVal(cty.String),
}),
})
return testBackend(t, obj)
}
func testRemoteClient(t *testing.T) remote.Client {
b, bCleanup := testBackendDefault(t)
defer bCleanup()
raw, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("error: %v", err)
}
return raw.(*remote.State).Client
}
func testBackend(t *testing.T, obj cty.Value) (*Cloud, func()) {
s := testServer(t)
b := New(testDisco(s))
// Configure the backend so the client is created.
newObj, valDiags := b.PrepareConfig(obj)
if len(valDiags) != 0 {
t.Fatal(valDiags.ErrWithWarnings())
}
obj = newObj
confDiags := b.Configure(obj)
if len(confDiags) != 0 {
t.Fatal(confDiags.ErrWithWarnings())
}
// Get a new mock client.
mc := newMockClient()
// Replace the services we use with our mock services.
b.CLI = cli.NewMockUi()
b.client.Applies = mc.Applies
b.client.ConfigurationVersions = mc.ConfigurationVersions
b.client.CostEstimates = mc.CostEstimates
b.client.Organizations = mc.Organizations
b.client.Plans = mc.Plans
b.client.PolicyChecks = mc.PolicyChecks
b.client.Runs = mc.Runs
b.client.StateVersions = mc.StateVersions
b.client.Variables = mc.Variables
b.client.Workspaces = mc.Workspaces
// Set local to a local test backend.
b.local = testLocalBackend(t, b)
ctx := context.Background()
// Create the organization.
_, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{
Name: tfe.String(b.organization),
})
if err != nil {
t.Fatalf("error: %v", err)
}
// Create the default workspace if required.
if b.workspace != "" {
_, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{
Name: tfe.String(b.workspace),
})
if err != nil {
t.Fatalf("error: %v", err)
}
}
return b, s.Close
}
func testLocalBackend(t *testing.T, cloud *Cloud) backend.Enhanced {
b := backendLocal.NewWithBackend(cloud)
// Add a test provider to the local backend.
p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"null_resource": {
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Computed: true},
},
},
},
})
p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("yes"),
})}
return b
}
// testServer returns a *httptest.Server used for local testing.
func testServer(t *testing.T) *httptest.Server {
mux := http.NewServeMux()
// Respond to service discovery calls.
mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
io.WriteString(w, `{
"state.v2": "/api/v2/",
"tfe.v2.1": "/api/v2/",
"versions.v1": "/v1/versions/"
}`)
})
// Respond to service version constraints calls.
mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
io.WriteString(w, fmt.Sprintf(`{
"service": "%s",
"product": "terraform",
"minimum": "0.1.0",
"maximum": "10.0.0"
}`, path.Base(r.URL.Path)))
})
// Respond to pings to get the API version header.
mux.HandleFunc("/api/v2/ping", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("TFP-API-Version", "2.4")
})
// Respond to the initial query to read the hashicorp org entitlements.
mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/vnd.api+json")
io.WriteString(w, `{
"data": {
"id": "org-GExadygjSbKP8hsY",
"type": "entitlement-sets",
"attributes": {
"operations": true,
"private-module-registry": true,
"sentinel": true,
"state-storage": true,
"teams": true,
"vcs-integrations": true
}
}
}`)
})
// Respond to the initial query to read the no-operations org entitlements.
mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/vnd.api+json")
io.WriteString(w, `{
"data": {
"id": "org-ufxa3y8jSbKP8hsT",
"type": "entitlement-sets",
"attributes": {
"operations": false,
"private-module-registry": true,
"sentinel": true,
"state-storage": true,
"teams": true,
"vcs-integrations": true
}
}
}`)
})
// All tests that are assumed to pass will use the hashicorp organization,
// so for all other organization requests we will return a 404.
mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404)
io.WriteString(w, `{
"errors": [
{
"status": "404",
"title": "not found"
}
]
}`)
})
return httptest.NewServer(mux)
}
// testDisco returns a *disco.Disco mapping app.terraform.io and
// localhost to a local test server.
func testDisco(s *httptest.Server) *disco.Disco {
services := map[string]interface{}{
"state.v2": fmt.Sprintf("%s/api/v2/", s.URL),
"tfe.v2.1": fmt.Sprintf("%s/api/v2/", s.URL),
"versions.v1": fmt.Sprintf("%s/v1/versions/", s.URL),
}
d := disco.NewWithCredentialsSource(credsSrc)
d.SetUserAgent(httpclient.TerraformUserAgent(version.String()))
d.ForceHostServices(svchost.Hostname(defaultHostname), services)
d.ForceHostServices(svchost.Hostname("localhost"), services)
return d
}
type unparsedVariableValue struct {
value string
source terraform.ValueSourceType
}
func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) {
return &terraform.InputValue{
Value: cty.StringVal(v.value),
SourceType: v.source,
}, tfdiags.Diagnostics{}
}
// testVariables returns a map of backend.UnparsedVariableValue values used for testing.
func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue {
vars := make(map[string]backend.UnparsedVariableValue, len(vs))
for _, v := range vs {
vars[v] = &unparsedVariableValue{
value: v,
source: s,
}
}
return vars
}
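As a closing usage note, a hypothetical test composed from these helpers might look like the sketch below; the test name and assertions are illustrative and not part of this commit:

package cloud

import (
	"testing"

	"github.com/hashicorp/terraform/internal/backend"
	"github.com/hashicorp/terraform/internal/terraform"
)

// TestHelpers_usage (hypothetical) shows how testBackendDefault and
// testVariables compose: a mock-backed backend plus unparsed input
// variables sourced, in this example, from CLI arguments.
func TestHelpers_usage(t *testing.T) {
	b, bCleanup := testBackendDefault(t)
	defer bCleanup()

	vars := testVariables(terraform.ValueFromCLIArg, "foo", "bar")
	if len(vars) != 2 {
		t.Fatalf("expected 2 variables, got %d", len(vars))
	}

	// The backend is fully configured against the in-process test server,
	// so state operations work without network access.
	if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
}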