Merge pull request #26270 from hashicorp/jbardin/refresh-plan

Move resource refreshing into plan
James Bardin 2020-09-17 10:19:33 -04:00, committed by GitHub
commit 4295f1e1e3
27 changed files with 287 additions and 253 deletions
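
For orientation before the file-by-file diff, a hedged Go sketch (not part of the commit) contrasting the old two-walk flow with the new single-walk flow; the helper planWithImplicitRefresh is hypothetical, while Context.Plan, Context.Refresh, and the new plan.State field all appear in the diff below.

package example

import (
	"github.com/hashicorp/terraform/plans"
	"github.com/hashicorp/terraform/states"
	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"
)

// Before this commit, callers ran two graph walks:
//
//	state, diags := ctx.Refresh() // walk 1: read current remote objects
//	plan, diags := ctx.Plan()     // walk 2: diff config against that state
//
// After it, the refresh happens inside the plan walk itself.
func planWithImplicitRefresh(ctx *terraform.Context) (*plans.Plan, *states.State, tfdiags.Diagnostics) {
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		return nil, nil, diags
	}
	// plan.State is the field added by this commit: the state as
	// refreshed during planning.
	return plan, plan.State, diags
}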

View File

@ -80,18 +80,6 @@ func (b *Local) opApply(
// If we weren't given a plan, then we refresh/plan
if op.PlanFile == nil {
- // If we're refreshing before apply, perform that
- if op.PlanRefresh {
- log.Printf("[INFO] backend/local: apply calling Refresh")
- _, refreshDiags := tfCtx.Refresh()
- diags = diags.Append(refreshDiags)
- if diags.HasErrors() {
- runningOp.Result = backend.OperationFailure
- b.ShowDiagnostics(diags)
- return
- }
- }
// Perform the plan
log.Printf("[INFO] backend/local: apply calling Plan")
plan, planDiags := tfCtx.Plan()

View File

@ -97,27 +97,6 @@ func (b *Local) opPlan(
runningOp.State = tfCtx.State()
- // If we're refreshing before plan, perform that
- baseState := runningOp.State
- if op.PlanRefresh {
- log.Printf("[INFO] backend/local: plan calling Refresh")
- if b.CLI != nil {
- b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n"))
- }
- refreshedState, refreshDiags := tfCtx.Refresh()
- diags = diags.Append(refreshDiags)
- if diags.HasErrors() {
- b.ReportResult(runningOp, diags)
- return
- }
- baseState = refreshedState // plan will be relative to our refreshed state
- if b.CLI != nil {
- b.CLI.Output("\n------------------------------------------------------------------------")
- }
- }
// Perform the plan in a goroutine so we can be interrupted
var plan *plans.Plan
var planDiags tfdiags.Diagnostics
@ -142,6 +121,7 @@ func (b *Local) opPlan(
b.ReportResult(runningOp, diags)
return
}
+ // Record whether this plan includes any side-effects that could be applied.
runningOp.PlanEmpty = !planHasSideEffects(priorState, plan.Changes)
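
As a rough illustration of the check used on the line above, a minimal sketch (an assumption, not the commit's code; the real planHasSideEffects also takes the prior state so it can treat changed output values as side-effects):

func planHasSideEffectsSketch(changes *plans.Changes) bool {
	// Any resource change that is more than a no-op means the plan has
	// side-effects that an apply would perform.
	for _, rc := range changes.Resources {
		if rc.Action != plans.NoOp {
			return true
		}
	}
	return false
}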
@ -161,7 +141,7 @@ func (b *Local) opPlan(
// We may have updated the state in the refresh step above, but we
// will freeze that updated state in the plan file for now and
// only write it if this plan is subsequently applied.
- plannedStateFile := statemgr.PlannedStateUpdate(opState, baseState)
+ plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.State)
log.Printf("[INFO] backend/local: writing plan output to: %s", path)
err := planfile.Create(path, configSnap, plannedStateFile, plan)
@ -187,7 +167,7 @@ func (b *Local) opPlan(
return
}
- b.renderPlan(plan, baseState, priorState, schemas)
+ b.renderPlan(plan, plan.State, priorState, schemas)
// If we've accumulated any warnings along the way then we'll show them
// here just before we show the summary and next steps. If we encountered

View File

@ -357,8 +357,8 @@ func TestLocal_planDeposedOnly(t *testing.T) {
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
- if !p.ReadResourceCalled {
- t.Fatal("ReadResource should be called")
+ if p.ReadResourceCalled {
+ t.Fatal("ReadResource should not be called")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
@ -478,6 +478,12 @@ Plan: 1 to add, 0 to change, 1 to destroy.`
}
func TestLocal_planRefreshFalse(t *testing.T) {
+ // Since there is no longer a separate Refresh walk, `-refresh=false`
+ // doesn't do anything.
+ // FIXME: determine if we need a refresh option for the new plan, or remove
+ // this test
+ t.Skip()
b, cleanup := TestLocal(t)
defer cleanup()
@ -543,8 +549,8 @@ func TestLocal_planDestroy(t *testing.T) {
t.Fatalf("plan operation failed")
}
- if !p.ReadResourceCalled {
- t.Fatal("ReadResource should be called")
+ if p.ReadResourceCalled {
+ t.Fatal("ReadResource should not be called")
}
if run.PlanEmpty {
@ -599,12 +605,12 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
t.Fatalf("plan operation failed")
}
- if !p.ReadResourceCalled {
- t.Fatal("ReadResource should be called")
+ if p.ReadResourceCalled {
+ t.Fatal("ReadResource should not be called")
}
- if !p.ReadDataSourceCalled {
- t.Fatal("ReadDataSourceCalled should be called")
+ if p.ReadDataSourceCalled {
+ t.Fatal("ReadDataSourceCalled should not be called")
}
if run.PlanEmpty {
@ -621,7 +627,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
// Data source should not be rendered in the output
expectedOutput := `Terraform will perform the following actions:
- # test_instance.foo will be destroyed
+ # test_instance.foo[0] will be destroyed
- resource "test_instance" "foo" {
- ami = "bar" -> null
@ -635,7 +641,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.`
output := b.CLI.(*cli.MockUi).OutputWriter.String()
if !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output (expected no data source):\n%s", output)
t.Fatalf("Unexpected output:\n%s", output)
}
}
@ -824,7 +830,7 @@ func testPlanState_tainted() *states.State {
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
- }.Instance(addrs.IntKey(0)),
+ }.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{

View File

@ -8,6 +8,7 @@ import (
"testing"
"github.com/hashicorp/terraform/e2e"
"github.com/hashicorp/terraform/plans"
)
// The tests in this file run through different scenarios recommended in our
@ -72,11 +73,25 @@ func TestPlanApplyInAutomation(t *testing.T) {
// stateResources := plan.Changes.Resources
diffResources := plan.Changes.Resources
- if len(diffResources) != 1 || diffResources[0].Addr.String() != "null_resource.test" {
+ if len(diffResources) != 2 {
t.Errorf("incorrect number of resources in plan")
}
+ expected := map[string]plans.Action{
+ "data.template_file.test": plans.Read,
+ "null_resource.test": plans.Create,
+ }
+ for _, r := range diffResources {
+ expectedAction, ok := expected[r.Addr.String()]
+ if !ok {
+ t.Fatalf("unexpected change for %q", r.Addr)
+ }
+ if r.Action != expectedAction {
+ t.Fatalf("unexpected action %q for %q", r.Action, r.Addr)
+ }
+ }
//// APPLY
stdout, stderr, err = tf.Run("apply", "-input=false", "tfplan")
if err != nil {

View File

@ -9,6 +9,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/terraform/e2e"
"github.com/hashicorp/terraform/plans"
"github.com/zclconf/go-cty/cty"
)
@ -71,8 +72,23 @@ func TestPrimarySeparatePlan(t *testing.T) {
}
diffResources := plan.Changes.Resources
- if len(diffResources) != 1 || diffResources[0].Addr.String() != "null_resource.test" {
- t.Errorf("incorrect diff in plan; want just null_resource.test to have been rendered, but have:\n%s", spew.Sdump(diffResources))
+ if len(diffResources) != 2 {
+ t.Errorf("incorrect number of resources in plan")
}
+ expected := map[string]plans.Action{
+ "data.template_file.test": plans.Read,
+ "null_resource.test": plans.Create,
+ }
+ for _, r := range diffResources {
+ expectedAction, ok := expected[r.Addr.String()]
+ if !ok {
+ t.Fatalf("unexpected change for %q", r.Addr)
+ }
+ if r.Action != expectedAction {
+ t.Fatalf("unexpected action %q for %q", r.Action, r.Addr)
+ }
+ }
//// APPLY

View File

@ -96,10 +96,6 @@ func TestPlan_plan(t *testing.T) {
if code := c.Run(args); code != 1 {
t.Fatalf("wrong exit status %d; want 1\nstderr: %s", code, ui.ErrorWriter.String())
}
- if p.ReadResourceCalled {
- t.Fatal("ReadResource should not have been called")
- }
}
func TestPlan_destroy(t *testing.T) {
@ -142,10 +138,6 @@ func TestPlan_destroy(t *testing.T) {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
- if !p.ReadResourceCalled {
- t.Fatal("ReadResource should have been called")
- }
plan := testReadPlan(t, outPath)
for _, rc := range plan.Changes.Resources {
if got, want := rc.Action, plans.Delete; got != want {

View File

@ -53,18 +53,7 @@
]
}
},
"prior_state": {
"format_version": "0.1",
"values": {
"outputs": {
"test": {
"sensitive": false,
"value": "bar"
}
},
"root_module": {}
}
},
"prior_state": {},
"resource_changes": [
{
"address": "test_instance.test[0]",

View File

@ -3,7 +3,12 @@
"terraform_version": "0.12.0",
"serial": 7,
"lineage": "configuredUnchanged",
"outputs": {},
"outputs": {
"test": {
"value": "bar",
"type": "string"
}
},
"resources": [
{
"mode": "managed",

View File

@ -3,7 +3,12 @@
"terraform_version": "0.12.0",
"serial": 7,
"lineage": "configuredUnchanged",
"outputs": {},
"outputs": {
"test": {
"value": "bar",
"type": "string"
}
},
"resources": [
{
"mode": "managed",

View File

@ -69,18 +69,7 @@
]
}
},
"prior_state": {
"format_version": "0.1",
"values": {
"outputs": {
"test": {
"sensitive": false,
"value": "baz"
}
},
"root_module": {}
}
},
"prior_state": {},
"resource_changes": [
{
"address": "module.module_test_bar.test_instance.test",

View File

@ -101,20 +101,14 @@
"format_version": "0.1",
"terraform_version": "0.13.0",
"values": {
"outputs": {
"test": {
"sensitive": false,
"value": "bar"
}
},
"root_module": {
"resources": [
{
"address": "test_instance.test[0]",
"index": 0,
"mode": "managed",
"type": "test_instance",
"name": "test",
"index": 0,
"provider_name": "registry.terraform.io/hashicorp/test",
"schema_version": 0,
"values": {

View File

@ -9,7 +9,6 @@ import (
"sort"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/hcl2shim"
"github.com/hashicorp/terraform/states"
@ -61,28 +60,18 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
log.Printf("[WARN] Config warnings:\n%s", stepDiags)
}
- // Refresh!
- newState, stepDiags := ctx.Refresh()
- // shim the state first so the test can check the state on errors
- state, err = shimNewState(newState, step.providers)
- if err != nil {
- return nil, err
- }
- if stepDiags.HasErrors() {
- return state, newOperationError("refresh", stepDiags)
- }
// If this step is a PlanOnly step, skip over this first Plan and subsequent
// Apply, and use the follow up Plan that checks for perpetual diffs
if !step.PlanOnly {
// Plan!
- if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() {
+ p, stepDiags := ctx.Plan()
+ if stepDiags.HasErrors() {
return state, newOperationError("plan", stepDiags)
- } else {
- log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
- }
+ newState := p.State
+ log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
// We need to keep a copy of the state prior to destroying
// such that destroy steps can verify their behavior in the check
// function
@ -115,11 +104,21 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
// Now, verify that Plan is now empty and we don't have a perpetual diff issue
// We do this with TWO plans. One without a refresh.
- var p *plans.Plan
- if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
+ p, stepDiags := ctx.Plan()
+ if stepDiags.HasErrors() {
return state, newOperationError("follow-up plan", stepDiags)
}
- if !p.Changes.Empty() {
+ // we don't technically need this any longer with plan handling refreshing,
+ // but run it anyway to ensure the context is working as expected.
+ p, stepDiags = ctx.Plan()
+ if stepDiags.HasErrors() {
+ return state, newOperationError("second follow-up plan", stepDiags)
+ }
+ empty := p.Changes.Empty()
+ newState := p.State
+ if !empty {
if step.ExpectNonEmptyPlan {
log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
} else {
@ -128,39 +127,6 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
}
}
- // And another after a Refresh.
- if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
- newState, stepDiags = ctx.Refresh()
- if stepDiags.HasErrors() {
- return state, newOperationError("follow-up refresh", stepDiags)
- }
- state, err = shimNewState(newState, step.providers)
- if err != nil {
- return nil, err
- }
- }
- if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
- return state, newOperationError("second follow-up refresh", stepDiags)
- }
- empty := p.Changes.Empty()
- // Data resources are tricky because they legitimately get instantiated
- // during refresh so that they will be already populated during the
- // plan walk. Because of this, if we have any data resources in the
- // config we'll end up wanting to destroy them again here. This is
- // acceptable and expected, and we'll treat it as "empty" for the
- // sake of this testing.
- if step.Destroy && !empty {
- empty = true
- for _, change := range p.Changes.Resources {
- if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode {
- empty = false
- break
- }
- }
- }
if !empty {
if step.ExpectNonEmptyPlan {
log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))

View File

@ -5,6 +5,7 @@ import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/states"
"github.com/zclconf/go-cty/cty"
)
@ -16,15 +17,16 @@ import (
// result that will be completed during apply by resolving any values that
// cannot be predicted.
//
- // A plan must always be accompanied by the state and configuration it was
- // built from, since the plan does not itself include all of the information
- // required to make the changes indicated.
+ // A plan must always be accompanied by the configuration it was built from,
+ // since the plan does not itself include all of the information required to
+ // make the changes indicated.
type Plan struct {
VariableValues map[string]DynamicValue
Changes *Changes
TargetAddrs []addrs.Targetable
ProviderSHA256s map[string][]byte
Backend Backend
+ State *states.State
}
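
Because the plan now carries its own state, persisting a plan can derive the planned state file directly from it, as the backend change earlier in this diff does. A minimal sketch, assuming the statemgr and planfile call signatures used elsewhere in this diff:

func writePlanFile(path string, configSnap *configload.Snapshot, opState statemgr.Full, plan *plans.Plan) error {
	// Freeze the refreshed state captured on the plan into the plan file.
	plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.State)
	return planfile.Create(path, configSnap, plannedStateFile, plan)
}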
// Backend represents the backend-related configuration and other data as it

View File

@ -93,13 +93,14 @@ type ContextMeta struct {
// perform operations on infrastructure. This structure is built using
// NewContext.
type Context struct {
- config *configs.Config
- changes *plans.Changes
- state *states.State
- targets []addrs.Targetable
- variables InputValues
- meta *ContextMeta
- destroy bool
+ config *configs.Config
+ changes *plans.Changes
+ state *states.State
+ refreshState *states.State
+ targets []addrs.Targetable
+ variables InputValues
+ meta *ContextMeta
+ destroy bool
hooks []Hook
components contextComponentFactory
@ -223,17 +224,18 @@ func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
}
return &Context{
- components: components,
- schemas: schemas,
- destroy: opts.Destroy,
- changes: changes,
- hooks: hooks,
- meta: opts.Meta,
- config: config,
- state: state,
- targets: opts.Targets,
- uiInput: opts.UIInput,
- variables: variables,
+ components: components,
+ schemas: schemas,
+ destroy: opts.Destroy,
+ changes: changes,
+ hooks: hooks,
+ meta: opts.Meta,
+ config: config,
+ state: state,
+ refreshState: state.DeepCopy(),
+ targets: opts.Targets,
+ uiInput: opts.UIInput,
+ variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]cty.Value),
@ -490,10 +492,15 @@ Note that the -target option is not suitable for routine use, and is provided on
))
}
+ // This isn't technically needed, but don't leave an old refreshed state
+ // around in case we re-use the context in internal tests.
+ c.refreshState = c.state.DeepCopy()
return c.state, diags
}
- // Plan generates an execution plan for the given context.
+ // Plan generates an execution plan for the given context, and returns the
+ // refreshed state.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
@ -538,31 +545,13 @@ The -target option is not for routine use, and is provided only for exceptional
ProviderSHA256s: c.providerSHA256s,
}
- var operation walkOperation
- if c.destroy {
- operation = walkPlanDestroy
- } else {
- // Set our state to be something temporary. We do this so that
- // the plan can update a fake state so that variables work, then
- // we replace it back with our old state.
- old := c.state
- if old == nil {
- c.state = states.NewState()
- } else {
- c.state = old.DeepCopy()
- }
- defer func() {
- c.state = old
- }()
- operation = walkPlan
- }
// Build the graph.
+ operation := walkPlan
graphType := GraphTypePlan
if c.destroy {
+ operation = walkPlanDestroy
graphType = GraphTypePlanDestroy
}
graph, graphDiags := c.Graph(graphType, nil)
diags = diags.Append(graphDiags)
if graphDiags.HasErrors() {
@ -578,6 +567,12 @@ The -target option is not for routine use, and is provided only for exceptional
}
p.Changes = c.changes
+ p.State = c.refreshState
+ // replace the working state with the updated state, so that immediate calls
+ // to Apply work as expected.
+ c.state = c.refreshState
return p, diags
}
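
A hedged usage sketch of what this enables: an immediate Apply on the same context now operates on the refreshed state without an explicit Refresh call in between; planThenApply is a hypothetical helper.

func planThenApply(ctx *terraform.Context) (*states.State, tfdiags.Diagnostics) {
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		return nil, diags
	}
	_ = plan // the context also retains the planned changes internally
	return ctx.Apply()
}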
@ -781,20 +776,31 @@ func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalk
}
func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
- if operation == walkValidate {
- return &ContextGraphWalker{
- Context: c,
- State: states.NewState().SyncWrapper(),
- Changes: c.changes.SyncWrapper(),
- InstanceExpander: instances.NewExpander(),
- Operation: operation,
- StopContext: c.runContext,
- RootVariableValues: c.variables,
- }
+ var state *states.SyncState
+ var refreshState *states.SyncState
+ switch operation {
+ case walkValidate:
+ // validate should not use any state
+ s := states.NewState()
+ state = s.SyncWrapper()
+ // validate currently uses the plan graph, so we have to populate the
+ // refreshState.
+ refreshState = s.SyncWrapper()
+ case walkPlan:
+ state = c.state.SyncWrapper()
+ refreshState = c.refreshState.SyncWrapper()
+ default:
+ state = c.state.SyncWrapper()
+ }
return &ContextGraphWalker{
Context: c,
- State: c.state.SyncWrapper(),
+ State: state,
+ RefreshState: refreshState,
Changes: c.changes.SyncWrapper(),
InstanceExpander: instances.NewExpander(),
Operation: operation,

View File

@ -2376,12 +2376,10 @@ func TestContext2Apply_provisionerInterpCount(t *testing.T) {
t.Fatalf("plan failed unexpectedly: %s", diags.Err())
}
- state := ctx.State()
// We'll marshal and unmarshal the plan here, to ensure that we have
// a clean new context as would be created if we separately ran
// terraform plan -out=tfplan && terraform apply tfplan
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatal(err)
}
@ -6014,7 +6012,7 @@ func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) {
t.Logf("Step 2 plan: %s", legacyDiffComparisonString(plan.Changes))
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -6089,7 +6087,7 @@ func TestContext2Apply_destroyWithModuleVariableAndCount(t *testing.T) {
t.Fatalf("destroy plan err: %s", diags.Err())
}
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -6245,7 +6243,7 @@ func TestContext2Apply_destroyWithModuleVariableAndCountNested(t *testing.T) {
t.Fatalf("destroy plan err: %s", diags.Err())
}
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -8319,7 +8317,7 @@ func TestContext2Apply_issue7824(t *testing.T) {
}
// Write / Read plan to simulate running it through a Plan file
- ctxOpts, err := contextOptsForPlanViaFile(snap, ctx.State(), plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -8396,7 +8394,7 @@ func TestContext2Apply_issue5254(t *testing.T) {
}
// Write / Read plan to simulate running it through a Plan file
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -8473,7 +8471,7 @@ func TestContext2Apply_targetedWithTaintedInState(t *testing.T) {
}
// Write / Read plan to simulate running it through a Plan file
- ctxOpts, err := contextOptsForPlanViaFile(snap, ctx.State(), plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -8730,7 +8728,7 @@ func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testin
t.Fatalf("destroy plan err: %s", diags.Err())
}
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -9157,11 +9155,16 @@ func TestContext2Apply_destroyWithProviders(t *testing.T) {
}
// correct the state
state.Modules["module.mod.module.removed"].Resources["aws_instance.child"].ProviderConfig = addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("aws"),
Alias: "bar",
Module: addrs.RootModule,
}
state.Modules["module.mod.module.removed"].Resources["aws_instance.child"].ProviderConfig = mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"].bar`)
ctx = testContext2(t, &ContextOpts{
Config: m,
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
},
State: state,
Destroy: true,
})
if _, diags := ctx.Plan(); diags.HasErrors() {
t.Fatal(diags.Err())
@ -9313,7 +9316,7 @@ func TestContext2Apply_plannedInterpolatedCount(t *testing.T) {
// We'll marshal and unmarshal the plan here, to ensure that we have
// a clean new context as would be created if we separately ran
// terraform plan -out=tfplan && terraform apply tfplan
- ctxOpts, err := contextOptsForPlanViaFile(snap, ctx.State(), plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -9376,7 +9379,7 @@ func TestContext2Apply_plannedDestroyInterpolatedCount(t *testing.T) {
// We'll marshal and unmarshal the plan here, to ensure that we have
// a clean new context as would be created if we separately ran
// terraform plan -out=tfplan && terraform apply tfplan
- ctxOpts, err := contextOptsForPlanViaFile(snap, ctx.State(), plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatalf("failed to round-trip through planfile: %s", err)
}
@ -9826,7 +9829,7 @@ func TestContext2Apply_destroyDataCycle(t *testing.T) {
// We'll marshal and unmarshal the plan here, to ensure that we have
// a clean new context as would be created if we separately ran
// terraform plan -out=tfplan && terraform apply tfplan
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatal(err)
}
@ -9858,6 +9861,22 @@ func TestContext2Apply_taintedDestroyFailure(t *testing.T) {
return testApplyFn(info, s, d)
}
p.GetSchemaReturn = &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"test_instance": {
Attributes: map[string]*configschema.Attribute{
"id": {
Type: cty.String,
Computed: true,
},
"foo": {
Type: cty.String,
Optional: true,
},
},
},
},
}
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
@ -9978,7 +9997,7 @@ func TestContext2Apply_taintedDestroyFailure(t *testing.T) {
t.Fatal("test_instance.c should have no deposed instances")
}
- if string(c.Current.AttrsJSON) != `{"id":"c","foo":"old"}` {
+ if string(c.Current.AttrsJSON) != `{"foo":"old","id":"c"}` {
t.Fatalf("unexpected attrs for c: %q\n", c.Current.AttrsJSON)
}
}
@ -10141,7 +10160,7 @@ func TestContext2Apply_cbdCycle(t *testing.T) {
// We'll marshal and unmarshal the plan here, to ensure that we have
// a clean new context as would be created if we separately ran
// terraform plan -out=tfplan && terraform apply tfplan
- ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
+ ctxOpts, err := contextOptsForPlanViaFile(snap, plan)
if err != nil {
t.Fatal(err)
}

View File

@ -2829,7 +2829,7 @@ func TestContext2Plan_countDecreaseToOne(t *testing.T) {
}
}
- expectedState := `aws_instance.foo.0:
+ expectedState := `aws_instance.foo:
ID = bar
provider = provider["registry.terraform.io/hashicorp/aws"]
foo = foo
@ -4068,7 +4068,7 @@ func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
},
- State: state,
+ State: state.DeepCopy(),
})
plan, diags := ctx.Plan()
@ -4092,7 +4092,7 @@ func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) {
switch i := ric.Addr.String(); i {
case "aws_instance.foo[0]":
if res.Action != plans.DeleteThenCreate {
t.Fatalf("resource %s should be replaced", i)
t.Fatalf("resource %s should be replaced, not %s", i, res.Action)
}
checkVals(t, objectVal(t, schema, map[string]cty.Value{
"id": cty.StringVal("bar"),

View File

@ -749,7 +749,7 @@ func testProviderSchema(name string) *ProviderSchema {
// our context tests try to exercise lots of stuff at once and so having them
// round-trip things through on-disk files is often an important part of
// fully representing an old bug in a regression test.
- func contextOptsForPlanViaFile(configSnap *configload.Snapshot, state *states.State, plan *plans.Plan) (*ContextOpts, error) {
+ func contextOptsForPlanViaFile(configSnap *configload.Snapshot, plan *plans.Plan) (*ContextOpts, error) {
dir, err := ioutil.TempDir("", "terraform-contextForPlanViaFile")
if err != nil {
return nil, err
@ -760,7 +760,7 @@ func contextOptsForPlanViaFile(configSnap *configload.Snapshot, state *states.St
// to run through any of the codepaths that care about Lineage/Serial/etc
// here anyway.
stateFile := &statefile.File{
- State: state,
+ State: plan.State,
}
// To make life a little easier for test authors, we'll populate a simple

View File

@ -152,6 +152,11 @@ type EvalContext interface {
// the global state.
State() *states.SyncState
+ // RefreshState returns a wrapper object that provides safe concurrent
+ // access to the state used to store the most recently refreshed resource
+ // values.
+ RefreshState() *states.SyncState
// InstanceExpander returns a helper object for tracking the expansion of
// graph nodes during the plan phase in response to "count" and "for_each"
// arguments.

View File

@ -71,6 +71,7 @@ type BuiltinEvalContext struct {
ProvisionerLock *sync.Mutex
ChangesValue *plans.ChangesSync
StateValue *states.SyncState
+ RefreshStateValue *states.SyncState
InstanceExpanderValue *instances.Expander
}
@ -350,6 +351,10 @@ func (ctx *BuiltinEvalContext) State() *states.SyncState {
return ctx.StateValue
}
+ func (ctx *BuiltinEvalContext) RefreshState() *states.SyncState {
+ return ctx.RefreshStateValue
+ }
func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander {
return ctx.InstanceExpanderValue
}

View File

@ -126,6 +126,9 @@ type MockEvalContext struct {
StateCalled bool
StateState *states.SyncState
+ RefreshStateCalled bool
+ RefreshStateState *states.SyncState
InstanceExpanderCalled bool
InstanceExpanderExpander *instances.Expander
}
@ -338,6 +341,11 @@ func (c *MockEvalContext) State() *states.SyncState {
return c.StateState
}
+ func (c *MockEvalContext) RefreshState() *states.SyncState {
+ c.RefreshStateCalled = true
+ return c.RefreshStateState
+ }
func (c *MockEvalContext) InstanceExpander() *instances.Expander {
c.InstanceExpanderCalled = true
return c.InstanceExpanderExpander

View File

@ -117,8 +117,12 @@ func evaluateCountExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Val
// or this function will block forever awaiting the lock.
func fixResourceCountSetTransition(ctx EvalContext, addr addrs.ConfigResource, countEnabled bool) {
state := ctx.State()
- changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled)
- if changed {
+ if state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) {
log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr)
}
+ refreshState := ctx.RefreshState()
+ if refreshState != nil && refreshState.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) {
+ log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr)
+ }
}

View File

@ -13,6 +13,13 @@ import (
"github.com/hashicorp/terraform/tfdiags"
)
+ type phaseState int
+ const (
+ workingState phaseState = iota
+ refreshState
+ )
// EvalReadState is an EvalNode implementation that reads the
// current object for a specific instance in the state.
type EvalReadState struct {
@ -220,6 +227,10 @@ type EvalWriteState struct {
// Dependencies are the inter-resource dependencies to be stored in the
// state.
Dependencies *[]addrs.ConfigResource
+ // targetState determines which context state we're writing to during plan.
+ // The default is the global working state.
+ targetState phaseState
}
func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
@ -230,7 +241,15 @@ func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
}
absAddr := n.Addr.Absolute(ctx.Path())
- state := ctx.State()
+ var state *states.SyncState
+ switch n.targetState {
+ case refreshState:
+ log.Printf("[TRACE] EvalWriteState: using RefreshState for %s", absAddr)
+ state = ctx.RefreshState()
+ default:
+ state = ctx.State()
+ }
if n.ProviderAddr.Provider.Type == "" {
return nil, fmt.Errorf("failed to write state for %s: missing provider type", absAddr)

View File

@ -658,24 +658,21 @@ func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.Sourc
case config.ForEach != nil:
return cty.EmptyObjectVal, diags
default:
- // FIXME: try to prove this path should not be reached during plan.
- //
- // while we can reference an expanded resource with 0
+ // While we can reference an expanded resource with 0
// instances, we cannot reference instances that do not exist.
- // Since we haven't ensured that all instances exist in all
- // cases (this path only ever returned unknown), only log this as
- // an error for now, and continue to return a DynamicVal
+ // Due to the fact that we may have direct references to
+ // instances that may end up in a root output during destroy
+ // (since a planned destroy cannot yet remove root outputs), we
+ // need to return a dynamic value here to allow evaluation to
+ // continue.
log.Printf("[ERROR] unknown instance %q referenced during plan", addr.Absolute(d.ModulePath))
return cty.DynamicVal, diags
}
default:
- // we must return DynamicVal so that both interpretations
- // can proceed without generating errors, and we'll deal with this
- // in a later step where more information is gathered.
- // (In practice we should only end up here during the validate walk,
+ // We should only end up here during the validate walk,
// since later walks should have at least partial states populated
- // for all resources in the configuration.)
+ // for all resources in the configuration.
return cty.DynamicVal, diags
}
}
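
To illustrate why returning cty.DynamicVal lets evaluation continue, a standalone go-cty sketch (not from this commit):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.DynamicVal // unknown value of an unknown type
	// Operations on a dynamic unknown yield further unknowns rather than
	// errors, so expressions referencing missing instances still evaluate.
	attr := v.GetAttr("id")
	fmt.Println(attr.IsKnown()) // false
}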

View File

@ -26,6 +26,7 @@ type ContextGraphWalker struct {
// Configurable values
Context *Context
State *states.SyncState // Used for safe concurrent access to state
+ RefreshState *states.SyncState // Used for safe concurrent access to state
Changes *plans.ChangesSync // Used for safe concurrent writes to changes
InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances
Operation walkOperation
@ -96,6 +97,7 @@ func (w *ContextGraphWalker) EvalContext() EvalContext {
ProvisionerLock: &w.provisionerLock,
ChangesValue: w.Changes,
StateValue: w.State,
+ RefreshStateValue: w.RefreshState,
Evaluator: evaluator,
VariableValues: w.variableValues,
VariableValuesLock: &w.variableValuesLock,

View File

@ -63,8 +63,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResou
Addr: addr.Resource,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &state,
Output: &state,
},
&EvalValidateSelfRef{
@ -108,7 +107,8 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsRe
var provider providers.Interface
var providerSchema *ProviderSchema
var change *plans.ResourceInstanceChange
- var state *states.ResourceInstanceObject
+ var instanceRefreshState *states.ResourceInstanceObject
+ var instancePlanState *states.ResourceInstanceObject
return &EvalSequence{
Nodes: []EvalNode{
@ -118,19 +118,41 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsRe
Schema: &providerSchema,
},
&EvalReadState{
Addr: addr.Resource,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &state,
},
&EvalValidateSelfRef{
Addr: addr.Resource,
Config: config.Config,
ProviderSchema: &providerSchema,
},
// Refresh the instance
&EvalReadState{
Addr: addr.Resource,
Provider: &provider,
ProviderSchema: &providerSchema,
Output: &instanceRefreshState,
},
&EvalRefreshDependencies{
State: &instanceRefreshState,
Dependencies: &n.Dependencies,
},
&EvalRefresh{
Addr: addr.Resource,
ProviderAddr: n.ResolvedProvider,
Provider: &provider,
ProviderMetas: n.ProviderMetas,
ProviderSchema: &providerSchema,
State: &instanceRefreshState,
Output: &instanceRefreshState,
},
&EvalWriteState{
Addr: addr.Resource,
ProviderAddr: n.ResolvedProvider,
State: &instanceRefreshState,
ProviderSchema: &providerSchema,
targetState: refreshState,
},
// Plan the instance
&EvalDiff{
Addr: addr.Resource,
Config: n.Config,
@ -139,9 +161,9 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsRe
ProviderAddr: n.ResolvedProvider,
ProviderMetas: n.ProviderMetas,
ProviderSchema: &providerSchema,
State: &state,
State: &instanceRefreshState,
OutputChange: &change,
OutputState: &state,
OutputState: &instancePlanState,
},
&EvalCheckPreventDestroy{
Addr: addr.Resource,
@ -151,7 +173,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsRe
&EvalWriteState{
Addr: addr.Resource,
ProviderAddr: n.ResolvedProvider,
State: &state,
State: &instancePlanState,
ProviderSchema: &providerSchema,
},
&EvalWriteDiff{

View File

@ -1,9 +1,9 @@
variable "vpc_id" {}
resource "aws_instance" "child" {
vpc_id = "${var.vpc_id}"
vpc_id = var.vpc_id
}
output "modout" {
value = "${aws_instance.child.id}"
value = aws_instance.child.id
}

View File

@ -2,9 +2,9 @@ resource "aws_instance" "vpc" { }
module "child" {
source = "./child"
vpc_id = "${aws_instance.vpc.id}"
vpc_id = aws_instance.vpc.id
}
output "out" {
value = "${module.child.modout}"
value = module.child.modout
}