terraform/internal/terraform/node_resource_plan_instance.go

package terraform

import (
"fmt"
"log"
"sort"

"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. This means this represents a single
// count index, for example.
type NodePlannableResourceInstance struct {
*NodeAbstractResourceInstance
ForceCreateBeforeDestroy bool
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool
// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
// forceReplace are resource instance addresses where the user wants to
// force generating a replace action. This set isn't pre-filtered, so
// it might contain addresses that have nothing to do with the resource
// that this node represents, which the node itself must therefore ignore.
forceReplace []addrs.AbsResourceInstance
}
var (
_ GraphNodeModuleInstance = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil)
_ GraphNodeConfigResource = (*NodePlannableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil)
_ GraphNodeExecutable = (*NodePlannableResourceInstance)(nil)
)
// GraphNodeExecutable
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
addr := n.ResourceInstanceAddr()
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
return n.managedResourceExecute(ctx)
case addrs.DataResourceMode:
return n.dataResourceExecute(ctx)
default:
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
}
}
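// dataResourceExecute plans a read of the data source for this instance,
// records that planned read as a change, and writes the resulting object
// into both the refresh state and the working state.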
func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
config := n.Config
addr := n.ResourceInstanceAddr()
var change *plans.ResourceInstanceChange
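// We need the provider schema below to validate self-references in the
// configuration; the provider client itself isn't used directly here.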
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
}
state, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState which will capture the result read in the previous
// run, possibly tweaked by any upgrade steps that
// readResourceInstanceState might've made.
// However, note that we don't have any explicit mechanism for upgrading
// data resource results as we do for managed resources, and so the
// prevRunState might not conform to the current schema if the
// previous run was with a different provider version.
diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
if diags.HasErrors() {
return diags
}
diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
if diags.HasErrors() {
return diags
}
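// When we're skipping planning of changes (refresh-only mode) there are no
// planned actions for a failing check to block, so custom condition
// failures are reported as warnings rather than errors.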
checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges {
checkRuleSeverity = tfdiags.Warning
}
change, state, repeatData, planDiags := n.planDataSource(ctx, state, checkRuleSeverity)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
// write the data source into both the refresh state and the
// working state
diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeChange(ctx, change, ""))
// Post-conditions might block further progress. We intentionally do this
// _after_ writing the state/diff because we want to check against
// the result of the operation, and to fail on future operations
// until the user makes the condition succeed.
checkDiags := evalCheckRules(
checkResourcePostcondition,
n.Config.Postconditions,
ctx, addr.Resource, repeatData,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
return diags
}
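// managedResourceExecute refreshes the prior state of this instance (unless
// skipRefresh is set) and then plans any change actions (unless
// skipPlanChanges is set), recording the planned change and the resulting
// state snapshots.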
func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
config := n.Config
addr := n.ResourceInstanceAddr()
var change *plans.ResourceInstanceChange
var instanceRefreshState *states.ResourceInstanceObject
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
}
diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
if diags.HasErrors() {
return diags
}
instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
// In 0.13 we could be refreshing a resource with no config.
// We should be operating on a managed resource, but check here to be certain
if n.Config == nil || n.Config.Managed == nil {
log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
} else {
if instanceRefreshState != nil {
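// Record on the state object whether create_before_destroy applies to this
// instance, either from its configuration or because the graph forced it.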
instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
}
}
// Refresh, maybe
if !n.skipRefresh {
s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}
instanceRefreshState = s
if instanceRefreshState != nil {
// When refreshing we start by merging the stored dependencies and
// the configured dependencies. The configured dependencies will be
// stored to state once the changes are applied. If the plan
// results in no changes, we will re-write these dependencies
// below.
instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
}
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
// Plan the instance, unless we're in the refresh-only mode
if !n.skipPlanChanges {
change, instancePlanState, repeatData, planDiags := n.plan(
ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.checkPreventDestroy(change))
if diags.HasErrors() {
return diags
}
// FIXME: it is currently important that we write resource changes to
// the plan (n.writeChange) before we write the corresponding state
// (n.writeResourceInstanceState).
//
// This is because the planned resource state will normally have the
// status of states.ObjectPlanned, which causes later logic to refer to
// the contents of the plan to retrieve the resource data. Because
// there is no shared lock between these two data structures, reversing
// the order of these writes will cause a brief window of inconsistency
// which can lead to a failed safety check.
//
// Future work should adjust these APIs such that it is impossible to
// update these two data structures incorrectly through any objects
// reachable via the terraform.EvalContext API.
diags = diags.Append(n.writeChange(ctx, change, ""))
diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
if diags.HasErrors() {
return diags
}
// If this plan resulted in a NoOp, then apply won't have a chance to make
// any changes to the stored dependencies. Since this is a NoOp we know
// that the stored dependencies will have no effect during apply, and we can
// write them out now.
if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
// the refresh state will be the final state for this resource, so
// finalize the dependencies here if they need to be updated.
instanceRefreshState.Dependencies = n.Dependencies
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
// Post-conditions might block completion. We intentionally do this
// _after_ writing the state/diff because we want to check against
// the result of the operation, and to fail on future operations
// until the user makes the condition succeed.
// (Note that some preconditions will end up being skipped during
// planning, because their conditions depend on values not yet known.)
checkDiags := evalCheckRules(
checkResourcePostcondition,
n.Config.Postconditions,
ctx, addr.Resource, repeatData,
tfdiags.Error,
)
diags = diags.Append(checkDiags)
} else {
// In refresh-only mode we need to evaluate the for-each expression in
// order to supply the value to the pre- and post-condition check
// blocks. This has the unfortunate edge case of a refresh-only plan
// executing with a for-each map which has the same keys but different
// values, which could result in a post-condition check relying on that
// value being inaccurate. Unless we decide to store the value of the
// for-each expression in state, this is unavoidable.
forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
checkDiags := evalCheckRules(
checkResourcePrecondition,
n.Config.Preconditions,
ctx, nil, repeatData,
tfdiags.Warning,
)
diags = diags.Append(checkDiags)
// Even if we don't plan changes, we do still need to at least update
// the working state to reflect the refresh result. If not, then e.g.
// any output values referring to this will not react to the drift.
// (Even if we didn't actually refresh above, this will still save
// the result of any schema upgrading we did in readResourceInstanceState.)
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
if diags.HasErrors() {
return diags
}
// Here we also evaluate post-conditions after updating the working
// state, because we want to check against the result of the refresh.
// Unlike in normal planning mode, these checks are still evaluated
// even if pre-conditions generated diagnostics, because we have no
// planned changes to block.
checkDiags = evalCheckRules(
checkResourcePostcondition,
n.Config.Postconditions,
ctx, addr.Resource, repeatData,
tfdiags.Warning,
)
diags = diags.Append(checkDiags)
}
return diags
}
// mergeDeps returns the union of two sets of dependencies.
func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
switch {
case len(a) == 0:
return b
case len(b) == 0:
return a
}
set := make(map[string]addrs.ConfigResource)
for _, dep := range a {
set[dep.String()] = dep
}
for _, dep := range b {
set[dep.String()] = dep
}
newDeps := make([]addrs.ConfigResource, 0, len(set))
for _, dep := range set {
newDeps = append(newDeps, dep)
}
return newDeps
}
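// depsEqual reports whether two sets of dependencies contain the same
// addresses, ignoring order. It sorts both input slices in place as a side
// effect.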
func depsEqual(a, b []addrs.ConfigResource) bool {
if len(a) != len(b) {
return false
}
less := func(s []addrs.ConfigResource) func(i, j int) bool {
return func(i, j int) bool {
return s[i].String() < s[j].String()
}
}
sort.Slice(a, less(a))
sort.Slice(b, less(b))
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}