terraform/terraform/eval_diff.go

package terraform
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/plans/objchange"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
// EvalCheckPlannedChange is an EvalNode implementation that produces errors
// if the _actual_ change value produced during apply is not compatible with what was recorded
// in the plan.
//
// Errors here are most often indicative of a bug in the provider, so our
// error messages will report with that in mind. It's also possible that
// there's a bug in Terraform Core's own "proposed new value" code in
// EvalDiff.
type EvalCheckPlannedChange struct {
Addr addrs.ResourceInstance
ProviderAddr addrs.AbsProviderConfig
ProviderSchema **ProviderSchema
// We take ResourceInstanceChange objects here just because that's what's
// convenient to pass in from the evaltree implementation, but we really
// only look at the "After" value of each change.
Planned, Actual **plans.ResourceInstanceChange
}
func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) {
providerSchema := *n.ProviderSchema
plannedChange := *n.Planned
actualChange := *n.Actual
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type)
}
var diags tfdiags.Diagnostics
absAddr := n.Addr.Absolute(ctx.Path())
log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action)
if plannedChange.Action != actualChange.Action {
switch {
case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp:
// It's okay for an update to become a NoOp once we've filled in
// all of the unknown values, since the final values might actually
// match what was there before after all.
log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr)
default:
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced inconsistent final plan",
fmt.Sprintf(
"When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
absAddr, n.ProviderAddr.ProviderConfig.Type,
plannedChange.Action, actualChange.Action,
),
))
}
}
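// Illustrative example (assumption, not from the original source): the
// AssertObjectCompatible check below allows a value that was unknown at plan
// time to become any known value during apply, but flags any change to a
// value that was already known when the plan was created. Roughly:
//
//    planned := cty.ObjectVal(map[string]cty.Value{
//        "ami": cty.StringVal("ami-12345"),
//        "id":  cty.UnknownVal(cty.String),
//    })
//    actual := cty.ObjectVal(map[string]cty.Value{
//        "ami": cty.StringVal("ami-99999"), // inconsistent: was already known as "ami-12345"
//        "id":  cty.StringVal("i-abc123"),  // fine: was unknown at plan time
//    })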
errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After)
for _, err := range errs {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced inconsistent final plan",
fmt.Sprintf(
"When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
),
))
}
return nil, diags.Err()
}
// EvalDiff is an EvalNode implementation that detects changes for a given
// resource instance.
type EvalDiff struct {
Addr addrs.ResourceInstance
Config *configs.Resource
Provider *providers.Interface
ProviderAddr addrs.AbsProviderConfig
ProviderSchema **ProviderSchema
State **states.ResourceInstanceObject
PreviousDiff **plans.ResourceInstanceChange
// CreateBeforeDestroy is set if either the resource's own config sets
// create_before_destroy explicitly or if dependencies have forced the
// resource to be handled as create_before_destroy in order to avoid
// a dependency cycle.
CreateBeforeDestroy bool
OutputChange **plans.ResourceInstanceChange
OutputValue *cty.Value
OutputState **states.ResourceInstanceObject
Stub bool
}
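// Illustrative sketch (assumption, not from the original source): the
// double-pointer fields above exist so that evaltree nodes can share slots
// owned by the enclosing graph node. A plan-time sequence might wire this
// node up roughly as follows; the variable names are hypothetical.
//
//    var state *states.ResourceInstanceObject
//    var change *plans.ResourceInstanceChange
//
//    &EvalDiff{
//        Addr:           addr,
//        Config:         config,
//        Provider:       &provider,
//        ProviderAddr:   providerAddr,
//        ProviderSchema: &providerSchema,
//        State:          &state,
//        OutputChange:   &change,
//        OutputState:    &state,
//    }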
// TODO: test
func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
state := *n.State
config := *n.Config
provider := *n.Provider
providerSchema := *n.ProviderSchema
if providerSchema == nil {
return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr)
}
if n.ProviderAddr.ProviderConfig.Type.LegacyString() == "" {
panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path())))
}
var diags tfdiags.Diagnostics
// Evaluate the configuration
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
}
forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags.Err()
}
absAddr := n.Addr.Absolute(ctx.Path())
var priorVal cty.Value
var priorValTainted cty.Value
var priorPrivate []byte
if state != nil {
if state.Status != states.ObjectTainted {
priorVal = state.Value
priorPrivate = state.Private
} else {
// If the prior state is tainted then we'll proceed below like
// we're creating an entirely new object, but then turn it into
// a synthetic "Replace" change at the end, creating the same
// result as if the provider had marked at least one argument
// change as "requires replacement".
priorValTainted = state.Value
priorVal = cty.NullVal(schema.ImpliedType())
}
} else {
priorVal = cty.NullVal(schema.ImpliedType())
}
proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal)
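// Illustrative example (assumption, not from the original source): for a
// hypothetical resource type with a configurable "ami" and a computed "id",
// ProposedNewObject fills attributes left unset in config from the prior
// state, so with
//
//    priorVal:  {"ami": "ami-1", "id": "i-abc"}
//    configVal: {"ami": "ami-2", "id": null}
//
// the proposed new value is {"ami": "ami-2", "id": "i-abc"}: the configured
// value wins where it is set, and the computed prior value is carried forward.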
// Call pre-diff hook
if !n.Stub {
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
})
if err != nil {
return nil, err
}
}
log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path()))
// Allow the provider to validate the final set of values.
// The config was statically validated early on, but there may have been
// unknown values which the provider could not validate at the time.
validateResp := provider.ValidateResourceTypeConfig(
providers.ValidateResourceTypeConfigRequest{
TypeName: n.Addr.Resource.Type,
Config: configVal,
},
)
if validateResp.Diagnostics.HasErrors() {
return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err()
}
// The provider gets an opportunity to customize the proposed new value,
// which in turn produces the _planned_ new value. But before
// we send back this information, we need to process ignore_changes
// so that CustomizeDiff will not act on them
var ignoreChangeDiags tfdiags.Diagnostics
proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, diags.Err()
}
resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
TypeName: n.Addr.Resource.Type,
Config: configVal,
PriorState: priorVal,
ProposedNewState: proposedNewVal,
PriorPrivate: priorPrivate,
})
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
if diags.HasErrors() {
return nil, diags.Err()
}
plannedNewVal := resp.PlannedState
plannedPrivate := resp.PlannedPrivate
if plannedNewVal == cty.NilVal {
// Should never happen. Real providers return their results via RPC, so a
// nil here always indicates a bug in the client-side stub; in practice it is
// most likely caused by an incompletely-configured mock provider in tests.
panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String()))
}
// We allow the planned new value to disagree with configuration _values_
// here, since that allows the provider to do special logic like a
// DiffSuppressFunc, but we still require that the provider produces
// a value whose type conforms to the schema.
for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced invalid plan",
fmt.Sprintf(
"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
),
))
}
if diags.HasErrors() {
return nil, diags.Err()
}
if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 {
if resp.LegacyTypeSystem {
// The shimming of the old type system in the legacy SDK is not precise
// enough to pass this consistency check, so we'll give it a pass here,
// but we will generate a warning about it so that we are more likely
// to notice in the logs if an inconsistency beyond the type system
// leads to a downstream provider failure.
var buf strings.Builder
fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
for _, err := range errs {
fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
}
log.Print(buf.String())
} else {
for _, err := range errs {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced invalid plan",
fmt.Sprintf(
"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
),
))
}
return nil, diags.Err()
}
}
// TODO: We should be able to remove this repeat of processing ignored changes
// after the plan, which helps providers relying on old behavior "just work"
// in the next major version, such that we can be stricter about ignore_changes
// values
plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return nil, diags.Err()
}
// The provider produces a list of paths to attributes whose changes mean
// that we must replace rather than update an existing remote object.
// However, we only need to do that if the identified attributes _have_
// actually changed -- particularly after we may have undone some of the
// changes in processIgnoreChanges -- so now we'll filter that list to
// include only where changes are detected.
reqRep := cty.NewPathSet()
if len(resp.RequiresReplace) > 0 {
for _, path := range resp.RequiresReplace {
if priorVal.IsNull() {
// If prior is null then we don't expect any RequiresReplace at all,
// because this is a Create action.
continue
}
priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
// This means the path was invalid in both the prior and new
// values, which is an error with the provider itself.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced invalid plan",
fmt.Sprintf(
"Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
n.ProviderAddr.ProviderConfig.Type, absAddr, path,
),
))
continue
}
// Make sure we have valid Values for both values.
// Note: if the opposing value was of the type
// cty.DynamicPseudoType, the type assigned here may not exactly
// match the schema. This is fine here, since we're only going to
// check for equality, but if the NullVal is to be used, we need to
// check the schema for the true type.
switch {
case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
// this should never happen without ApplyPath errors above
panic("requires replace path returned 2 nil values")
case priorChangedVal == cty.NilVal:
priorChangedVal = cty.NullVal(plannedChangedVal.Type())
case plannedChangedVal == cty.NilVal:
plannedChangedVal = cty.NullVal(priorChangedVal.Type())
}
eqV := plannedChangedVal.Equals(priorChangedVal)
if !eqV.IsKnown() || eqV.False() {
reqRep.Add(path)
}
}
if diags.HasErrors() {
return nil, diags.Err()
}
}
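// Illustrative example (assumption, not from the original source): if the
// provider reported RequiresReplace = [cty.GetAttrPath("ami")] but
// ignore_changes restored the prior "ami" into plannedNewVal above, then
//
//    priorChangedVal, _   := hcl.ApplyPath(priorVal, cty.GetAttrPath("ami"), nil)
//    plannedChangedVal, _ := hcl.ApplyPath(plannedNewVal, cty.GetAttrPath("ami"), nil)
//
// yields equal values, the path stays out of reqRep, and the action chosen
// below remains an Update rather than a replace.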
eqV := plannedNewVal.Equals(priorVal)
eq := eqV.IsKnown() && eqV.True()
var action plans.Action
switch {
case priorVal.IsNull():
action = plans.Create
case eq:
action = plans.NoOp
case !reqRep.Empty():
// If there are any "requires replace" paths left _after our filtering
// above_ then this is a replace action.
if n.CreateBeforeDestroy {
action = plans.CreateThenDelete
} else {
action = plans.DeleteThenCreate
}
default:
action = plans.Update
// "Delete" is never chosen here, because deletion plans are always
// created more directly elsewhere, such as in "orphan" handling.
}
if action.IsReplace() {
// In this strange situation we want to produce a change object that
// shows our real prior object but has a _new_ object that is built
// from a null prior object, since we're going to delete the one
// that has all the computed values on it.
//
// Therefore we'll ask the provider to plan again here, giving it
// a null object for the prior, and then we'll meld that with the
// _actual_ prior state to produce a correctly-shaped replace change.
// The resulting change should show any computed attributes changing
// from known prior values to unknown values, unless the provider is
// able to predict new values for any of these computed attributes.
nullPriorVal := cty.NullVal(schema.ImpliedType())
// create a new proposed value from the null state and the config
proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal)
resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{
TypeName: n.Addr.Resource.Type,
Config: configVal,
PriorState: nullPriorVal,
ProposedNewState: proposedNewVal,
PriorPrivate: plannedPrivate,
})
// We need to tread carefully here, since if there are any warnings
// in here they probably also came out of our previous call to
// PlanResourceChange above, and so we don't want to repeat them.
// Consequently, we break from the usual pattern here and only
// append these new diagnostics if there's at least one error inside.
if resp.Diagnostics.HasErrors() {
diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
return nil, diags.Err()
}
plannedNewVal = resp.PlannedState
plannedPrivate = resp.PlannedPrivate
for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider produced invalid plan",
fmt.Sprintf(
"Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
),
))
}
if diags.HasErrors() {
return nil, diags.Err()
}
}
// If our prior value was tainted then we actually want this to appear
// as a replace change, even though so far we've been treating it as a
// create.
if action == plans.Create && priorValTainted != cty.NilVal {
if n.CreateBeforeDestroy {
action = plans.CreateThenDelete
} else {
action = plans.DeleteThenCreate
}
priorVal = priorValTainted
}
// As a special case, if we have a previous diff (presumably from the plan
// phase, whereas we're now in the apply phase) and it was for a replace,
// we've already deleted the original object from state by the time we
// get here and so we would've ended up with a _create_ action this time,
// which we now need to paper over to get a result consistent with what
// we originally intended.
if n.PreviousDiff != nil {
prevChange := *n.PreviousDiff
if prevChange.Action.IsReplace() && action == plans.Create {
log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action)
action = prevChange.Action
priorVal = prevChange.Before
}
}
// Call post-diff hook
if !n.Stub {
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal)
})
if err != nil {
return nil, err
}
}
// Update our output if we care
if n.OutputChange != nil {
*n.OutputChange = &plans.ResourceInstanceChange{
Addr: absAddr,
Private: plannedPrivate,
ProviderAddr: n.ProviderAddr,
Change: plans.Change{
Action: action,
Before: priorVal,
After: plannedNewVal,
},
RequiredReplace: reqRep,
}
}
if n.OutputValue != nil {
*n.OutputValue = configVal
}
// Update the state if we care
if n.OutputState != nil {
*n.OutputState = &states.ResourceInstanceObject{
// We use the special "planned" status here to note that this
// object's value is not yet complete. Objects with this status
// cannot be used during expression evaluation, so the caller
// must _also_ record the returned change in the active plan,
// which the expression evaluator will use in preference to this
// incomplete value recorded in the state.
Status: states.ObjectPlanned,
Value: plannedNewVal,
Private: plannedPrivate,
}
}
return nil, nil
}
func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) {
// ignore_changes only applies when an object already exists, since we
// can't ignore changes to a thing we've not created yet.
if prior.IsNull() {
return proposed, nil
}
ignoreChanges := n.Config.Managed.IgnoreChanges
ignoreAll := n.Config.Managed.IgnoreAllChanges
if len(ignoreChanges) == 0 && !ignoreAll {
return proposed, nil
}
if ignoreAll {
return prior, nil
}
if prior.IsNull() || proposed.IsNull() {
// ignore_changes doesn't apply when we're creating for the first time.
// Proposed should never be null here, but if it is then we'll just let it be.
return proposed, nil
}
return processIgnoreChangesIndividual(prior, proposed, ignoreChanges)
}
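// Illustrative example (assumption, not from the original source): the two
// early-return paths above correspond to configuration like
//
//    lifecycle {
//      ignore_changes = [tags["Name"], ami]   // populates IgnoreChanges
//    }
//
//    lifecycle {
//      ignore_changes = all                   // sets IgnoreAllChanges
//    }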
func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) {
// When we walk below we will be using cty.Path values for comparison, so
// we'll convert our traversals here so we can compare more easily.
ignoreChangesPath := make([]cty.Path, len(ignoreChanges))
for i, traversal := range ignoreChanges {
path := make(cty.Path, len(traversal))
for si, step := range traversal {
switch ts := step.(type) {
case hcl.TraverseRoot:
path[si] = cty.GetAttrStep{
Name: ts.Name,
}
case hcl.TraverseAttr:
path[si] = cty.GetAttrStep{
Name: ts.Name,
}
case hcl.TraverseIndex:
path[si] = cty.IndexStep{
Key: ts.Key,
}
default:
panic(fmt.Sprintf("unsupported traversal step %#v", step))
}
}
ignoreChangesPath[i] = path
}
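// Illustrative example (assumption, not from the original source): an
// ignore_changes traversal written as tags["Name"] converts to the path
//
//    cty.Path{
//        cty.GetAttrStep{Name: "tags"},
//        cty.IndexStep{Key: cty.StringVal("Name")},
//    }
//
// which the transform below matches against each visited path using
// path.Equals(candidate).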
var diags tfdiags.Diagnostics
ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
// First we must see if this is a path that's being ignored at all.
// We're looking for an exact match here because this walk will visit
// leaf values first and then their containers, and we want to do
// the "ignore" transform once we reach the point indicated, throwing
// away any deeper values we already produced at that point.
var ignoreTraversal hcl.Traversal
for i, candidate := range ignoreChangesPath {
if path.Equals(candidate) {
ignoreTraversal = ignoreChanges[i]
}
}
if ignoreTraversal == nil {
return v, nil
}
// If we're able to follow the same path through the prior value,
// we'll take the value there instead, effectively undoing the
// change that was planned.
priorV, diags := hcl.ApplyPath(prior, path, nil)
if diags.HasErrors() {
// We just ignore the errors and move on here, since we assume it's
// just because the prior value was a slightly-different shape.
// It could potentially also be that the traversal doesn't match
// the schema, but we should've caught that during the validate
// walk if so.
return v, nil
}
return priorV, nil
})
return ret, diags
}
// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
type flatAttrDiff map[string]*ResourceAttrDiff
// we need to keep all keys if any of them have a diff that's not ignored
func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool {
for k, v := range f {
ignore := false
for attr := range ignoreChanges {
if strings.HasPrefix(k, attr) {
ignore = true
}
}
if !v.Empty() && !v.NewComputed && !ignore {
return true
}
}
return false
}
// EvalDiffDestroy is an EvalNode implementation that returns a plain
// destroy diff.
type EvalDiffDestroy struct {
Addr addrs.ResourceInstance
DeposedKey states.DeposedKey
State **states.ResourceInstanceObject
ProviderAddr addrs.AbsProviderConfig
Output **plans.ResourceInstanceChange
OutputState **states.ResourceInstanceObject
}
// TODO: test
func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
absAddr := n.Addr.Absolute(ctx.Path())
state := *n.State
if n.ProviderAddr.ProviderConfig.Type.LegacyString() == "" {
if n.DeposedKey == "" {
panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr))
} else {
panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey))
}
}
// If there is no state or our attributes object is null then we're already
// destroyed.
if state == nil || state.Value.IsNull() {
return nil, nil
}
// Call pre-diff hook
err := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreDiff(
absAddr, n.DeposedKey.Generation(),
state.Value,
cty.NullVal(cty.DynamicPseudoType),
)
})
if err != nil {
return nil, err
}
// Change is always the same for a destroy. We don't need the provider's
// help for this one.
// TODO: Should we give the provider an opportunity to veto this?
change := &plans.ResourceInstanceChange{
Addr: absAddr,
DeposedKey: n.DeposedKey,
Change: plans.Change{
Action: plans.Delete,
Before: state.Value,
After: cty.NullVal(cty.DynamicPseudoType),
},
Private: state.Private,
ProviderAddr: n.ProviderAddr,
}
// Call post-diff hook
err = ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostDiff(
absAddr,
n.DeposedKey.Generation(),
change.Action,
change.Before,
change.After,
)
})
if err != nil {
return nil, err
}
// Update our output
*n.Output = change
if n.OutputState != nil {
// Record our proposed new state, which is nil because we're destroying.
*n.OutputState = nil
}
return nil, nil
}
// EvalReduceDiff is an EvalNode implementation that takes a planned resource
// instance change as might be produced by EvalDiff or EvalDiffDestroy and
// "simplifies" it to a single atomic action to be performed by a specific
// graph node.
//
// Callers must specify whether they are a destroy node or a regular apply
// node. If the result is NoOp then the given change requires no action for
// the specific graph node calling this and so evaluation of that graph
// node should exit early and take no action.
//
// The object written to OutChange may either be identical to InChange or
// a new change object derived from InChange. Because of the former case, the
// caller must not mutate the object returned in OutChange.
type EvalReduceDiff struct {
Addr addrs.ResourceInstance
InChange **plans.ResourceInstanceChange
Destroy bool
OutChange **plans.ResourceInstanceChange
}
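// Illustrative example (assumption, not from the original source): for an
// instance being replaced, a single DeleteThenCreate change is reduced
// differently for the two graph nodes that will act on it:
//
//    in.Simplify(true)  // destroy node: action becomes plans.Delete
//    in.Simplify(false) // apply node:   action becomes plans.Create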
// TODO: test
func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
in := *n.InChange
out := in.Simplify(n.Destroy)
if n.OutChange != nil {
*n.OutChange = out
}
if out.Action != in.Action {
if n.Destroy {
log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
} else {
log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
}
}
return nil, nil
}
// EvalReadDiff is an EvalNode implementation that retrieves the planned
// change for a particular resource instance object.
type EvalReadDiff struct {
Addr addrs.ResourceInstance
DeposedKey states.DeposedKey
ProviderSchema **ProviderSchema
Change **plans.ResourceInstanceChange
2015-02-12 23:46:22 +01:00
}
func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
providerSchema := *n.ProviderSchema
changes := ctx.Changes()
addr := n.Addr.Absolute(ctx.Path())
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
}
gen := states.CurrentGen
if n.DeposedKey != states.NotDeposed {
gen = n.DeposedKey
}
csrc := changes.GetResourceInstanceChange(addr, gen)
if csrc == nil {
log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr)
return nil, nil
}
change, err := csrc.Decode(schema.ImpliedType())
if err != nil {
return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err)
}
if n.Change != nil {
*n.Change = change
}
log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr)
return nil, nil
}
// EvalWriteDiff is an EvalNode implementation that saves a planned change
// for an instance object into the set of global planned changes.
type EvalWriteDiff struct {
Addr addrs.ResourceInstance
DeposedKey states.DeposedKey
ProviderSchema **ProviderSchema
Change **plans.ResourceInstanceChange
}
// TODO: test
func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
changes := ctx.Changes()
addr := n.Addr.Absolute(ctx.Path())
if n.Change == nil || *n.Change == nil {
// Caller sets nil to indicate that we need to remove a change from
// the set of changes.
gen := states.CurrentGen
if n.DeposedKey != states.NotDeposed {
gen = n.DeposedKey
}
changes.RemoveResourceInstanceChange(addr, gen)
return nil, nil
}
providerSchema := *n.ProviderSchema
change := *n.Change
if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey {
// Should never happen, and indicates a bug in the caller.
panic("inconsistent address and/or deposed key in EvalWriteDiff")
}
schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
}
csrc, err := change.Encode(schema.ImpliedType())
if err != nil {
return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err)
}
changes.AppendResourceInstanceChange(csrc)
if n.DeposedKey == states.NotDeposed {
log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr)
} else {
log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey)
}
return nil, nil
}
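// Illustrative example (assumption, not from the original source): the
// Encode call above and the Decode call in EvalReadDiff form a round trip
// through the schema's implied type:
//
//    csrc, err := change.Encode(schema.ImpliedType())   // -> *plans.ResourceInstanceChangeSrc, suitable for storing in the plan
//    change, err := csrc.Decode(schema.ImpliedType())   // -> *plans.ResourceInstanceChange with concrete cty.Values again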