// terraform/internal/states/sync.go

package states
import (
"log"
"sync"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/zclconf/go-cty/cty"
)
// SyncState is a wrapper around State that provides concurrency-safe access to
// various common operations that occur during a Terraform graph walk, or other
// similar concurrent contexts.
//
// When a SyncState wrapper is in use, no concurrent direct access to the
// underlying objects is permitted unless the caller first acquires an explicit
// lock, using the Lock and Unlock methods. Most callers should _not_
// explicitly lock, and should instead use the other methods of this type that
// handle locking automatically.
//
// Since SyncState is able to safely consolidate multiple updates into a single
// atomic operation, many of its methods are at a higher level than those
// of the underlying types, and operate on the state as a whole rather than
// on individual sub-structures of the state.
//
// SyncState can only protect against races within its own methods. It cannot
// provide any guarantees about the order in which concurrent operations will
// be processed, so callers may still need to employ higher-level techniques
// for ensuring correct operation sequencing, such as building and walking
// a dependency graph.
type SyncState struct {
state *State
lock sync.RWMutex
}
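// exampleSyncStateUsage is a minimal illustrative sketch (hypothetical helper,
// not part of the real API surface) of the intended usage pattern: wrap a
// State once and then rely on the concurrency-safe methods rather than
// explicit locking. It assumes NewState and (*State).SyncWrapper, which are
// defined elsewhere in this package.
func exampleSyncStateUsage(outputAddr addrs.AbsOutputValue) *State {
	ss := NewState().SyncWrapper()
	// The high-level methods handle locking internally, so they are safe to
	// call from concurrent graph-walk goroutines.
	ss.SetOutputValue(outputAddr, cty.StringVal("hello"), false)
	// Close invalidates the wrapper and hands back the underlying State.
	return ss.Close()
}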
// Module returns a snapshot of the state of the module instance with the given
// address, or nil if no such module is tracked.
//
// The return value is a pointer to a copy of the module state, which the
// caller may then freely access and mutate. However, since the module state
// tends to be a large data structure with many child objects, where possible
// callers should prefer to use a more granular accessor to access a child
// module directly, and thus reduce the amount of copying required.
func (s *SyncState) Module(addr addrs.ModuleInstance) *Module {
s.lock.RLock()
ret := s.state.Module(addr).DeepCopy()
s.lock.RUnlock()
return ret
}
// ModuleOutputs returns the set of OutputValues that matches the given path.
func (s *SyncState) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue {
s.lock.RLock()
defer s.lock.RUnlock()
var os []*OutputValue
for _, o := range s.state.ModuleOutputs(parentAddr, module) {
os = append(os, o.DeepCopy())
}
return os
}
// RemoveModule removes the entire state for the given module, taking with
// it any resources associated with the module. This should generally be
// called only for modules whose resources have all been destroyed, but
// that is not enforced by this method.
func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) {
s.lock.Lock()
defer s.lock.Unlock()
s.state.RemoveModule(addr)
}
// OutputValue returns a snapshot of the state of the output value with the
// given address, or nil if no such output value is tracked.
//
// The return value is a pointer to a copy of the output value state, which the
// caller may then freely access and mutate.
func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
s.lock.RLock()
ret := s.state.OutputValue(addr).DeepCopy()
s.lock.RUnlock()
return ret
}
// SetOutputValue writes a given output value into the state, overwriting
// any existing value of the same name.
//
// If the module containing the output is not yet tracked in state then it
// will be added as a side-effect.
func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.SetOutputValue(addr.OutputValue.Name, value, sensitive)
}
// RemoveOutputValue removes the stored value for the output value with the
// given address.
//
// If this results in its containing module being empty, the module will be
// pruned from the state as a side-effect.
func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return
}
ms.RemoveOutputValue(addr.OutputValue.Name)
s.maybePruneModule(addr.Module)
}
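// exampleReplaceOutput is a minimal illustrative sketch (hypothetical helper,
// not part of the real API surface) of the output-value methods above: write
// a value for the given output address and later remove it again, letting
// SyncState take care of locking and module pruning.
func exampleReplaceOutput(ss *SyncState, addr addrs.AbsOutputValue, v cty.Value) {
	// Overwrites any existing value of the same name, creating the containing
	// module state if necessary.
	ss.SetOutputValue(addr, v, false)
	// Removing the value again may prune the containing module if it becomes
	// empty as a result.
	ss.RemoveOutputValue(addr)
}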
// LocalValue returns the current value associated with the given local value
// address.
func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value {
s.lock.RLock()
// cty.Value is immutable, so we don't need any extra copying here.
ret := s.state.LocalValue(addr)
s.lock.RUnlock()
return ret
}
// SetLocalValue writes a given local value into the state, overwriting
// any existing value of the same name.
//
// If the module containing the local value is not yet tracked in state then it
// will be added as a side-effect.
func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.SetLocalValue(addr.LocalValue.Name, value)
}
// RemoveLocalValue removes the stored value for the local value with the
// given address.
//
// If this results in its containing module being empty, the module will be
// pruned from the state as a side-effect.
func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return
}
ms.RemoveLocalValue(addr.LocalValue.Name)
s.maybePruneModule(addr.Module)
}
// Resource returns a snapshot of the state of the resource with the given
// address, or nil if no such resource is tracked.
//
// The return value is a pointer to a copy of the resource state, which the
// caller may then freely access and mutate.
func (s *SyncState) Resource(addr addrs.AbsResource) *Resource {
s.lock.RLock()
ret := s.state.Resource(addr).DeepCopy()
s.lock.RUnlock()
return ret
}
// ResourceInstance returns a snapshot of the state of the resource instance with
// the given address, or nil if no such instance is tracked.
//
// The return value is a pointer to a copy of the instance state, which the
// caller may then freely access and mutate.
func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
s.lock.RLock()
ret := s.state.ResourceInstance(addr).DeepCopy()
s.lock.RUnlock()
return ret
}
// ResourceInstanceObject returns a snapshot of the current instance object
// of the given generation belonging to the instance with the given address,
// or nil if no such object is tracked.
//
// The return value is a pointer to a copy of the object, which the caller may
// then freely access and mutate.
func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc {
s.lock.RLock()
defer s.lock.RUnlock()
inst := s.state.ResourceInstance(addr)
if inst == nil {
return nil
}
return inst.GetGeneration(gen).DeepCopy()
}
// SetResourceProvider updates the resource-level provider configuration
// address for the resource at the given address, creating the containing
// module state and resource state as a side-effect if not already present.
func (s *SyncState) SetResourceProvider(addr addrs.AbsResource, provider addrs.AbsProviderConfig) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.SetResourceProvider(addr.Resource, provider)
}
// RemoveResource removes the entire state for the given resource, taking with
// it any instances associated with the resource. This should generally be
// called only for resource objects whose instances have all been destroyed,
// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead
// to safely check first.)
func (s *SyncState) RemoveResource(addr addrs.AbsResource) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.RemoveResource(addr.Resource)
s.maybePruneModule(addr.Module)
}
// RemoveResourceIfEmpty is similar to RemoveResource but first checks to
// make sure there are no instances or objects left in the resource.
//
// Returns true if the resource was removed, or false if remaining child
// objects prevented its removal. Returns true also if the resource was
// already absent, and thus no action needed to be taken.
func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return true // nothing to do
}
rs := ms.Resource(addr.Resource)
if rs == nil {
return true // nothing to do
}
if len(rs.Instances) != 0 {
// We don't check here for the possibility of instances that exist
// but don't have any objects because it's the responsibility of the
// instance-mutation methods to prune those away automatically.
return false
}
ms.RemoveResource(addr.Resource)
s.maybePruneModule(addr.Module)
return true
}
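// exampleCleanupResource is a minimal illustrative sketch (hypothetical
// helper, not part of the real API surface) of the check-then-remove pattern
// described above: prefer RemoveResourceIfEmpty when instances might remain.
func exampleCleanupResource(ss *SyncState, addr addrs.AbsResource) {
	if !ss.RemoveResourceIfEmpty(addr) {
		// Instances or their objects still exist, so the resource stays
		// tracked; the caller must deal with the remaining children first.
		log.Printf("[TRACE] %s still has instances, so it was not removed", addr)
	}
}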
// SetResourceInstanceCurrent saves the given instance object as the current
// generation of the resource instance with the given address, simultaneously
// updating the recorded provider configuration address, dependencies, and
// resource EachMode.
//
// Any existing current instance object for the given resource is overwritten.
// Set obj to nil to remove the primary generation object altogether. If there
// are no deposed objects then the instance as a whole will be removed, which
// may in turn also remove the containing module if it becomes empty.
//
// The caller must ensure that the given ResourceInstanceObject is not
// concurrently mutated during this call, but may be freely used again once
// this function returns.
//
// The provider address is a resource-wide setting and is updated
// for all other instances of the same resource as a side-effect of this call.
//
// If the containing module for this resource or the resource itself are not
// already tracked in state then they will be added as a side-effect.
func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider)
s.maybePruneModule(addr.Module)
}
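// exampleWriteCurrentObject is a minimal illustrative sketch (hypothetical
// helper, not part of the real API surface) of recording a new current object
// for a resource instance. The attrsJSON payload and provider address are
// assumed to come from the caller; ResourceInstanceObjectSrc's Status and
// AttrsJSON fields and the ObjectReady constant are defined elsewhere in this
// package.
func exampleWriteCurrentObject(ss *SyncState, addr addrs.AbsResourceInstance, attrsJSON []byte, provider addrs.AbsProviderConfig) {
	obj := &ResourceInstanceObjectSrc{
		Status:    ObjectReady,
		AttrsJSON: attrsJSON,
	}
	// SetResourceInstanceCurrent stores a deep copy, so obj may be reused
	// freely once this call returns. Passing nil instead of obj would remove
	// the current object (and possibly the instance, resource, and module).
	ss.SetResourceInstanceCurrent(addr, obj, provider)
}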
// SetResourceInstanceDeposed saves the given instance object as a deposed
// generation of the resource instance with the given address and deposed key.
//
// Call this method only for pre-existing deposed objects that already have
// a known DeposedKey. For example, this method is useful if reloading objects
// that were persisted to a state file. To mark the current object as deposed,
// use DeposeResourceInstanceObject instead.
//
// The caller must ensure that the given ResourceInstanceObject is not
// concurrently mutated during this call, but may be freely used again once
// this function returns.
//
// The resource that contains the given instance must already exist in the
// state, or this method will panic. Use Resource to check first if its
// presence is not already guaranteed.
//
// Any existing current instance object for the given resource and deposed key
// is overwritten. Set obj to nil to remove the deposed object altogether. If
// the instance is left with no objects after this operation then it will
// be removed from its containing resource altogether.
//
// If the containing module for this resource or the resource itself are not
// already tracked in state then they will be added as a side-effect.
func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.EnsureModule(addr.Module)
ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider)
s.maybePruneModule(addr.Module)
}
// DeposeResourceInstanceObject moves the current instance object for the
// given resource instance address into the deposed set, leaving the instance
// without a current object.
//
// The return value is the newly-allocated deposed key, or NotDeposed if the
// given instance is already lacking a current object.
//
// If the containing module for this resource or the resource itself are not
// already tracked in state then there cannot be a current object for the
// given instance, and so NotDeposed will be returned without modifying the
// state at all.
func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return NotDeposed
}
return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed)
}
// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject
// but uses a pre-allocated key. It's the caller's responsibility to ensure
// that there aren't any races to use a particular key; this method will panic
// if the given key is already in use.
func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) {
s.lock.Lock()
defer s.lock.Unlock()
if forcedKey == NotDeposed {
// Usage error: should use DeposeResourceInstanceObject in this case
panic("DeposeResourceInstanceObjectForceKey called without forced key")
}
ms := s.state.Module(addr.Module)
if ms == nil {
return // Nothing to do, since there can't be any current object either.
}
ms.deposeResourceInstanceObject(addr.Resource, forcedKey)
}
// ForgetResourceInstanceAll removes the record of all objects associated with
// the specified resource instance, if present. If not present, this is a no-op.
func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return
}
ms.ForgetResourceInstanceAll(addr.Resource)
s.maybePruneModule(addr.Module)
}
// ForgetResourceInstanceDeposed removes the record of the deposed object with
// the given address and key, if present. If not present, this is a no-op.
func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) {
s.lock.Lock()
defer s.lock.Unlock()
ms := s.state.Module(addr.Module)
if ms == nil {
return
}
ms.ForgetResourceInstanceDeposed(addr.Resource, key)
s.maybePruneModule(addr.Module)
}
// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the
// given key on the specified resource as the current object for that instance
// if and only if that would not cause us to forget an existing current
// object for that instance.
//
// Returns true if the object was restored to current, or false if no change
// was made at all.
func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
s.lock.Lock()
defer s.lock.Unlock()
if key == NotDeposed {
panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
}
ms := s.state.Module(addr.Module)
if ms == nil {
// Nothing to do, since the specified deposed object cannot exist.
return false
}
return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
}
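// exampleDeposedLifecycle is a minimal illustrative sketch (hypothetical
// helper, not part of the real API surface) tying together the deposed-object
// methods above: depose the current object, attempt some destroy action
// supplied by the caller, and then either forget the deposed record on
// success or try to restore it on failure.
func exampleDeposedLifecycle(ss *SyncState, addr addrs.AbsResourceInstance, destroy func() error) {
	key := ss.DeposeResourceInstanceObject(addr)
	if key == NotDeposed {
		// There was no current object to depose, so there is nothing to
		// destroy either.
		return
	}
	if err := destroy(); err != nil {
		// The destroy failed, so keep the object, restoring it as current
		// only if that would not displace a newer current object.
		ss.MaybeRestoreResourceInstanceDeposed(addr, key)
		return
	}
	// The destroy succeeded, so drop the record of the deposed object.
	ss.ForgetResourceInstanceDeposed(addr, key)
}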
// RemovePlannedResourceInstanceObjects removes from the state any resource
// instance objects that have the status ObjectPlanned, indicating that they
// are just transient placeholders created during planning.
//
// Note that this does not restore any "ready" or "tainted" object that might
// have been present before the planned object was written. The only real use
// for this method is in preparing the state created during a refresh walk,
// where we run the planning step for certain instances just to create enough
// information to allow correct expression evaluation within provider and
// data resource blocks. Discarding planned instances in that case is okay
// because the refresh phase only creates planned objects to stand in for
// objects that don't exist yet, and thus the planned object must have been
// absent before by definition.
func (s *SyncState) RemovePlannedResourceInstanceObjects() {
// TODO: Merge together the refresh and plan phases into a single walk,
// so we can remove the need to create this "partial plan" during refresh
// that we then need to clean up before proceeding.
s.lock.Lock()
defer s.lock.Unlock()
for _, ms := range s.state.Modules {
moduleAddr := ms.Addr
for _, rs := range ms.Resources {
resAddr := rs.Addr.Resource
for ik, is := range rs.Instances {
instAddr := resAddr.Instance(ik)
if is.Current != nil && is.Current.Status == ObjectPlanned {
// Setting the current instance to nil removes it from the
// state altogether if there are not also deposed instances.
ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig)
}
for dk, obj := range is.Deposed {
// Deposed objects should never be "planned", but we'll
// do this anyway for the sake of completeness.
if obj.Status == ObjectPlanned {
ms.ForgetResourceInstanceDeposed(instAddr, dk)
}
}
}
}
// We may have deleted some objects, which means that we may have
// left a module empty, and so we must prune to preserve the invariant
// that only the root module is allowed to be empty.
s.maybePruneModule(moduleAddr)
}
}
// Lock acquires an explicit lock on the state, allowing direct read and write
// access to the returned state object. The caller must call Unlock once
// access is no longer needed, and then immediately discard the state
// pointer.
//
// Most callers should not use this. Instead, use the concurrency-safe
// accessors and mutators provided directly on SyncState.
func (s *SyncState) Lock() *State {
s.lock.Lock()
return s.state
}
// Unlock releases a lock previously acquired by Lock, at which point the
// caller must cease all use of the state pointer that was returned.
//
// Do not call this method except to end an explicit lock acquired by
// Lock. If a caller calls Unlock without first holding the lock, behavior
// is undefined.
func (s *SyncState) Unlock() {
s.lock.Unlock()
}
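// exampleExplicitLock is a minimal illustrative sketch (hypothetical helper,
// not part of the real API surface) of the explicit locking pattern described
// above, for the rare callers that need direct access to the underlying
// State. The pointer returned by Lock must not be retained after Unlock.
func exampleExplicitLock(ss *SyncState) int {
	state := ss.Lock()
	defer ss.Unlock()
	// Direct reads of the underlying structures are safe only while the lock
	// is held.
	return len(state.Modules)
}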
// Close extracts the underlying state from inside this wrapper, making the
// wrapper invalid for any future operations.
func (s *SyncState) Close() *State {
s.lock.Lock()
ret := s.state
s.state = nil // make sure future operations can't still modify it
s.lock.Unlock()
return ret
}
// maybePruneModule will remove a module from the state altogether if it is
// empty, unless it's the root module which must always be present.
//
// This helper method is not concurrency-safe on its own, so must only be
// called while the caller is already holding the lock for writing.
func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) {
if addr.IsRoot() {
// We never prune the root.
return
}
ms := s.state.Module(addr)
if ms == nil {
return
}
if ms.empty() {
log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr)
s.state.RemoveModule(addr)
}
}
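// MoveAbsResource moves the state currently tracked at the src resource
// address so that it is tracked at the dst address instead, holding the write
// lock for the duration of the move.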
func (s *SyncState) MoveAbsResource(src, dst addrs.AbsResource) {
s.lock.Lock()
defer s.lock.Unlock()
s.state.MoveAbsResource(src, dst)
}
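// MaybeMoveAbsResource is like MoveAbsResource, but reports whether a move
// actually took place rather than requiring that one is always possible.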
func (s *SyncState) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.state.MaybeMoveAbsResource(src, dst)
}
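// MoveResourceInstance moves the state currently tracked at the src resource
// instance address so that it is tracked at the dst address instead, holding
// the write lock for the duration of the move.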
func (s *SyncState) MoveResourceInstance(src, dst addrs.AbsResourceInstance) {
s.lock.Lock()
defer s.lock.Unlock()
s.state.MoveAbsResourceInstance(src, dst)
}
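// MaybeMoveResourceInstance is like MoveResourceInstance, but reports whether
// a move actually took place rather than requiring that one is always
// possible.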
func (s *SyncState) MaybeMoveResourceInstance(src, dst addrs.AbsResourceInstance) bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.state.MaybeMoveAbsResourceInstance(src, dst)
}
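// MoveModuleInstance moves the state currently tracked for the src module
// instance so that it is tracked at the dst module instance address instead,
// holding the write lock for the duration of the move.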
func (s *SyncState) MoveModuleInstance(src, dst addrs.ModuleInstance) {
s.lock.Lock()
defer s.lock.Unlock()
s.state.MoveModuleInstance(src, dst)
}
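// MaybeMoveModuleInstance is like MoveModuleInstance, but reports whether a
// move actually took place rather than requiring that one is always possible.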
func (s *SyncState) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.state.MaybeMoveModuleInstance(src, dst)
}
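// exampleApplyMove is a minimal illustrative sketch (hypothetical helper, not
// part of the real API surface) showing one of the "maybe" move variants
// above being used to apply a requested resource move only when the state
// actually permits it.
func exampleApplyMove(ss *SyncState, src, dst addrs.AbsResource) {
	if ss.MaybeMoveAbsResource(src, dst) {
		log.Printf("[TRACE] moved %s to %s in state", src, dst)
	}
}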