From d81d521bcd4b22b31f238ffb4064eef305fab605 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Lapeyre?=
Date: Sun, 15 Nov 2020 14:54:57 +0100
Subject: [PATCH] Use a global sequence to create the IDs for each workspace

Until now the default workspace of every project had the ID 1, which
made it impossible to lock two of them at the same time since we use
the ID to identify the lock.

With a global sequence to generate the IDs, the default workspace now
gets a different ID in each project and it becomes possible to lock
multiple unrelated projects at the same time.

If an old version of Terraform tries to get the lock on a project
created with this new version, it will work: we continue to use the ID
of the workspace to identify the lock, we only change the way the IDs
are generated. If this version tries to get a lock on a project created
by an old version of Terraform, it will also work as usual, but it may
conflict with another, unrelated project. Since this is already the
current behavior, keeping it is not a regression, and the conflicts
will disappear as users migrate to an up-to-date version of Terraform.

Projects already present in the database keep their conflicting IDs: I
did not want to change them, as users may be reading the states
directly from the database. Those IDs can be changed manually to remove
the conflicts; newly created projects will work without any manual
intervention.

Closes https://github.com/hashicorp/terraform/issues/22833
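
For illustration, here is roughly what the change amounts to at the SQL
level. The schema names project_a and project_b are hypothetical, and
the snippet only sketches the queries the backend issues:

    -- Before: each schema had its own SERIAL, so both default
    -- workspaces got id = 1 and mapped to the same advisory lock.
    -- After: one shared sequence hands out a distinct id to every
    -- workspace row.
    CREATE SEQUENCE IF NOT EXISTS public.global_states_id_seq AS bigint;

    INSERT INTO project_a.states(name) VALUES ('default');  -- id = 1
    INSERT INTO project_b.states(name) VALUES ('default');  -- id = 2

    -- We identify the lock by the workspace ID, so the two projects
    -- no longer contend for the same advisory lock:
    SELECT pg_try_advisory_lock(1);  -- project_a/default
    SELECT pg_try_advisory_lock(2);  -- project_b/default

    -- Possible manual cleanup for a pre-existing project that still
    -- has a conflicting ID (only a sketch; take a backup first):
    UPDATE project_a.states
       SET id = nextval('public.global_states_id_seq')
     WHERE name = 'default';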
---
 backend/remote-state/pg/backend.go      | 10 ++-
 backend/remote-state/pg/backend_test.go | 89 +++++++++++++++++++++++++++++++
 2 files changed, 96 insertions(+), 3 deletions(-)

diff --git a/backend/remote-state/pg/backend.go b/backend/remote-state/pg/backend.go
index 0191176a9..9883cc559 100644
--- a/backend/remote-state/pg/backend.go
+++ b/backend/remote-state/pg/backend.go
@@ -105,10 +105,14 @@ func (b *Backend) configure(ctx context.Context) error {
 	}
 
 	if !data.Get("skip_table_creation").(bool) {
+		if _, err := db.Exec("CREATE SEQUENCE IF NOT EXISTS public.global_states_id_seq AS bigint"); err != nil {
+			return err
+		}
+
 		query = `CREATE TABLE IF NOT EXISTS %s.%s (
-			id SERIAL PRIMARY KEY,
-			name TEXT,
-			data TEXT
+			id bigint NOT NULL DEFAULT nextval('public.global_states_id_seq') PRIMARY KEY,
+			name text,
+			data text
 		)`
 		if _, err := db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName)); err != nil {
 			return err
diff --git a/backend/remote-state/pg/backend_test.go b/backend/remote-state/pg/backend_test.go
index 21a4272ad..00f1fd961 100644
--- a/backend/remote-state/pg/backend_test.go
+++ b/backend/remote-state/pg/backend_test.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/hashicorp/terraform/backend"
 	"github.com/hashicorp/terraform/states/remote"
+	"github.com/hashicorp/terraform/states/statemgr"
 	"github.com/lib/pq"
 	_ "github.com/lib/pq"
 )
@@ -266,6 +267,94 @@ func TestBackendStateLocks(t *testing.T) {
 	backend.TestBackendStateLocks(t, b, bb)
 }
 
+func TestBackendConcurrentLock(t *testing.T) {
+	testACC(t)
+	connStr := getDatabaseUrl()
+	dbCleaner, err := sql.Open("postgres", connStr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	getStateMgr := func(schemaName string) (statemgr.Full, *statemgr.LockInfo) {
+		// Drop the schema once the test is done. A plain defer here
+		// would fire as soon as this closure returns, deleting the
+		// schema before the test body gets to use the state manager.
+		t.Cleanup(func() {
+			dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
+		})
+		config := backend.TestWrapConfig(map[string]interface{}{
+			"conn_str":    connStr,
+			"schema_name": schemaName,
+		})
+		b := backend.TestBackendConfig(t, New(), config).(*Backend)
+
+		if b == nil {
+			t.Fatal("Backend could not be configured")
+		}
+		stateMgr, err := b.StateMgr(backend.DefaultStateName)
+		if err != nil {
+			t.Fatalf("Failed to get the state manager: %v", err)
+		}
+
+		info := statemgr.NewLockInfo()
+		info.Operation = "test"
+		info.Who = schemaName
+
+		return stateMgr, info
+	}
+
+	s1, i1 := getStateMgr(fmt.Sprintf("terraform_%s_1", t.Name()))
+	s2, i2 := getStateMgr(fmt.Sprintf("terraform_%s_2", t.Name()))
+
+	// First create both workspaces one after the other: the lock taken
+	// while creating a workspace is global, so the two creations cannot
+	// run concurrently.
+	lockID1, err := s1.Lock(i1)
+	if err != nil {
+		t.Fatalf("failed to lock first state: %v", err)
+	}
+
+	if err = s1.PersistState(); err != nil {
+		t.Fatalf("failed to persist state: %v", err)
+	}
+
+	if err := s1.Unlock(lockID1); err != nil {
+		t.Fatalf("failed to unlock first state: %v", err)
+	}
+
+	lockID2, err := s2.Lock(i2)
+	if err != nil {
+		t.Fatalf("failed to lock second state: %v", err)
+	}
+
+	if err = s2.PersistState(); err != nil {
+		t.Fatalf("failed to persist state: %v", err)
+	}
+
+	if err := s2.Unlock(lockID2); err != nil {
+		t.Fatalf("failed to unlock second state: %v", err)
+	}
+
+	// Now that both workspaces exist, hold both locks at the same time.
+	lockID1, err = s1.Lock(i1)
+	if err != nil {
+		t.Fatalf("failed to lock first state: %v", err)
+	}
+
+	lockID2, err = s2.Lock(i2)
+	if err != nil {
+		t.Fatalf("failed to lock second state: %v", err)
+	}
+
+	if err := s1.Unlock(lockID1); err != nil {
+		t.Fatalf("failed to unlock first state: %v", err)
+	}
+
+	if err := s2.Unlock(lockID2); err != nil {
+		t.Fatalf("failed to unlock second state: %v", err)
+	}
+}
+
 func getDatabaseUrl() string {
 	return os.Getenv("DATABASE_URL")
 }