Merge pull request #10808 from hashicorp/f-postgresql-owner

Resource: `postgresql_schema` privilege support
This commit is contained in:
Sean Chittenden 2016-12-27 16:08:13 -08:00 committed by GitHub
commit 9dc71312b7
38 changed files with 2602 additions and 334 deletions

View File

@ -0,0 +1,2 @@
data/
pwfile

View File

@ -1,11 +1,19 @@
POSTGRES?=/opt/local/lib/postgresql96/bin/postgres
PSQL?=/opt/local/lib/postgresql96/bin/psql
# env TESTARGS='-run TestAccPostgresqlSchema_AddPolicy' TF_LOG=warn make test
#
# NOTE: As of PostgreSQL 9.6.1 the -test.parallel=1 is required when
# performing `DROP ROLE`-related actions. This behavior and requirement
# may change in the future and is likely not required when doing
# non-delete related operations. But for now it is.
POSTGRES?=$(wildcard /usr/local/bin/postgres /opt/local/lib/postgresql96/bin/postgres)
PSQL?=$(wildcard /usr/local/bin/psql /opt/local/lib/postgresql96/bin/psql)
INITDB?=$(wildcard /usr/local/bin/initdb /opt/local/lib/postgresql96/bin/initdb)
PGDATA?=$(GOPATH)/src/github.com/hashicorp/terraform/builtin/providers/postgresql/data
initdb::
echo "" > pwfile
/opt/local/lib/postgresql96/bin/initdb --no-locale -U postgres -A md5 --pwfile=pwfile -D $(PGDATA)
$(INITDB) --no-locale -U postgres -A md5 --pwfile=pwfile -D $(PGDATA)
startdb::
2>&1 \

View File

@ -5,7 +5,10 @@ import (
"database/sql"
"fmt"
"log"
"sync"
"unicode"
"github.com/hashicorp/errwrap"
_ "github.com/lib/pq" //PostgreSQL db
)
@ -26,6 +29,13 @@ type Config struct {
type Client struct {
username string
connStr string
// PostgreSQL lock on pg_catalog. Many of the operations that Terraform
// performs are not permitted to be concurrent. Unlike traditional
// PostgreSQL tables that use MVCC, many of the PostgreSQL system
// catalogs look like tables, but are not in-fact able to be
// concurrently updated.
catalogLock sync.RWMutex
}
// NewClient returns new client config
@ -38,7 +48,12 @@ func (c *Config) NewClient() (*Client, error) {
q := func(s string) string {
b := bytes.NewBufferString(`'`)
b.Grow(len(s) + 2)
var haveWhitespace bool
for _, r := range s {
if unicode.IsSpace(r) {
haveWhitespace = true
}
switch r {
case '\'':
b.WriteString(`\'`)
@ -50,7 +65,12 @@ func (c *Config) NewClient() (*Client, error) {
}
b.WriteString(`'`)
return b.String()
str := b.String()
if haveWhitespace || len(str) == 2 {
return str
}
return str[1 : len(str)-1]
}
logDSN := fmt.Sprintf(dsnFmt, q(c.Host), c.Port, q(c.Database), q(c.Username), q("<redacted>"), q(c.SSLMode), q(c.ApplicationName), c.ConnectTimeoutSec)
@ -70,7 +90,7 @@ func (c *Config) NewClient() (*Client, error) {
func (c *Client) Connect() (*sql.DB, error) {
db, err := sql.Open("postgres", c.connStr)
if err != nil {
return nil, fmt.Errorf("Error connecting to PostgreSQL server: %s", err)
return nil, errwrap.Wrapf("Error connecting to PostgreSQL server: {{err}}", err)
}
return db, nil

View File

@ -32,6 +32,7 @@ func resourcePostgreSQLDatabase() *schema.Resource {
Read: resourcePostgreSQLDatabaseRead,
Update: resourcePostgreSQLDatabaseUpdate,
Delete: resourcePostgreSQLDatabaseDelete,
Exists: resourcePostgreSQLDatabaseExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
@ -46,7 +47,7 @@ func resourcePostgreSQLDatabase() *schema.Resource {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "The role name of the user who will own the new database",
Description: "The ROLE which owns the database",
},
dbTemplateAttr: {
Type: schema.TypeString,
@ -107,6 +108,10 @@ func resourcePostgreSQLDatabase() *schema.Resource {
func resourcePostgreSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err)
@ -184,11 +189,14 @@ func resourcePostgreSQLDatabaseCreate(d *schema.ResourceData, meta interface{})
d.SetId(dbName)
return resourcePostgreSQLDatabaseRead(d, meta)
return resourcePostgreSQLDatabaseReadImpl(d, meta)
}
func resourcePostgreSQLDatabaseDelete(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err)
@ -220,7 +228,38 @@ func resourcePostgreSQLDatabaseDelete(d *schema.ResourceData, meta interface{})
return nil
}
// resourcePostgreSQLDatabaseExists reports whether the database named by the
// resource ID is still present in pg_database. It takes only a read lock on
// the catalog because the existence check performs no catalog mutation.
func resourcePostgreSQLDatabaseExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	conn, err := c.Connect()
	if err != nil {
		return false, err
	}
	defer conn.Close()

	var dbName string
	err = conn.QueryRow("SELECT d.datname from pg_database d WHERE datname=$1", d.Id()).Scan(&dbName)
	switch {
	case err == sql.ErrNoRows:
		// No matching row: the database is gone, which is not an error —
		// Terraform will simply re-create or forget the resource.
		return false, nil
	case err != nil:
		return false, err
	}

	return true, nil
}
// resourcePostgreSQLDatabaseRead is the schema.Resource Read entry point.
// It acquires the catalog read lock and delegates to the lock-free
// implementation so that Create/Update — which already hold the write
// lock — can reuse the same read logic without self-deadlocking.
func resourcePostgreSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	return resourcePostgreSQLDatabaseReadImpl(d, meta)
}
func resourcePostgreSQLDatabaseReadImpl(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
conn, err := c.Connect()
if err != nil {
@ -276,6 +315,9 @@ func resourcePostgreSQLDatabaseRead(d *schema.ResourceData, meta interface{}) er
func resourcePostgreSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
@ -308,7 +350,7 @@ func resourcePostgreSQLDatabaseUpdate(d *schema.ResourceData, meta interface{})
// Empty values: ALTER DATABASE name RESET configuration_parameter;
return resourcePostgreSQLDatabaseRead(d, meta)
return resourcePostgreSQLDatabaseReadImpl(d, meta)
}
func setDBName(conn *sql.DB, d *schema.ResourceData) error {

View File

@ -24,6 +24,7 @@ func resourcePostgreSQLExtension() *schema.Resource {
Read: resourcePostgreSQLExtensionRead,
Update: resourcePostgreSQLExtensionUpdate,
Delete: resourcePostgreSQLExtensionDelete,
Exists: resourcePostgreSQLExtensionExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
@ -52,6 +53,9 @@ func resourcePostgreSQLExtension() *schema.Resource {
func resourcePostgreSQLExtensionCreate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
@ -61,7 +65,7 @@ func resourcePostgreSQLExtensionCreate(d *schema.ResourceData, meta interface{})
extName := d.Get(extNameAttr).(string)
b := bytes.NewBufferString("CREATE EXTENSION ")
fmt.Fprintf(b, pq.QuoteIdentifier(extName))
fmt.Fprint(b, pq.QuoteIdentifier(extName))
if v, ok := d.GetOk(extSchemaAttr); ok {
fmt.Fprint(b, " SCHEMA ", pq.QuoteIdentifier(v.(string)))
@ -79,11 +83,43 @@ func resourcePostgreSQLExtensionCreate(d *schema.ResourceData, meta interface{})
d.SetId(extName)
return resourcePostgreSQLExtensionRead(d, meta)
return resourcePostgreSQLExtensionReadImpl(d, meta)
}
// resourcePostgreSQLExtensionExists reports whether the extension named by
// the resource ID is still present in pg_catalog.pg_extension.
//
// Only a read lock is taken on the catalog: the existence check performs no
// catalog mutation. This matches the locking discipline of the other
// *Exists functions (database, role, schema), which all use RLock; the
// previous exclusive Lock here needlessly serialized concurrent refreshes.
func resourcePostgreSQLExtensionExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	conn, err := c.Connect()
	if err != nil {
		return false, err
	}
	defer conn.Close()

	var extName string
	err = conn.QueryRow("SELECT extname FROM pg_catalog.pg_extension WHERE extname = $1", d.Id()).Scan(&extName)
	switch {
	case err == sql.ErrNoRows:
		// No matching row: the extension is gone, which is not an error.
		return false, nil
	case err != nil:
		return false, err
	}

	return true, nil
}
// resourcePostgreSQLExtensionRead is the schema.Resource Read entry point.
// It acquires the catalog read lock and delegates to the lock-free
// implementation so callers that already hold the write lock (Create,
// Update) can share the same read logic.
func resourcePostgreSQLExtensionRead(d *schema.ResourceData, meta interface{}) error {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	return resourcePostgreSQLExtensionReadImpl(d, meta)
}
func resourcePostgreSQLExtensionReadImpl(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
conn, err := c.Connect()
if err != nil {
return err
@ -111,6 +147,9 @@ func resourcePostgreSQLExtensionRead(d *schema.ResourceData, meta interface{}) e
func resourcePostgreSQLExtensionDelete(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
@ -132,6 +171,9 @@ func resourcePostgreSQLExtensionDelete(d *schema.ResourceData, meta interface{})
func resourcePostgreSQLExtensionUpdate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
@ -148,7 +190,7 @@ func resourcePostgreSQLExtensionUpdate(d *schema.ResourceData, meta interface{})
return err
}
return resourcePostgreSQLExtensionRead(d, meta)
return resourcePostgreSQLExtensionReadImpl(d, meta)
}
func setExtSchema(conn *sql.DB, d *schema.ResourceData) error {

View File

@ -13,18 +13,20 @@ import (
)
const (
roleBypassRLSAttr = "bypass_row_level_security"
roleConnLimitAttr = "connection_limit"
roleCreateDBAttr = "create_database"
roleCreateRoleAttr = "create_role"
roleEncryptedPassAttr = "encrypted_password"
roleInheritAttr = "inherit"
roleLoginAttr = "login"
roleNameAttr = "name"
rolePasswordAttr = "password"
roleReplicationAttr = "replication"
roleSuperuserAttr = "superuser"
roleValidUntilAttr = "valid_until"
roleBypassRLSAttr = "bypass_row_level_security"
roleConnLimitAttr = "connection_limit"
roleCreateDBAttr = "create_database"
roleCreateRoleAttr = "create_role"
roleEncryptedPassAttr = "encrypted_password"
roleInheritAttr = "inherit"
roleLoginAttr = "login"
roleNameAttr = "name"
rolePasswordAttr = "password"
roleReplicationAttr = "replication"
roleSkipDropRoleAttr = "skip_drop_role"
roleSkipReassignOwnedAttr = "skip_reassign_owned"
roleSuperuserAttr = "superuser"
roleValidUntilAttr = "valid_until"
// Deprecated options
roleDepEncryptedAttr = "encrypted"
@ -36,6 +38,7 @@ func resourcePostgreSQLRole() *schema.Resource {
Read: resourcePostgreSQLRoleRead,
Update: resourcePostgreSQLRoleUpdate,
Delete: resourcePostgreSQLRoleDelete,
Exists: resourcePostgreSQLRoleExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
@ -120,12 +123,27 @@ func resourcePostgreSQLRole() *schema.Resource {
Default: false,
Description: "Determine whether a role bypasses every row-level security (RLS) policy",
},
roleSkipDropRoleAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Skip actually running the DROP ROLE command when removing a ROLE from PostgreSQL",
},
roleSkipReassignOwnedAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Skip actually running the REASSIGN OWNED command when removing a role from PostgreSQL",
},
},
}
}
func resourcePostgreSQLRoleCreate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err)
@ -229,22 +247,49 @@ func resourcePostgreSQLRoleCreate(d *schema.ResourceData, meta interface{}) erro
d.SetId(roleName)
return resourcePostgreSQLRoleRead(d, meta)
return resourcePostgreSQLRoleReadImpl(d, meta)
}
func resourcePostgreSQLRoleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*Client)
conn, err := client.Connect()
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
}
defer conn.Close()
roleName := d.Get(roleNameAttr).(string)
query := fmt.Sprintf("DROP ROLE %s", pq.QuoteIdentifier(roleName))
_, err = conn.Query(query)
txn, err := conn.Begin()
if err != nil {
return errwrap.Wrapf("Error deleting role: {{err}}", err)
return err
}
defer txn.Rollback()
roleName := d.Get(roleNameAttr).(string)
queries := make([]string, 0, 3)
if !d.Get(roleSkipReassignOwnedAttr).(bool) {
queries = append(queries, fmt.Sprintf("REASSIGN OWNED BY %s TO CURRENT_USER", pq.QuoteIdentifier(roleName)))
queries = append(queries, fmt.Sprintf("DROP OWNED BY %s", pq.QuoteIdentifier(roleName)))
}
if !d.Get(roleSkipDropRoleAttr).(bool) {
queries = append(queries, fmt.Sprintf("DROP ROLE %s", pq.QuoteIdentifier(roleName)))
}
if len(queries) > 0 {
for _, query := range queries {
_, err = conn.Query(query)
if err != nil {
return errwrap.Wrapf("Error deleting role: {{err}}", err)
}
}
if err := txn.Commit(); err != nil {
return errwrap.Wrapf("Error committing schema: {{err}}", err)
}
}
d.SetId("")
@ -252,7 +297,38 @@ func resourcePostgreSQLRoleDelete(d *schema.ResourceData, meta interface{}) erro
return nil
}
// resourcePostgreSQLRoleExists reports whether the role named by the
// resource ID is still present in pg_catalog.pg_roles. It takes only a read
// lock on the catalog because no catalog mutation is performed.
func resourcePostgreSQLRoleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	conn, err := c.Connect()
	if err != nil {
		return false, err
	}
	defer conn.Close()

	var roleName string
	err = conn.QueryRow("SELECT rolname FROM pg_catalog.pg_roles WHERE rolname=$1", d.Id()).Scan(&roleName)
	switch {
	case err == sql.ErrNoRows:
		// No matching row: the role is gone, which is not an error.
		return false, nil
	case err != nil:
		return false, err
	}

	return true, nil
}
// resourcePostgreSQLRoleRead is the schema.Resource Read entry point. It
// acquires the catalog read lock and delegates to the lock-free
// implementation so Create/Update — which already hold the write lock —
// can reuse the same read logic without self-deadlocking.
func resourcePostgreSQLRoleRead(d *schema.ResourceData, meta interface{}) error {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	return resourcePostgreSQLRoleReadImpl(d, meta)
}
func resourcePostgreSQLRoleReadImpl(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
conn, err := c.Connect()
if err != nil {
@ -282,6 +358,8 @@ func resourcePostgreSQLRoleRead(d *schema.ResourceData, meta interface{}) error
d.Set(roleInheritAttr, roleInherit)
d.Set(roleLoginAttr, roleCanLogin)
d.Set(roleReplicationAttr, roleReplication)
d.Set(roleSkipDropRoleAttr, d.Get(roleSkipDropRoleAttr).(bool))
d.Set(roleSkipReassignOwnedAttr, d.Get(roleSkipReassignOwnedAttr).(bool))
d.Set(roleSuperuserAttr, roleSuperuser)
d.Set(roleValidUntilAttr, roleValidUntil)
d.SetId(roleName)
@ -296,7 +374,7 @@ func resourcePostgreSQLRoleRead(d *schema.ResourceData, meta interface{}) error
err = conn.QueryRow("SELECT COALESCE(passwd, '') FROM pg_catalog.pg_shadow AS s WHERE s.usename = $1", roleId).Scan(&rolePassword)
switch {
case err == sql.ErrNoRows:
return fmt.Errorf("PostgreSQL role (%s) not found in shadow database: {{err}}", roleId)
return errwrap.Wrapf(fmt.Sprintf("PostgreSQL role (%s) not found in shadow database: {{err}}", roleId), err)
case err != nil:
return errwrap.Wrapf("Error reading role: {{err}}", err)
default:
@ -307,6 +385,9 @@ func resourcePostgreSQLRoleRead(d *schema.ResourceData, meta interface{}) error
func resourcePostgreSQLRoleUpdate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
@ -353,7 +434,7 @@ func resourcePostgreSQLRoleUpdate(d *schema.ResourceData, meta interface{}) erro
return err
}
return resourcePostgreSQLRoleRead(d, meta)
return resourcePostgreSQLRoleReadImpl(d, meta)
}
func setRoleName(conn *sql.DB, d *schema.ResourceData) error {

View File

@ -23,6 +23,10 @@ func TestAccPostgresqlRole_Basic(t *testing.T) {
"postgresql_role.myrole2", "name", "myrole2"),
resource.TestCheckResourceAttr(
"postgresql_role.myrole2", "login", "true"),
resource.TestCheckResourceAttr(
"postgresql_role.myrole2", "skip_drop_role", "false"),
resource.TestCheckResourceAttr(
"postgresql_role.myrole2", "skip_reassign_owned", "false"),
resource.TestCheckResourceAttr(
"postgresql_role.role_with_defaults", "name", "testing_role_with_defaults"),
@ -46,6 +50,10 @@ func TestAccPostgresqlRole_Basic(t *testing.T) {
"postgresql_role.role_with_defaults", "password", ""),
resource.TestCheckResourceAttr(
"postgresql_role.role_with_defaults", "valid_until", "infinity"),
resource.TestCheckResourceAttr(
"postgresql_role.role_with_defaults", "skip_drop_role", "false"),
resource.TestCheckResourceAttr(
"postgresql_role.role_with_defaults", "skip_reassign_owned", "false"),
),
},
},
@ -164,6 +172,8 @@ resource "postgresql_role" "role_with_defaults" {
connection_limit = -1
encrypted_password = true
password = ""
skip_drop_role = false
skip_reassign_owned = false
valid_until = "infinity"
}
`

View File

@ -6,14 +6,26 @@ import (
"errors"
"fmt"
"log"
"reflect"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
"github.com/lib/pq"
"github.com/sean-/postgresql-acl"
)
const (
schemaNameAttr = "name"
schemaNameAttr = "name"
schemaOwnerAttr = "owner"
schemaPolicyAttr = "policy"
schemaIfNotExists = "if_not_exists"
schemaPolicyCreateAttr = "create"
schemaPolicyCreateWithGrantAttr = "create_with_grant"
schemaPolicyRoleAttr = "role"
schemaPolicyUsageAttr = "usage"
schemaPolicyUsageWithGrantAttr = "usage_with_grant"
)
func resourcePostgreSQLSchema() *schema.Resource {
@ -22,6 +34,7 @@ func resourcePostgreSQLSchema() *schema.Resource {
Read: resourcePostgreSQLSchemaRead,
Update: resourcePostgreSQLSchemaUpdate,
Delete: resourcePostgreSQLSchemaDelete,
Exists: resourcePostgreSQLSchemaExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
@ -32,54 +45,212 @@ func resourcePostgreSQLSchema() *schema.Resource {
Required: true,
Description: "The name of the schema",
},
schemaOwnerAttr: {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "The ROLE name who owns the schema",
},
schemaIfNotExists: {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "When true, use the existing schema if it exsts",
},
schemaPolicyAttr: &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
schemaPolicyCreateAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, allow the specified ROLEs to CREATE new objects within the schema(s)",
ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyCreateWithGrantAttr},
},
schemaPolicyCreateWithGrantAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, allow the specified ROLEs to CREATE new objects within the schema(s) and GRANT the same CREATE privilege to different ROLEs",
ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyCreateAttr},
},
schemaPolicyRoleAttr: {
Type: schema.TypeString,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
Default: "",
Description: "ROLE who will receive this policy (default: PUBLIC)",
},
schemaPolicyUsageAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, allow the specified ROLEs to use objects within the schema(s)",
ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyUsageWithGrantAttr},
},
schemaPolicyUsageWithGrantAttr: {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, allow the specified ROLEs to use objects within the schema(s) and GRANT the same USAGE privilege to different ROLEs",
ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyUsageAttr},
},
},
},
},
},
}
}
func resourcePostgreSQLSchemaCreate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
queries := []string{}
schemaName := d.Get(schemaNameAttr).(string)
{
b := bytes.NewBufferString("CREATE SCHEMA ")
if v := d.Get(schemaIfNotExists); v.(bool) {
fmt.Fprint(b, "IF NOT EXISTS ")
}
fmt.Fprint(b, pq.QuoteIdentifier(schemaName))
switch v, ok := d.GetOk(schemaOwnerAttr); {
case ok:
fmt.Fprint(b, " AUTHORIZATION ", pq.QuoteIdentifier(v.(string)))
}
queries = append(queries, b.String())
}
// ACL objects that can generate the necessary SQL
type RoleKey string
var schemaPolicies map[RoleKey]acl.Schema
if policiesRaw, ok := d.GetOk(schemaPolicyAttr); ok {
policiesList := policiesRaw.(*schema.Set).List()
// NOTE: len(policiesList) doesn't take into account multiple
// roles per policy.
schemaPolicies = make(map[RoleKey]acl.Schema, len(policiesList))
for _, policyRaw := range policiesList {
policyMap := policyRaw.(map[string]interface{})
rolePolicy := schemaPolicyToACL(policyMap)
roleKey := RoleKey(strings.ToLower(rolePolicy.Role))
if existingRolePolicy, ok := schemaPolicies[roleKey]; ok {
schemaPolicies[roleKey] = existingRolePolicy.Merge(rolePolicy)
} else {
schemaPolicies[roleKey] = rolePolicy
}
}
}
for _, policy := range schemaPolicies {
queries = append(queries, policy.Grants(schemaName)...)
}
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err)
}
defer conn.Close()
schemaName := d.Get(schemaNameAttr).(string)
b := bytes.NewBufferString("CREATE SCHEMA ")
fmt.Fprintf(b, pq.QuoteIdentifier(schemaName))
query := b.String()
_, err = conn.Query(query)
txn, err := conn.Begin()
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("Error creating schema %s: {{err}}", schemaName), err)
return err
}
defer txn.Rollback()
for _, query := range queries {
_, err = txn.Query(query)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("Error creating schema %s: {{err}}", schemaName), err)
}
}
if err := txn.Commit(); err != nil {
return errwrap.Wrapf("Error committing schema: {{err}}", err)
}
d.SetId(schemaName)
return resourcePostgreSQLSchemaRead(d, meta)
return resourcePostgreSQLSchemaReadImpl(d, meta)
}
func resourcePostgreSQLSchemaDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*Client)
conn, err := client.Connect()
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
}
defer conn.Close()
txn, err := conn.Begin()
if err != nil {
return err
}
defer txn.Rollback()
schemaName := d.Get(schemaNameAttr).(string)
// NOTE(sean@): Deliberately not performing a cascading drop.
query := fmt.Sprintf("DROP SCHEMA %s", pq.QuoteIdentifier(schemaName))
_, err = conn.Query(query)
_, err = txn.Query(query)
if err != nil {
return errwrap.Wrapf("Error deleting schema: {{err}}", err)
}
if err := txn.Commit(); err != nil {
return errwrap.Wrapf("Error committing schema: {{err}}", err)
}
d.SetId("")
return nil
}
// resourcePostgreSQLSchemaExists reports whether the schema named by the
// resource ID is still present in pg_catalog.pg_namespace. It takes only a
// read lock on the catalog because no catalog mutation is performed.
func resourcePostgreSQLSchemaExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	conn, err := c.Connect()
	if err != nil {
		return false, err
	}
	defer conn.Close()

	var schemaName string
	err = conn.QueryRow("SELECT n.nspname FROM pg_catalog.pg_namespace n WHERE n.nspname=$1", d.Id()).Scan(&schemaName)
	switch {
	case err == sql.ErrNoRows:
		// No matching row: the schema is gone, which is not an error.
		return false, nil
	case err != nil:
		return false, errwrap.Wrapf("Error reading schema: {{err}}", err)
	}

	return true, nil
}
// resourcePostgreSQLSchemaRead is the schema.Resource Read entry point. It
// acquires the catalog read lock and delegates to the lock-free
// implementation so Create/Update — which already hold the write lock —
// can reuse the same read logic without self-deadlocking.
func resourcePostgreSQLSchemaRead(d *schema.ResourceData, meta interface{}) error {
	c := meta.(*Client)
	c.catalogLock.RLock()
	defer c.catalogLock.RUnlock()

	return resourcePostgreSQLSchemaReadImpl(d, meta)
}
func resourcePostgreSQLSchemaReadImpl(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
conn, err := c.Connect()
if err != nil {
@ -88,8 +259,9 @@ func resourcePostgreSQLSchemaRead(d *schema.ResourceData, meta interface{}) erro
defer conn.Close()
schemaId := d.Id()
var schemaName string
err = conn.QueryRow("SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname=$1", schemaId).Scan(&schemaName)
var schemaName, schemaOwner string
var schemaACLs []string
err = conn.QueryRow("SELECT n.nspname, pg_catalog.pg_get_userbyid(n.nspowner), COALESCE(n.nspacl, '{}'::aclitem[])::TEXT[] FROM pg_catalog.pg_namespace n WHERE n.nspname=$1", schemaId).Scan(&schemaName, &schemaOwner, pq.Array(&schemaACLs))
switch {
case err == sql.ErrNoRows:
log.Printf("[WARN] PostgreSQL schema (%s) not found", schemaId)
@ -98,7 +270,31 @@ func resourcePostgreSQLSchemaRead(d *schema.ResourceData, meta interface{}) erro
case err != nil:
return errwrap.Wrapf("Error reading schema: {{err}}", err)
default:
type RoleKey string
schemaPolicies := make(map[RoleKey]acl.Schema, len(schemaACLs))
for _, aclStr := range schemaACLs {
aclItem, err := acl.Parse(aclStr)
if err != nil {
return errwrap.Wrapf("Error parsing aclitem: {{err}}", err)
}
schemaACL, err := acl.NewSchema(aclItem)
if err != nil {
return errwrap.Wrapf("invalid perms for schema: {{err}}", err)
}
roleKey := RoleKey(strings.ToLower(schemaACL.Role))
var mergedPolicy acl.Schema
if existingRolePolicy, ok := schemaPolicies[roleKey]; ok {
mergedPolicy = existingRolePolicy.Merge(schemaACL)
} else {
mergedPolicy = schemaACL
}
schemaPolicies[roleKey] = mergedPolicy
}
d.Set(schemaNameAttr, schemaName)
d.Set(schemaOwnerAttr, schemaOwner)
d.SetId(schemaName)
return nil
}
@ -106,20 +302,41 @@ func resourcePostgreSQLSchemaRead(d *schema.ResourceData, meta interface{}) erro
func resourcePostgreSQLSchemaUpdate(d *schema.ResourceData, meta interface{}) error {
c := meta.(*Client)
c.catalogLock.Lock()
defer c.catalogLock.Unlock()
conn, err := c.Connect()
if err != nil {
return err
}
defer conn.Close()
if err := setSchemaName(conn, d); err != nil {
txn, err := conn.Begin()
if err != nil {
return err
}
defer txn.Rollback()
if err := setSchemaName(txn, d); err != nil {
return err
}
return resourcePostgreSQLSchemaRead(d, meta)
if err := setSchemaOwner(txn, d); err != nil {
return err
}
if err := setSchemaPolicy(txn, d); err != nil {
return err
}
if err := txn.Commit(); err != nil {
return errwrap.Wrapf("Error committing schema: {{err}}", err)
}
return resourcePostgreSQLSchemaReadImpl(d, meta)
}
func setSchemaName(conn *sql.DB, d *schema.ResourceData) error {
func setSchemaName(txn *sql.Tx, d *schema.ResourceData) error {
if !d.HasChange(schemaNameAttr) {
return nil
}
@ -132,10 +349,193 @@ func setSchemaName(conn *sql.DB, d *schema.ResourceData) error {
}
query := fmt.Sprintf("ALTER SCHEMA %s RENAME TO %s", pq.QuoteIdentifier(o), pq.QuoteIdentifier(n))
if _, err := conn.Query(query); err != nil {
if _, err := txn.Query(query); err != nil {
return errwrap.Wrapf("Error updating schema NAME: {{err}}", err)
}
d.SetId(n)
return nil
}
// setSchemaOwner issues `ALTER SCHEMA ... OWNER TO ...` when the owner
// attribute has changed. Returns nil without touching the database when the
// owner is unchanged; returns an error for an empty new owner.
//
// BUG FIX: the previous version built the statement as
// `ALTER SCHEMA <old-owner> OWNER TO <new-owner>` — it interpolated the old
// *owner* value where the schema *name* belongs (a copy/paste of the
// setSchemaName pattern, where the old value genuinely is the schema name).
// The statement now targets the schema by its name attribute.
func setSchemaOwner(txn *sql.Tx, d *schema.ResourceData) error {
	if !d.HasChange(schemaOwnerAttr) {
		return nil
	}

	_, nraw := d.GetChange(schemaOwnerAttr)
	n := nraw.(string)
	if n == "" {
		return errors.New("Error setting schema owner to an empty string")
	}

	schemaName := d.Get(schemaNameAttr).(string)

	// Exec, not Query: DDL returns no rows, and an unclosed *sql.Rows would
	// block subsequent statements on this transaction.
	query := fmt.Sprintf("ALTER SCHEMA %s OWNER TO %s", pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(n))
	if _, err := txn.Exec(query); err != nil {
		return errwrap.Wrapf("Error updating schema OWNER: {{err}}", err)
	}

	return nil
}
// setSchemaPolicy reconciles the schema's "policy" set inside txn: REVOKEs
// for policies that were dropped, GRANTs for policies that were added, and a
// REVOKE/GRANT pair for policies whose privileges changed. No-op when the
// policy attribute is unchanged.
func setSchemaPolicy(txn *sql.Tx, d *schema.ResourceData) error {
	if !d.HasChange(schemaPolicyAttr) {
		return nil
	}

	schemaName := d.Get(schemaNameAttr).(string)

	oraw, nraw := d.GetChange(schemaPolicyAttr)
	oldList := oraw.(*schema.Set).List()
	newList := nraw.(*schema.Set).List()
	queries := make([]string, 0, len(oldList)+len(newList))
	dropped, added, updated, _ := schemaChangedPolicies(oldList, newList)

	for _, p := range dropped {
		pMap := p.(map[string]interface{})
		rolePolicy := schemaPolicyToACL(pMap)

		// The PUBLIC role can not be DROP'ed, therefore we do not need
		// to prevent revoking against it not existing.
		if rolePolicy.Role != "" {
			var foundUser bool
			err := txn.QueryRow(`SELECT TRUE FROM pg_catalog.pg_user WHERE usename = $1`, rolePolicy.Role).Scan(&foundUser)
			switch {
			case err == sql.ErrNoRows:
				// Don't execute this role's REVOKEs because the role
				// was dropped first and therefore doesn't exist.
			case err != nil:
				return errwrap.Wrapf("Error reading schema: {{err}}", err)
			default:
				queries = append(queries, rolePolicy.Revokes(schemaName)...)
			}
		}
	}

	for _, p := range added {
		pMap := p.(map[string]interface{})
		rolePolicy := schemaPolicyToACL(pMap)
		queries = append(queries, rolePolicy.Grants(schemaName)...)
	}

	for _, p := range updated {
		policies := p.([]interface{})
		if len(policies) != 2 {
			panic("expected 2 policies, old and new")
		}

		{
			oldPolicies := policies[0].(map[string]interface{})
			rolePolicy := schemaPolicyToACL(oldPolicies)
			queries = append(queries, rolePolicy.Revokes(schemaName)...)
		}

		{
			newPolicies := policies[1].(map[string]interface{})
			rolePolicy := schemaPolicyToACL(newPolicies)
			queries = append(queries, rolePolicy.Grants(schemaName)...)
		}
	}

	for _, query := range queries {
		// FIX: Exec, not Query — GRANT/REVOKE return no rows, and the
		// previous txn.Query left an unclosed *sql.Rows on the transaction,
		// which wedges any subsequent statement on the same txn.
		if _, err := txn.Exec(query); err != nil {
			return errwrap.Wrapf("Error updating schema DCL: {{err}}", err)
		}
	}

	return nil
}
// schemaChangedPolicies walks old and new policy lists and partitions the
// roles they mention into four sets: roles dropped from the policy, roles
// added to the policy, roles with updated privileges (value is a two-element
// []interface{} of {old policy, new policy}), and roles that are unchanged.
// Role names are compared case-insensitively; map keys are the lower-cased
// role names.
func schemaChangedPolicies(old, new []interface{}) (dropped, added, update, unchanged map[string]interface{}) {
	// indexByRole maps each policy's lower-cased role name to the policy
	// map itself. Policies without a role attribute are ignored.
	indexByRole := func(policies []interface{}) map[string]interface{} {
		m := make(map[string]interface{}, len(policies))
		for _, v := range policies {
			schemaPolicy := v.(map[string]interface{})
			if roleRaw, ok := schemaPolicy[schemaPolicyRoleAttr]; ok {
				m[strings.ToLower(roleRaw.(string))] = schemaPolicy
			}
		}
		return m
	}

	oldLookupMap := indexByRole(old)
	newLookupMap := indexByRole(new)

	// Classify every role seen in the old policy set.
	dropped = make(map[string]interface{}, len(old))
	update = make(map[string]interface{}, len(new))
	unchanged = make(map[string]interface{}, len(new))
	for role, vOld := range oldLookupMap {
		vNew, ok := newLookupMap[role]
		switch {
		case !ok:
			dropped[role] = vOld
		case reflect.DeepEqual(vOld, vNew):
			unchanged[role] = vOld
		default:
			update[role] = []interface{}{vOld, vNew}
		}
	}

	// Roles present only in the new policy set were added.
	added = make(map[string]interface{}, len(new))
	for role, vNew := range newLookupMap {
		if _, ok := oldLookupMap[role]; !ok {
			added[role] = vNew
		}
	}

	return dropped, added, update, unchanged
}
// schemaPolicyToHCL converts an acl.Schema into the map form used by the
// "policy" set attribute: the role name plus one bool per privilege
// (CREATE/USAGE) and per grant option.
func schemaPolicyToHCL(s *acl.Schema) map[string]interface{} {
	return map[string]interface{}{
		schemaPolicyRoleAttr:            s.Role,
		schemaPolicyCreateAttr:          s.GetPrivilege(acl.Create),
		schemaPolicyCreateWithGrantAttr: s.GetGrantOption(acl.Create),
		schemaPolicyUsageAttr:           s.GetPrivilege(acl.Usage),
		schemaPolicyUsageWithGrantAttr:  s.GetGrantOption(acl.Usage),
	}
}
// schemaPolicyToACL builds an acl.Schema from a single "policy" attribute
// map: the role name plus the CREATE/USAGE privilege bits, where each
// *_with_grant flag implies the privilege itself and sets its grant option.
func schemaPolicyToACL(policyMap map[string]interface{}) acl.Schema {
	var policy acl.Schema

	if roleRaw, ok := policyMap[schemaPolicyRoleAttr]; ok {
		policy.Role = roleRaw.(string)
	}

	if policyMap[schemaPolicyCreateAttr].(bool) {
		policy.Privileges |= acl.Create
	}

	if policyMap[schemaPolicyCreateWithGrantAttr].(bool) {
		policy.Privileges |= acl.Create
		policy.GrantOptions |= acl.Create
	}

	if policyMap[schemaPolicyUsageAttr].(bool) {
		policy.Privileges |= acl.Usage
	}

	if policyMap[schemaPolicyUsageWithGrantAttr].(bool) {
		policy.Privileges |= acl.Usage
		policy.GrantOptions |= acl.Usage
	}

	return policy
}

View File

@ -19,13 +19,142 @@ func TestAccPostgresqlSchema_Basic(t *testing.T) {
Config: testAccPostgresqlSchemaConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckPostgresqlSchemaExists("postgresql_schema.test1", "foo"),
resource.TestCheckResourceAttr(
"postgresql_role.myrole3", "name", "myrole3"),
resource.TestCheckResourceAttr(
"postgresql_role.myrole3", "login", "true"),
resource.TestCheckResourceAttr("postgresql_role.role_all_without_grant", "name", "role_all_without_grant"),
resource.TestCheckResourceAttr("postgresql_role.role_all_without_grant", "login", "true"),
resource.TestCheckResourceAttr(
"postgresql_schema.test1", "name", "foo"),
resource.TestCheckResourceAttr("postgresql_role.role_all_with_grant", "name", "role_all_with_grant"),
resource.TestCheckResourceAttr("postgresql_schema.test1", "name", "foo"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "name", "bar"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "owner", "role_all_without_grant"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "if_not_exists", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.#", "1"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.role", "role_all_without_grant"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "name", "baz"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "owner", "role_all_without_grant"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "if_not_exists", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.#", "2"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.role", "role_all_with_grant"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.role", "role_all_without_grant"),
),
},
},
})
}
func TestAccPostgresqlSchema_AddPolicy(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckPostgresqlSchemaDestroy,
Steps: []resource.TestStep{
{
Config: testAccPostgresqlSchemaGrant1,
Check: resource.ComposeTestCheckFunc(
testAccCheckPostgresqlSchemaExists("postgresql_schema.test4", "test4"),
resource.TestCheckResourceAttr("postgresql_role.all_without_grant_stay", "name", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_role.all_without_grant_drop", "name", "all_without_grant_drop"),
resource.TestCheckResourceAttr("postgresql_role.policy_compose", "name", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_role.policy_move", "name", "policy_move"),
resource.TestCheckResourceAttr("postgresql_role.all_with_grantstay", "name", "all_with_grantstay"),
resource.TestCheckResourceAttr("postgresql_role.all_with_grantdrop", "name", "all_with_grantdrop"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "name", "test4"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "owner", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.#", "7"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.role", "all_with_grantstay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.role", "policy_move"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.role", "all_without_grant_drop"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.role", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.role", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.role", "all_with_grantdrop"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.role", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage_with_grant", "false"),
),
},
{
Config: testAccPostgresqlSchemaGrant2,
Check: resource.ComposeTestCheckFunc(
testAccCheckPostgresqlSchemaExists("postgresql_schema.test4", "test4"),
resource.TestCheckResourceAttr("postgresql_role.all_without_grant_stay", "name", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_role.all_without_grant_drop", "name", "all_without_grant_drop"),
resource.TestCheckResourceAttr("postgresql_role.policy_compose", "name", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_role.policy_move", "name", "policy_move"),
resource.TestCheckResourceAttr("postgresql_role.all_with_grantstay", "name", "all_with_grantstay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "name", "test4"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "owner", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.#", "6"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.role", "all_with_grantstay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.role", "all_without_grant_stay"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.role", "policy_move"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.role", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage_with_grant", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.role", "policy_new"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.usage_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create_with_grant", "false"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.role", "policy_compose"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage", "true"),
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage_with_grant", "false"),
),
},
},
@ -103,13 +232,187 @@ func checkSchemaExists(client *Client, schemaName string) (bool, error) {
}
}
var testAccPostgresqlSchemaConfig = `
resource "postgresql_role" "myrole3" {
name = "myrole3"
// testAccPostgresqlSchemaConfig exercises the basic schema attributes: two
// roles (with and without grant options), a bare schema (test1), a schema
// with owner/if_not_exists=false and one plain policy (test2), and a schema
// with if_not_exists=true and two policies, one of them WITH GRANT OPTION
// (test3). NOTE: the fixture text must not change — the policy.<hash>.*
// attribute checks in TestAccPostgresqlSchema_Basic depend on the set
// hashes computed from these policy blocks.
const testAccPostgresqlSchemaConfig = `
resource "postgresql_role" "role_all_without_grant" {
name = "role_all_without_grant"
login = true
}
resource "postgresql_role" "role_all_with_grant" {
name = "role_all_with_grant"
}
resource "postgresql_schema" "test1" {
name = "foo"
}
resource "postgresql_schema" "test2" {
name = "bar"
owner = "${postgresql_role.role_all_without_grant.name}"
if_not_exists = false
policy {
create = true
usage = true
role = "${postgresql_role.role_all_without_grant.name}"
}
}
resource "postgresql_schema" "test3" {
name = "baz"
owner = "${postgresql_role.role_all_without_grant.name}"
if_not_exists = true
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.role_all_with_grant.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.role_all_without_grant.name}"
}
}
`
// testAccPostgresqlSchemaGrant1 is step 1 of
// TestAccPostgresqlSchema_AddPolicy: six roles and schema test4 carrying
// seven policy blocks (four plain CREATE/USAGE grants and three WITH GRANT
// OPTION grants; policy_compose appears in both forms). Step 2
// (testAccPostgresqlSchemaGrant2) then mutates this policy set. NOTE: the
// fixture text must not change — the policy.<hash>.* attribute checks in
// the test depend on the set hashes computed from these policy blocks.
const testAccPostgresqlSchemaGrant1 = `
resource "postgresql_role" "all_without_grant_stay" {
name = "all_without_grant_stay"
}
resource "postgresql_role" "all_without_grant_drop" {
name = "all_without_grant_drop"
}
resource "postgresql_role" "policy_compose" {
name = "policy_compose"
}
resource "postgresql_role" "policy_move" {
name = "policy_move"
}
resource "postgresql_role" "all_with_grantstay" {
name = "all_with_grantstay"
}
resource "postgresql_role" "all_with_grantdrop" {
name = "all_with_grantdrop"
}
resource "postgresql_schema" "test4" {
name = "test4"
owner = "${postgresql_role.all_without_grant_stay.name}"
policy {
create = true
usage = true
role = "${postgresql_role.all_without_grant_stay.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.all_without_grant_drop.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.policy_compose.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.policy_move.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.all_with_grantstay.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.all_with_grantdrop.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.policy_compose.name}"
}
}
`
// testAccPostgresqlSchemaGrant2 is step 2 of
// TestAccPostgresqlSchema_AddPolicy. Relative to
// testAccPostgresqlSchemaGrant1 it leaves schema test4 with six policy
// blocks: the policies for all_without_grant_drop and all_with_grantdrop
// are dropped, policy_move switches from plain privileges to WITH GRANT
// OPTION, and a plain policy for the new role policy_new is added. NOTE:
// the fixture text must not change — the policy.<hash>.* attribute checks
// in the test depend on the set hashes computed from these policy blocks.
const testAccPostgresqlSchemaGrant2 = `
resource "postgresql_role" "all_without_grant_stay" {
name = "all_without_grant_stay"
}
resource "postgresql_role" "all_without_grant_drop" {
name = "all_without_grant_drop"
}
resource "postgresql_role" "policy_compose" {
name = "policy_compose"
}
resource "postgresql_role" "policy_move" {
name = "policy_move"
}
resource "postgresql_role" "all_with_grantstay" {
name = "all_with_grantstay"
}
resource "postgresql_role" "policy_new" {
name = "policy_new"
}
resource "postgresql_schema" "test4" {
name = "test4"
owner = "${postgresql_role.all_without_grant_stay.name}"
policy {
create = true
usage = true
role = "${postgresql_role.all_without_grant_stay.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.policy_compose.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.all_with_grantstay.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.policy_compose.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.policy_move.name}"
}
policy {
create = true
usage = true
role = "${postgresql_role.policy_new.name}"
}
}
`

View File

@ -1,4 +0,0 @@
.db
*.test
*~
*.swp

71
vendor/github.com/lib/pq/.travis.yml generated vendored
View File

@ -1,71 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- tip
before_install:
- psql --version
- sudo /etc/init.d/postgresql stop
- sudo apt-get -y --purge remove postgresql libpq-dev libpq5 postgresql-client-common postgresql-common
- sudo rm -rf /var/lib/postgresql
- wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
- sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list"
- sudo apt-get update -qq
- sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION
- sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostnossl all pqgossltest 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostnossl all pqgosslcert 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostssl all pqgossltest 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostssl all pqgosslcert 127.0.0.1/32 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "host all all 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostnossl all pqgossltest ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostnossl all pqgosslcert ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostssl all pqgossltest ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "hostssl all pqgosslcert ::1/128 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- echo "host all all ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ certs/server.key certs/server.crt certs/root.crt
- sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_cert_file = 'server.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)"
- sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_key_file = 'server.key'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)"
- sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_ca_file = 'root.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)"
- sudo sh -c "echo 127.0.0.1 postgres >> /etc/hosts"
- sudo ls -l /var/lib/postgresql/$PGVERSION/main/
- sudo cat /etc/postgresql/$PGVERSION/main/postgresql.conf
- sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
- sudo /etc/init.d/postgresql restart
env:
global:
- PGUSER=postgres
- PQGOSSLTESTS=1
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
matrix:
- PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=yes
- PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=no
- PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=no
script:
- go test -v ./...
before_script:
- psql -c 'create database pqgotest' -U postgres
- psql -c 'create user pqgossltest' -U postgres
- psql -c 'create user pqgosslcert' -U postgres

105
vendor/github.com/lib/pq/README.md generated vendored
View File

@ -1,105 +0,0 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq)
## Install
go get github.com/lib/pq
## Docs
For detailed documentation and basic usage examples, please see the package
documentation at <http://godoc.org/github.com/lib/pq>.
## Tests
`go test` is used for testing. A running PostgreSQL server is
required, with the ability to log in. The default database to connect
to test with is "pqgotest," but it can be overridden using environment
variables.
Example:
PGHOST=/var/run/postgresql go test github.com/lib/pq
Optionally, a benchmark suite can be run as part of the tests:
PGHOST=/var/run/postgresql go test -bench .
## Features
* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
## Future / Things you can help with
* Better COPY FROM / COPY TO (see discussion in #181)
## Thank you (alphabetical)
Some of these contributors are from the original library `bmizerany/pq.go` whose
code still exists in here.
* Andy Balholm (andybalholm)
* Ben Berkert (benburkert)
* Benjamin Heatwole (bheatwole)
* Bill Mill (llimllib)
* Bjørn Madsen (aeons)
* Blake Gentry (bgentry)
* Brad Fitzpatrick (bradfitz)
* Charlie Melbye (cmelbye)
* Chris Bandy (cbandy)
* Chris Gilling (cgilling)
* Chris Walsh (cwds)
* Dan Sosedoff (sosedoff)
* Daniel Farina (fdr)
* Eric Chlebek (echlebek)
* Eric Garrido (minusnine)
* Eric Urban (hydrogen18)
* Everyone at The Go Team
* Evan Shaw (edsrzf)
* Ewan Chou (coocood)
* Fazal Majid (fazalmajid)
* Federico Romero (federomero)
* Fumin (fumin)
* Gary Burd (garyburd)
* Heroku (heroku)
* James Pozdena (jpoz)
* Jason McVetta (jmcvetta)
* Jeremy Jay (pbnjay)
* Joakim Sernbrant (serbaut)
* John Gallagher (jgallagher)
* Jonathan Rudenberg (titanous)
* Joël Stemmer (jstemmer)
* Kamil Kisiel (kisielk)
* Kelly Dunn (kellydunn)
* Keith Rarick (kr)
* Kir Shatrov (kirs)
* Lann Martin (lann)
* Maciek Sakrejda (deafbybeheading)
* Marc Brinkmann (mbr)
* Marko Tiikkaja (johto)
* Matt Newberry (MattNewberry)
* Matt Robenolt (mattrobenolt)
* Martin Olsen (martinolsen)
* Mike Lewis (mikelikespie)
* Nicolas Patry (Narsil)
* Oliver Tonnhofer (olt)
* Patrick Hayes (phayes)
* Paul Hammond (paulhammond)
* Ryan Smith (ryandotsmith)
* Samuel Stauffer (samuel)
* Timothée Peignier (cyberdelia)
* Travis Cline (tmc)
* TruongSinh Tran-Nguyen (truongsinh)
* Yaismel Miranda (ympons)
* notedit (notedit)

727
vendor/github.com/lib/pq/array.go generated vendored Normal file
View File

@ -0,0 +1,727 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
// Interface/slice types resolved once at package init so the reflection
// paths below do not re-derive them per call. typeSqlScanner is consulted
// by GenericArray.evaluateDestination; typeByteSlice and typeDriverValuer
// are presumably used by the element-encoding code later in this file —
// their use sites are outside this view.
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSqlScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
//  db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
//  var x []sql.NullInt64
//  db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported.  Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
	driver.Valuer
	sql.Scanner
} {
	// Fast paths: the four common element types have dedicated
	// implementations; both value and pointer forms are accepted.
	switch a := a.(type) {
	case []bool:
		return (*BoolArray)(&a)
	case []float64:
		return (*Float64Array)(&a)
	case []int64:
		return (*Int64Array)(&a)
	case []string:
		return (*StringArray)(&a)

	case *[]bool:
		return (*BoolArray)(a)
	case *[]float64:
		return (*Float64Array)(a)
	case *[]int64:
		return (*Int64Array)(a)
	case *[]string:
		return (*StringArray)(a)
	}

	// Everything else falls back to the reflection-based GenericArray.
	return GenericArray{a}
}
// ArrayDelimiter may be optionally implemented by driver.Valuer or
// sql.Scanner to override the array delimiter used by GenericArray
// (which otherwise defaults to ",").
type ArrayDelimiter interface {
	// ArrayDelimiter returns the delimiter character(s) for this element's type.
	ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool

// Scan implements the sql.Scanner interface. It accepts the array literal
// either as []byte or string; any other source type is an error.
func (a *BoolArray) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	}

	return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}

// scanBytes parses a PostgreSQL boolean array literal (e.g. `{t,f}`) into *a.
// The slice is only replaced once the whole literal parses successfully.
func (a *BoolArray) scanBytes(src []byte) error {
	// scanLinearArray (defined elsewhere in this file) splits the literal
	// into raw comma-separated elements.
	elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
	if err != nil {
		return err
	}
	if len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(BoolArray, len(elems))
		for i, v := range elems {
			// Each element must be exactly one byte: 't' or 'f'.
			if len(v) != 1 {
				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
			}
			switch v[0] {
			case 't':
				b[i] = true
			case 'f':
				b[i] = false
			default:
				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
			}
		}
		*a = b
	}
	return nil
}

// Value implements the driver.Valuer interface. A nil BoolArray encodes as
// SQL NULL; an empty one as "{}".
func (a BoolArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be exactly two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 1+2*n)

		// Write ",<t|f>" pairs; the leading comma slot of the first pair
		// is overwritten by '{' below.
		for i := 0; i < n; i++ {
			b[2*i] = ','
			if a[i] {
				b[1+2*i] = 't'
			} else {
				b[1+2*i] = 'f'
			}
		}

		b[0] = '{'
		b[2*n] = '}'

		return string(b), nil
	}

	return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte

// Scan implements the sql.Scanner interface. It accepts the array literal
// either as []byte or string; any other source type is an error.
func (a *ByteaArray) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	}

	return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}

// scanBytes parses a bytea array literal into *a, decoding each element
// with parseBytea (defined elsewhere in this file).
func (a *ByteaArray) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
	if err != nil {
		return err
	}
	if len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(ByteaArray, len(elems))
		for i, v := range elems {
			b[i], err = parseBytea(v)
			if err != nil {
				return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
			}
		}
		*a = b
	}
	return nil
}

// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer. A nil ByteaArray encodes as
// SQL NULL; an empty one as "{}".
func (a ByteaArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be at least two curly brackets, 2*N bytes of quotes,
		// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
		size := 1 + 6*n
		for _, x := range a {
			size += hex.EncodedLen(len(x))
		}

		b := make([]byte, size)

		// Emit `,"\\x<hex>"` per element into successive windows of b; the
		// first element's leading comma slot is overwritten by '{' below.
		for i, s := 0, b; i < n; i++ {
			o := copy(s, `,"\\x`)
			o += hex.Encode(s[o:], a[i])
			s[o] = '"'
			s = s[o+1:]
		}

		b[0] = '{'
		b[size-1] = '}'

		return string(b), nil
	}

	return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64

// Scan implements the sql.Scanner interface. It accepts the array literal
// either as []byte or string; any other source type is an error.
func (a *Float64Array) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	}

	return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}

// scanBytes parses a float array literal into *a; each element is decoded
// with strconv.ParseFloat.
func (a *Float64Array) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
	if err != nil {
		return err
	}
	if len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(Float64Array, len(elems))
		for i, v := range elems {
			if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
				return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
			}
		}
		*a = b
	}
	return nil
}

// Value implements the driver.Valuer interface. A nil Float64Array encodes
// as SQL NULL; an empty one as "{}".
func (a Float64Array) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be at least two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 1, 1+2*n)
		b[0] = '{'

		b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
		for i := 1; i < n; i++ {
			b = append(b, ',')
			b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
		}

		return string(append(b, '}')), nil
	}

	return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }

// evaluateDestination inspects an element type rt and returns the element
// type to allocate, an assign function that decodes one raw element into a
// destination reflect.Value, and the delimiter to split elements on
// (defaults to ",", overridable via the ArrayDelimiter interface).
// Currently only element types whose pointer implements sql.Scanner get a
// working assign function; anything else gets one that always errors.
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
	var assign func([]byte, reflect.Value) error
	var del = ","

	// TODO calculate the assign function for other types
	// TODO repeat this section on the element type of arrays or slices (multidimensional)
	{
		if reflect.PtrTo(rt).Implements(typeSqlScanner) {
			// dest is always addressable because it is an element of a slice.
			assign = func(src []byte, dest reflect.Value) (err error) {
				ss := dest.Addr().Interface().(sql.Scanner)
				if src == nil {
					err = ss.Scan(nil)
				} else {
					err = ss.Scan(src)
				}
				return
			}
			goto FoundType
		}

		assign = func([]byte, reflect.Value) error {
			return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
		}
	}

FoundType:

	if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
		del = ad.ArrayDelimiter()
	}

	return rt, assign, del
}

// Scan implements the sql.Scanner interface. A.A must be a non-nil pointer
// to a slice or array; src must be []byte or string.
func (a GenericArray) Scan(src interface{}) error {
	dpv := reflect.ValueOf(a.A)
	switch {
	case dpv.Kind() != reflect.Ptr:
		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
	case dpv.IsNil():
		return fmt.Errorf("pq: destination %T is nil", a.A)
	}

	dv := dpv.Elem()
	switch dv.Kind() {
	case reflect.Slice:
	case reflect.Array:
	default:
		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
	}

	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src, dv)
	case string:
		return a.scanBytes([]byte(src), dv)
	}

	return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}

// scanBytes parses an array literal into the destination value dv. It
// delegates splitting to parseArray (defined elsewhere in this file),
// rejects multi-dimensional input, checks fixed-array lengths, and decodes
// each element via the assign function from evaluateDestination.
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
	dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
	dims, elems, err := parseArray(src, []byte(del))
	if err != nil {
		return err
	}

	// TODO allow multidimensional

	if len(dims) > 1 {
		return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
			strings.Replace(fmt.Sprint(dims), " ", "][", -1))
	}

	// Treat a zero-dimensional array like an array with a single dimension of zero.
	if len(dims) == 0 {
		dims = append(dims, 0)
	}

	// For fixed-size array destinations, the parsed dimension must match
	// the array's declared length exactly.
	for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
		switch rt.Kind() {
		case reflect.Slice:
		case reflect.Array:
			if rt.Len() != dims[i] {
				return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
					strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
			}
		default:
			// TODO handle multidimensional
		}
	}

	// Decode into a fresh slice first so dv is only modified on success.
	values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
	for i, e := range elems {
		if err := assign(e, values.Index(i)); err != nil {
			return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
		}
	}

	// TODO handle multidimensional

	switch dv.Kind() {
	case reflect.Slice:
		dv.Set(values.Slice(0, dims[0]))
	case reflect.Array:
		for i := 0; i < dims[0]; i++ {
			dv.Index(i).Set(values.Index(i))
		}
	}

	return nil
}

// Value implements the driver.Valuer interface. A nil A encodes as SQL
// NULL; an empty array or slice as "{}". Encoding of individual elements
// is delegated to appendArray.
func (a GenericArray) Value() (driver.Value, error) {
	if a.A == nil {
		return nil, nil
	}

	rv := reflect.ValueOf(a.A)

	if k := rv.Kind(); k != reflect.Array && k != reflect.Slice {
		return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
	}

	if n := rv.Len(); n > 0 {
		// There will be at least two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 0, 1+2*n)

		b, _, err := appendArray(b, rv, n)
		return string(b), err
	}

	return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64

// Scan implements the sql.Scanner interface. It accepts the array literal
// either as []byte or string; any other source type is an error.
func (a *Int64Array) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		return a.scanBytes(src)
	case string:
		return a.scanBytes([]byte(src))
	}

	return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}

// scanBytes parses an integer array literal into *a; each element is
// decoded with strconv.ParseInt in base 10.
func (a *Int64Array) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
	if err != nil {
		return err
	}
	if len(elems) == 0 {
		*a = (*a)[:0]
	} else {
		b := make(Int64Array, len(elems))
		for i, v := range elems {
			if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
				return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
			}
		}
		*a = b
	}
	return nil
}

// Value implements the driver.Valuer interface. A nil Int64Array encodes
// as SQL NULL; an empty one as "{}".
func (a Int64Array) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}

	if n := len(a); n > 0 {
		// There will be at least two curly brackets, N bytes of values,
		// and N-1 bytes of delimiters.
		b := make([]byte, 1, 1+2*n)
		b[0] = '{'

		b = strconv.AppendInt(b, a[0], 10)
		for i := 1; i < n; i++ {
			b = append(b, ',')
			b = strconv.AppendInt(b, a[i], 10)
		}

		return string(append(b, '}')), nil
	}

	return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		return a.scanBytes(v)
	case string:
		return a.scanBytes([]byte(v))
	}

	return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
// scanBytes parses the array literal in src into the receiver.
func (a *StringArray) scanBytes(src []byte) error {
	elems, err := scanLinearArray(src, []byte{','}, "StringArray")
	if err != nil {
		return err
	}

	if len(elems) == 0 {
		// Keep the caller's backing array; just truncate to zero length.
		*a = (*a)[:0]
		return nil
	}

	out := make(StringArray, len(elems))
	for i, raw := range elems {
		// A nil element is a SQL NULL, which has no string representation.
		if raw == nil {
			return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
		}
		out[i] = string(raw)
	}
	*a = out

	return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
	if a == nil {
		return nil, nil
	}
	if len(a) == 0 {
		return "{}", nil
	}

	// There will be at least two curly brackets, 2*N bytes of quotes,
	// and N-1 bytes of delimiters.
	buf := make([]byte, 0, 1+3*len(a))
	buf = append(buf, '{')
	for i, s := range a {
		if i > 0 {
			buf = append(buf, ',')
		}
		buf = appendArrayQuotedBytes(buf, []byte(s))
	}
	buf = append(buf, '}')

	return string(buf), nil
}
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
	var (
		delim string
		err   error
	)

	b = append(b, '{')

	for i := 0; i < n; i++ {
		// The delimiter reported by the previous element separates it
		// from this one; nothing precedes the first element.
		if i > 0 {
			b = append(b, delim...)
		}
		if b, delim, err = appendArrayElement(b, rv.Index(i)); err != nil {
			return b, delim, err
		}
	}

	return append(b, '}'), delim, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
	// A nested array/slice that is not []byte and does not implement
	// driver.Valuer itself is emitted recursively as a sub-array.
	if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
		if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
			if n := rv.Len(); n > 0 {
				return appendArray(b, rv, n)
			}
			// Empty sub-array: nothing appended, no delimiter preference.
			return b, "", nil
		}
	}
	// "," is the default delimiter; an element may override it via the
	// ArrayDelimiter interface.
	var del string = ","
	var err error
	var iv interface{} = rv.Interface()
	if ad, ok := iv.(ArrayDelimiter); ok {
		del = ad.ArrayDelimiter()
	}
	// Reduce the element to a driver.Value before encoding it.
	if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
		return b, del, err
	}
	switch v := iv.(type) {
	case nil:
		return append(b, "NULL"...), del, nil
	case []byte:
		return appendArrayQuotedBytes(b, v), del, nil
	case string:
		return appendArrayQuotedBytes(b, []byte(v)), del, nil
	}
	// Any other driver.Value falls back to the generic text encoder.
	b, err = appendValue(b, iv)
	return b, del, err
}
// appendArrayQuotedBytes appends v to b as a double-quoted array element,
// backslash-escaping any embedded '"' or '\' characters.
func appendArrayQuotedBytes(b, v []byte) []byte {
	b = append(b, '"')
	for len(v) > 0 {
		i := bytes.IndexAny(v, `"\`)
		if i < 0 {
			// No more characters need escaping.
			b = append(b, v...)
			break
		}
		// Copy the clean prefix, then the escaped character.
		b = append(b, v[:i]...)
		b = append(b, '\\', v[i])
		v = v[i+1:]
	}
	return append(b, '"')
}
// appendValue appends the text encoding of the driver.Value v to b,
// delegating the encoding itself to encode. The error return is always nil
// here; the signature matches what appendArrayElement expects.
func appendValue(b []byte, v driver.Value) ([]byte, error) {
	return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
	var depth, i int
	if len(src) < 1 || src[0] != '{' {
		return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
	}
// Open consumes the leading run of '{' to establish the nesting depth; an
// immediate '}' means the array is empty and parsing skips straight to Close.
Open:
	for i < len(src) {
		switch src[i] {
		case '{':
			depth++
			i++
		case '}':
			elems = make([][]byte, 0)
			goto Close
		default:
			break Open
		}
	}
	// i now equals the nesting depth: one dimension counter per level.
	dims = make([]int, i)
// Element parses a single element: a nested '{' opens another dimension, a
// '"' starts a quoted element with backslash escapes, and anything else is an
// unquoted token running up to the delimiter or '}' (the bare token NULL
// becomes a nil element).
Element:
	for i < len(src) {
		switch src[i] {
		case '{':
			depth++
			dims[depth-1] = 0
			i++
		case '"':
			var elem = []byte{}
			var escape bool
			for i++; i < len(src); i++ {
				if escape {
					elem = append(elem, src[i])
					escape = false
				} else {
					switch src[i] {
					default:
						elem = append(elem, src[i])
					case '\\':
						escape = true
					case '"':
						elems = append(elems, elem)
						i++
						break Element
					}
				}
			}
		default:
			for start := i; i < len(src); i++ {
				if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
					elem := src[start:i]
					// An empty unquoted token is malformed input.
					if len(elem) == 0 {
						return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
					}
					if bytes.Equal(elem, []byte("NULL")) {
						elem = nil
					}
					elems = append(elems, elem)
					break Element
				}
			}
		}
	}
	// After an element: a delimiter continues the current dimension (back to
	// Element), while '}' completes it and pops one nesting level.
	for i < len(src) {
		if bytes.HasPrefix(src[i:], del) {
			dims[depth-1]++
			i += len(del)
			goto Element
		} else if src[i] == '}' {
			dims[depth-1]++
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}
// Close consumes the trailing '}' brackets of an empty array; anything else
// left over is malformed input.
Close:
	for i < len(src) {
		if src[i] == '}' && depth > 0 {
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}
	if depth > 0 {
		err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
	}
	if err == nil {
		for _, d := range dims {
			// NOTE(review): a zero dimension here would divide by zero;
			// presumably unreachable for backend-emitted input — confirm.
			if (len(elems) % d) != 0 {
				err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
			}
		}
	}
	return
}
// scanLinearArray parses src as a one-dimensional array and returns its
// elements; typ names the destination Go type for error messages. It fails
// when the input is a multidimensional array.
func scanLinearArray(src, del []byte, typ string) ([][]byte, error) {
	dims, elems, err := parseArray(src, del)
	if err != nil {
		return nil, err
	}
	if len(dims) > 1 {
		return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s",
			strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
	}
	return elems, nil
}

36
vendor/github.com/lib/pq/conn.go generated vendored
View File

@ -164,7 +164,7 @@ func (c *conn) handlePgpass(o values) {
return
}
mode := fileinfo.Mode()
if mode & (0x77) != 0 {
if mode&(0x77) != 0 {
// XXX should warn about incorrect .pgpass permissions as psql does
return
}
@ -180,7 +180,7 @@ func (c *conn) handlePgpass(o values) {
db := o.Get("dbname")
username := o.Get("user")
// From: https://github.com/tg/pgpass/blob/master/reader.go
getFields := func (s string) []string {
getFields := func(s string) []string {
fs := make([]string, 0, 5)
f := make([]rune, 0, len(s))
@ -200,7 +200,7 @@ func (c *conn) handlePgpass(o values) {
}
}
return append(fs, string(f))
}
}
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
@ -210,7 +210,7 @@ func (c *conn) handlePgpass(o values) {
if len(split) != 5 {
continue
}
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
o["password"] = split[4]
return
}
@ -362,7 +362,7 @@ func network(o values) (string, string) {
return "unix", sockPath
}
return "tcp", host + ":" + o.Get("port")
return "tcp", net.JoinHostPort(host, o.Get("port"))
}
type values map[string]string
@ -614,8 +614,6 @@ func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err
func (cn *conn) simpleQuery(q string) (res *rows, err error) {
defer cn.errRecover(&err)
st := &stmt{cn: cn, name: ""}
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
@ -634,10 +632,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) {
}
if res == nil {
res = &rows{
cn: cn,
colNames: st.colNames,
colTyps: st.colTyps,
colFmts: st.colFmts,
cn: cn,
}
}
res.done = true
@ -973,8 +968,23 @@ func (cn *conn) ssl(o values) {
verifyCaOnly := false
tlsConf := tls.Config{}
switch mode := o.Get("sslmode"); mode {
case "require", "":
// "require" is the default.
case "", "require":
// We must skip TLS's own verification since it requires full
// verification since Go 1.3.
tlsConf.InsecureSkipVerify = true
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
// Note: For backwards compatibility with earlier versions of PostgreSQL, if a
// root CA file exists, the behavior of sslmode=require will be the same as
// that of verify-ca, meaning the server certificate is validated against the
// CA. Relying on this behavior is discouraged, and applications that need
// certificate validation should always use verify-ca or verify-full.
if _, err := os.Stat(o.Get("sslrootcert")); err == nil {
verifyCaOnly = true
} else {
o.Set("sslrootcert", "")
}
case "verify-ca":
// We must skip TLS's own verification since it requires full
// verification since Go 1.3.
@ -985,7 +995,7 @@ func (cn *conn) ssl(o values) {
case "disable":
return
default:
errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode)
errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
cn.setupSSLClientCertificates(&tlsConf, o)

179
vendor/github.com/lib/pq/encode.go generated vendored
View File

@ -5,6 +5,7 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"strconv"
@ -22,7 +23,6 @@ func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
default:
return encode(parameterStatus, x, oid.T_unknown)
}
panic("not reached")
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
@ -56,10 +56,13 @@ func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) [
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
if f == formatBinary {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
} else {
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
@ -75,7 +78,7 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter
return int64(int16(binary.BigEndian.Uint16(s)))
default:
errorf("don't know how to decode binary parameter of type %u", uint32(typ))
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
@ -83,8 +86,14 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
return parseBytea(s)
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
@ -195,16 +204,39 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time {
return t
}
func expect(str, char string, pos int) {
if c := str[pos : pos+1]; c != char {
errorf("expected '%v' at position %v; got '%v'", char, pos, c)
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func mustAtoi(str string) int {
result, err := strconv.Atoi(str)
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
errorf("expected number; got '%v'", str)
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
@ -219,7 +251,7 @@ type locationCache struct {
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache *locationCache = newLocationCache()
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
@ -249,26 +281,26 @@ const (
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
/*
* If EnableInfinityTs is not called, "-infinity" and "infinity" will return
* []byte("-infinity") and []byte("infinity") respectively, and potentially
* cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time",
* when scanning into a time.Time value.
*
* Once EnableInfinityTs has been called, all connections created using this
* driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
* "timestamp with time zone" and "date" types to the predefined minimum and
* maximum times, respectively. When encoding time.Time values, any time which
* equals or preceeds the predefined minimum time will be encoded to
* "-infinity". Any values at or past the maximum time will similarly be
* encoded to "infinity".
*
*
* If EnableInfinityTs is called with negative >= positive, it will panic.
* Calling EnableInfinityTs after a connection has been established results in
* undefined behavior. If EnableInfinityTs is called more than once, it will
* panic.
*/
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
@ -305,28 +337,41 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := mustAtoi(str[:monSep])
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := mustAtoi(str[monSep+1 : daySep])
expect(str, "-", daySep)
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := mustAtoi(str[daySep+1 : timeSep])
day := p.mustAtoi(str, daySep+1, timeSep)
var hour, minute, second int
if len(str) > monSep+len("01-01")+1 {
expect(str, " ", timeSep)
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
expect(str, ":", minSep)
hour = mustAtoi(str[timeSep+1 : minSep])
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
expect(str, ":", secSep)
minute = mustAtoi(str[minSep+1 : secSep])
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = mustAtoi(str[secSep+1 : secEnd])
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
@ -337,49 +382,50 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." {
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := mustAtoi(str[fracStart : fracStart+fracOff])
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") {
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
if c := str[tzStart : tzStart+1]; c == "-" {
switch c := str[tzStart]; c {
case '-':
tzSign = -1
} else if c == "+" {
case '+':
tzSign = +1
} else {
errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := mustAtoi(str[tzStart+1 : tzStart+3])
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" {
tzMin = mustAtoi(str[tzStart+4 : tzStart+6])
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" {
tzSec = mustAtoi(str[tzStart+7 : tzStart+9])
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
errorf("expected end of input, got %v", str[remainderIdx:])
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
@ -396,11 +442,11 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
}
}
return t
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) (b []byte) {
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
@ -411,6 +457,11 @@ func formatTs(t time.Time) (b []byte) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
@ -420,7 +471,7 @@ func formatTs(t time.Time) (b []byte) {
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = []byte(t.Format(time.RFC3339Nano))
b := []byte(t.Format(time.RFC3339Nano))
_, offset := t.Zone()
offset = offset % 60
@ -445,14 +496,14 @@ func formatTs(t time.Time) (b []byte) {
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte) {
func parseBytea(s []byte) (result []byte, err error) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
errorf("%s", err)
return nil, err
}
} else {
// bytea_output = escape
@ -467,11 +518,11 @@ func parseBytea(s []byte) (result []byte) {
// '\\' followed by an octal number
if len(s) < 4 {
errorf("invalid bytea sequence %v", s)
return nil, fmt.Errorf("invalid bytea sequence %v", s)
}
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
if err != nil {
errorf("could not parse bytea value: %s", err.Error())
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
@ -489,7 +540,7 @@ func parseBytea(s []byte) (result []byte) {
}
}
return result
return result, nil
}
func encodeBytea(serverVersion int, v []byte) (result []byte) {

22
vendor/github.com/lib/pq/notify.go generated vendored
View File

@ -62,14 +62,18 @@ type ListenerConn struct {
// Creates a new ListenerConn. Use NewListener instead.
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
cn, err := Open(name)
return newDialListenerConn(defaultDialer{}, name, notificationChan)
}
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
cn, err := DialOpen(d, name)
if err != nil {
return nil, err
}
l := &ListenerConn{
cn: cn.(*conn),
notificationChan: notificationChan,
notificationChan: c,
connState: connStateIdle,
replyChan: make(chan message, 2),
}
@ -391,6 +395,7 @@ type Listener struct {
name string
minReconnectInterval time.Duration
maxReconnectInterval time.Duration
dialer Dialer
eventCallback EventCallbackType
lock sync.Mutex
@ -421,10 +426,21 @@ func NewListener(name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
}
// NewDialListener is like NewListener but it takes a Dialer.
func NewDialListener(d Dialer,
name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
l := &Listener{
name: name,
minReconnectInterval: minReconnectInterval,
maxReconnectInterval: maxReconnectInterval,
dialer: d,
eventCallback: eventCallback,
channels: make(map[string]struct{}),
@ -660,7 +676,7 @@ func (l *Listener) closed() bool {
func (l *Listener) connect() error {
notificationChan := make(chan *Notification, 32)
cn, err := NewListenerConn(l.name, notificationChan)
cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
if err != nil {
return err
}

8
vendor/github.com/lib/pq/url.go generated vendored
View File

@ -2,6 +2,7 @@ package pq
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
@ -54,12 +55,11 @@ func ParseURL(url string) (string, error) {
accrue("password", v)
}
i := strings.Index(u.Host, ":")
if i < 0 {
if host, port, err := net.SplitHostPort(u.Host); err != nil {
accrue("host", u.Host)
} else {
accrue("host", u.Host[:i])
accrue("port", u.Host[i+1:])
accrue("host", host)
accrue("port", port)
}
if u.Path != "" {

View File

@ -1,6 +1,6 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
package pq

25
vendor/github.com/sean-/postgresql-acl/LICENSE generated vendored Normal file
View File

@ -0,0 +1,25 @@
BSD 2-Clause License
Copyright (c) 2016, Sean Chittenden
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

270
vendor/github.com/sean-/postgresql-acl/acl.go generated vendored Normal file
View File

@ -0,0 +1,270 @@
package acl
import (
"bytes"
"fmt"
"strings"
"github.com/lib/pq"
)
// ACL represents a single PostgreSQL `aclitem` entry.
type ACL struct {
	// Privileges is the bitmask of privileges granted to Role.
	Privileges Privileges

	// GrantOptions is the bitmask of privileges that Role may in turn
	// grant to others (the '*' markers in the aclitem string).
	GrantOptions Privileges

	// Role is the grantee; the empty string denotes PUBLIC.
	Role string

	// GrantedBy is the role that granted these privileges (the part after
	// '/' in the aclitem string), when present.
	GrantedBy string
}
// GetGrantOption returns true if the acl has the grant option set for the
// specified privilege.
func (a ACL) GetGrantOption(priv Privileges) bool {
	// Idiomatic form of `if cond { return true }; return false`.
	return a.GrantOptions&priv != 0
}
// GetPrivilege returns true if the acl has the specified privilege set.
func (a ACL) GetPrivilege(priv Privileges) bool {
	// Idiomatic form of `if cond { return true }; return false`.
	return a.Privileges&priv != 0
}
// Parse parses a PostgreSQL aclitem string and returns an ACL
func Parse(aclStr string) (ACL, error) {
	idx := strings.IndexByte(aclStr, '=')
	if idx == -1 {
		return ACL{}, fmt.Errorf("invalid aclStr format: %+q", aclStr)
	}

	acl := ACL{Role: aclStr[:idx]}
	aclLen := len(aclStr)

	// Privilege code characters, per postgresql's ACL_ALL_RIGHTS_STR.
	privByChar := map[byte]Privileges{
		'a': Insert,
		'r': Select,
		'w': Update,
		'd': Delete,
		'D': Truncate,
		'x': References,
		't': Trigger,
		'X': Execute,
		'U': Usage,
		'C': Create,
		'T': Temporary,
		'c': Connect,
	}

	var i int

	// withGrant reports whether the privilege character at position i is
	// followed by a '*' (grant option) marker, consuming it when present.
	withGrant := func() bool {
		if i+1 >= aclLen {
			return false
		}

		if aclStr[i+1] == '*' {
			i++
			return true
		}

		return false
	}

	for i = idx + 1; i < aclLen; i++ {
		c := aclStr[i]

		// Everything after '/' names the granting role.
		if c == '/' {
			if i+1 <= aclLen {
				acl.GrantedBy = aclStr[i+1:]
			}
			break
		}

		priv, ok := privByChar[c]
		if !ok {
			return ACL{}, fmt.Errorf("invalid byte %c in aclitem at %d: %+q", aclStr[i], i, aclStr)
		}

		acl.Privileges |= priv
		if withGrant() {
			acl.GrantOptions |= priv
		}
	}

	return acl, nil
}
// String produces a PostgreSQL aclitem-compatible string
func (a ACL) String() string {
	out := a.Role + "=" + permString(a.Privileges, a.GrantOptions)
	if a.GrantedBy != "" {
		out += "/" + a.GrantedBy
	}

	return out
}
// permString is a small helper function that emits the permission bitmask as a
// string.
func permString(perms, grantOptions Privileges) string {
	// From postgresql/src/include/utils/acl.h:
	//
	// /* string holding all privilege code chars, in order by bitmask position */
	// #define ACL_ALL_RIGHTS_STR "arwdDxtXUCTc"
	codes := []struct {
		priv Privileges
		c    byte
	}{
		{Insert, 'a'},
		{Select, 'r'},
		{Update, 'w'},
		{Delete, 'd'},
		{Truncate, 'D'},
		{References, 'x'},
		{Trigger, 't'},
		{Execute, 'X'},
		{Usage, 'U'},
		{Create, 'C'},
		{Temporary, 'T'},
		{Connect, 'c'},
	}

	// Worst case: every privilege present, each followed by a '*' marker.
	out := make([]byte, 0, int(numPrivileges)*2)
	for _, pc := range codes {
		if perms&pc.priv == 0 {
			continue
		}
		out = append(out, pc.c)
		if grantOptions&pc.priv != 0 {
			out = append(out, '*')
		}
	}

	return string(out)
}
// quoteRole is a small helper function that handles the quoting of a role name,
// or PUBLIC, if no role is specified.
func quoteRole(role string) string {
	// An empty role in an aclitem means the grant applies to PUBLIC, which
	// is a keyword and must not be quoted.
	if role == "" {
		return "PUBLIC"
	}
	return pq.QuoteIdentifier(role)
}
// validRights checks to make sure a given acl's permissions and grant options
// don't exceed the specified mask of valid privileges.
func validRights(acl ACL, validPrivs Privileges) bool {
	// OR-ing a bitmask with validPrivs is a no-op exactly when no bit
	// outside validPrivs is set, so equality means "within the mask".
	return (acl.Privileges|validPrivs) == validPrivs &&
		(acl.GrantOptions|validPrivs) == validPrivs
}

17
vendor/github.com/sean-/postgresql-acl/column.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Column models the privileges of a column aclitem
type Column struct {
	ACL
}

// NewColumn parses an ACL object and returns a Column object.
func NewColumn(acl ACL) (Column, error) {
	// Reject any privilege or grant-option bit outside the column mask.
	if validRights(acl, validColumnPrivs) {
		return Column{ACL: acl}, nil
	}
	return Column{}, fmt.Errorf("invalid flags set for column (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validColumnPrivs)
}

17
vendor/github.com/sean-/postgresql-acl/database.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Database models the privileges of a database aclitem
type Database struct {
	ACL
}

// NewDatabase parses an ACL object and returns a Database object.
func NewDatabase(acl ACL) (Database, error) {
	// Reject any privilege or grant-option bit outside the database mask.
	if validRights(acl, validDatabasePrivs) {
		return Database{ACL: acl}, nil
	}
	return Database{}, fmt.Errorf("invalid flags set for database (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validDatabasePrivs)
}

17
vendor/github.com/sean-/postgresql-acl/domain.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Domain models the privileges of a domain aclitem
type Domain struct {
	ACL
}

// NewDomain parses an ACL object and returns a Domain object.
func NewDomain(acl ACL) (Domain, error) {
	// Reject any privilege or grant-option bit outside the domain mask.
	if validRights(acl, validDomainPrivs) {
		return Domain{ACL: acl}, nil
	}
	return Domain{}, fmt.Errorf("invalid flags set for domain (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validDomainPrivs)
}

View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// ForeignDataWrapper models the privileges of a foreign data wrapper aclitem
type ForeignDataWrapper struct {
	ACL
}

// NewForeignDataWrapper parses an ACL object and returns a ForeignDataWrapper object.
func NewForeignDataWrapper(acl ACL) (ForeignDataWrapper, error) {
	if !validRights(acl, validForeignDataWrapperPrivs) {
		// The error previously said "domain" (copy-paste from domain.go);
		// name the actual object type.
		return ForeignDataWrapper{}, fmt.Errorf("invalid flags set for foreign data wrapper (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validForeignDataWrapperPrivs)
	}

	return ForeignDataWrapper{ACL: acl}, nil
}

View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// ForeignServer models the privileges of a foreign server aclitem
type ForeignServer struct {
	ACL
}

// NewForeignServer parses an ACL object and returns a ForeignServer object.
func NewForeignServer(acl ACL) (ForeignServer, error) {
	// Reject any privilege or grant-option bit outside the foreign-server mask.
	if validRights(acl, validForeignServerPrivs) {
		return ForeignServer{ACL: acl}, nil
	}
	return ForeignServer{}, fmt.Errorf("invalid flags set for foreign server (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validForeignServerPrivs)
}

17
vendor/github.com/sean-/postgresql-acl/function.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Function models the privileges of a function aclitem
type Function struct {
	ACL
}

// NewFunction validates the supplied ACL against the privilege bits
// PostgreSQL permits on a function (EXECUTE only) and wraps it in a
// Function value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewFunction(acl ACL) (Function, error) {
	if validRights(acl, validFunctionPrivs) {
		return Function{ACL: acl}, nil
	}
	return Function{}, fmt.Errorf("invalid flags set for function (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validFunctionPrivs)
}

17
vendor/github.com/sean-/postgresql-acl/language.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Language models the privileges of a language aclitem
type Language struct {
	ACL
}

// NewLanguage validates the supplied ACL against the privilege bits
// PostgreSQL permits on a procedural language (USAGE only) and wraps it in
// a Language value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewLanguage(acl ACL) (Language, error) {
	if validRights(acl, validLanguagePrivs) {
		return Language{ACL: acl}, nil
	}
	return Language{}, fmt.Errorf("invalid flags set for language (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validLanguagePrivs)
}

17
vendor/github.com/sean-/postgresql-acl/large_object.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// LargeObject models the privileges of a large object aclitem
type LargeObject struct {
	ACL
}

// NewLargeObject validates the supplied ACL against the privilege bits
// PostgreSQL permits on a large object (SELECT, UPDATE) and wraps it in a
// LargeObject value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewLargeObject(acl ACL) (LargeObject, error) {
	if validRights(acl, validLargeObjectPrivs) {
		return LargeObject{ACL: acl}, nil
	}
	return LargeObject{}, fmt.Errorf("invalid flags set for large object (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validLargeObjectPrivs)
}

42
vendor/github.com/sean-/postgresql-acl/privileges.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package acl
// Privileges represents a PostgreSQL ACL bitmask
type Privileges uint16

// See postgresql/src/include/utils/acl.h for inspiration.  Like PostgreSQL,
// "rights" refer to the combined grant option and privilege bits fields.
const (
	NoPrivs Privileges = 0

	// Ordering taken from postgresql/src/include/nodes/parsenodes.h
	//
	// NOTE: NoPrivs occupies the first const spec, so iota is already 1
	// here — the first privilege bit is therefore 1<<1 == 2.  The exact
	// values are internal to this package; only distinctness matters.
	Insert Privileges = 1 << iota
	Select
	Update
	Delete
	Truncate
	References
	Trigger
	Execute
	Usage
	Create
	Temporary
	Connect
	numPrivileges // unexported sentinel: one bit past the highest privilege
)

// Per-object whitelists of the privilege bits PostgreSQL accepts for each
// ACL-bearing object type; used by the NewXxx constructors via validRights.
const (
	validColumnPrivs             = Insert | Select | Update | References
	validDatabasePrivs           = Create | Temporary | Connect
	validDomainPrivs             = Usage
	validForeignDataWrapperPrivs = Usage
	validForeignServerPrivs     = Usage
	validFunctionPrivs           = Execute
	validLanguagePrivs           = Usage
	validLargeObjectPrivs        = Select | Update
	validSchemaPrivs             = Usage | Create
	validSequencePrivs           = Usage | Select | Update
	validTablePrivs              = Insert | Select | Update | Delete | Truncate | References | Trigger
	validTablespacePrivs         = Create
	validTypePrivs               = Usage
)

109
vendor/github.com/sean-/postgresql-acl/schema.go generated vendored Normal file
View File

@ -0,0 +1,109 @@
package acl
import (
"bytes"
"fmt"
"github.com/lib/pq"
)
// Schema models the privileges of a schema aclitem
type Schema struct {
	ACL
}

// NewSchema validates the supplied ACL against the privilege bits
// PostgreSQL permits on a schema (USAGE, CREATE) and wraps it in a Schema
// value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewSchema(acl ACL) (Schema, error) {
	if validRights(acl, validSchemaPrivs) {
		return Schema{ACL: acl}, nil
	}
	return Schema{}, fmt.Errorf("invalid flags set for schema (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validSchemaPrivs)
}
// Merge combines the receiver's attributes with x's and returns the result
// as a new Schema.  Privilege and grant-option bits are OR'ed together;
// Role and GrantedBy keep the receiver's value unless it is empty, in which
// case x's value is used.  Be careful with the role "" which is implicitly
// interpreted as the PUBLIC role.
func (s Schema) Merge(x Schema) Schema {
	merged := ACL{
		Privileges:   s.Privileges | x.Privileges,
		GrantOptions: s.GrantOptions | x.GrantOptions,
		Role:         s.Role,
		GrantedBy:    s.GrantedBy,
	}
	if merged.Role == "" {
		merged.Role = x.Role
	}
	if merged.GrantedBy == "" {
		merged.GrantedBy = x.GrantedBy
	}
	return Schema{merged}
}
// Grants returns the SQL GRANT statements that establish the receiver's
// privileges (CREATE and/or USAGE, optionally WITH GRANT OPTION) on the
// target schema for the receiver's role.
func (s Schema) Grants(target string) []string {
	// CREATE is emitted before USAGE, matching the order callers expect.
	schemaPrivs := []struct {
		priv Privileges
		word string
	}{
		{Create, "CREATE"},
		{Usage, "USAGE"},
	}

	queries := make([]string, 0, len(schemaPrivs))
	for _, p := range schemaPrivs {
		if !s.GetPrivilege(p.priv) {
			continue
		}
		var b bytes.Buffer
		b.WriteString("GRANT ")
		b.WriteString(p.word)
		b.WriteString(" ON SCHEMA ")
		b.WriteString(pq.QuoteIdentifier(target))
		b.WriteString(" TO ")
		b.WriteString(quoteRole(s.Role))
		if s.GetGrantOption(p.priv) {
			b.WriteString(" WITH GRANT OPTION")
		}
		queries = append(queries, b.String())
	}
	return queries
}
// Revokes returns the SQL REVOKE statements that withdraw the receiver's
// privileges (CREATE and/or USAGE) from the target schema.  When the grant
// option is set for a privilege, only the grant option is revoked
// ("REVOKE GRANT OPTION FOR ..."), leaving the privilege itself in place.
func (s Schema) Revokes(target string) []string {
	// CREATE is emitted before USAGE, mirroring Grants.
	schemaPrivs := []struct {
		priv Privileges
		word string
	}{
		{Create, "CREATE"},
		{Usage, "USAGE"},
	}

	queries := make([]string, 0, len(schemaPrivs))
	for _, p := range schemaPrivs {
		if !s.GetPrivilege(p.priv) {
			continue
		}
		var b bytes.Buffer
		b.WriteString("REVOKE")
		if s.GetGrantOption(p.priv) {
			b.WriteString(" GRANT OPTION FOR")
		}
		b.WriteString(" ")
		b.WriteString(p.word)
		b.WriteString(" ON SCHEMA ")
		b.WriteString(pq.QuoteIdentifier(target))
		b.WriteString(" FROM ")
		b.WriteString(quoteRole(s.Role))
		queries = append(queries, b.String())
	}
	return queries
}

18
vendor/github.com/sean-/postgresql-acl/sequence.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package acl
import "fmt"
// Sequence models the privileges of a sequence aclitem
type Sequence struct {
	ACL
}

// NewSequence validates the supplied ACL against the privilege bits
// PostgreSQL permits on a sequence (USAGE, SELECT, UPDATE) and wraps it in
// a Sequence value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewSequence(acl ACL) (Sequence, error) {
	if validRights(acl, validSequencePrivs) {
		return Sequence{ACL: acl}, nil
	}
	return Sequence{}, fmt.Errorf("invalid flags set for sequence (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validSequencePrivs)
}

18
vendor/github.com/sean-/postgresql-acl/table.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package acl
import "fmt"
// Table models the privileges of a table aclitem
type Table struct {
	ACL
}

// NewTable validates the supplied ACL against the privilege bits PostgreSQL
// permits on a table (INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES,
// TRIGGER) and wraps it in a Table value.  An error is returned when any
// disallowed privilege or grant-option bit is set.
func NewTable(acl ACL) (Table, error) {
	if validRights(acl, validTablePrivs) {
		return Table{ACL: acl}, nil
	}
	return Table{}, fmt.Errorf("invalid flags set for table (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validTablePrivs)
}

17
vendor/github.com/sean-/postgresql-acl/tablespace.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Tablespace models the privileges of a tablespace aclitem
type Tablespace struct {
	ACL
}

// NewTablespace validates the supplied ACL against the privilege bits
// PostgreSQL permits on a tablespace (CREATE only) and wraps it in a
// Tablespace value.  An error is returned when any disallowed privilege or
// grant-option bit is set.
func NewTablespace(acl ACL) (Tablespace, error) {
	if validRights(acl, validTablespacePrivs) {
		return Tablespace{ACL: acl}, nil
	}
	return Tablespace{}, fmt.Errorf("invalid flags set for tablespace (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validTablespacePrivs)
}

17
vendor/github.com/sean-/postgresql-acl/type.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package acl
import "fmt"
// Type models the privileges of a type aclitem
type Type struct {
	ACL
}

// NewType validates the supplied ACL against the privilege bits PostgreSQL
// permits on a type (USAGE only) and wraps it in a Type value.  An error is
// returned when any disallowed privilege or grant-option bit is set.
func NewType(acl ACL) (Type, error) {
	if validRights(acl, validTypePrivs) {
		return Type{ACL: acl}, nil
	}
	return Type{}, fmt.Errorf("invalid flags set for type (%+q), only %+q allowed", permString(acl.Privileges, acl.GrantOptions), validTypePrivs)
}

10
vendor/vendor.json vendored
View File

@ -1982,9 +1982,11 @@
"revision": "0826b98aaa29c0766956cb40d45cf7482a597671"
},
{
"checksumSHA1": "dNYxHiBLalTqluak2/Z8c3RsSEM=",
"comment": "go1.0-cutoff-74-g8ad2b29",
"path": "github.com/lib/pq",
"revision": "8ad2b298cadd691a77015666a5372eae5dbfac8f"
"revision": "50761b0867bd1d9d069276790bcd4a3bccf2324a",
"revisionTime": "2016-08-31T22:25:20Z"
},
{
"comment": "go1.0-cutoff-74-g8ad2b29",
@ -2268,6 +2270,12 @@
"revision": "88132ecdd39da62f7c73c5a8e1a383d7da5e0e09",
"revisionTime": "2016-10-27T15:40:24Z"
},
{
"checksumSHA1": "tEKRyau4iRjmq2iwJbduD9RhN5s=",
"path": "github.com/sean-/postgresql-acl",
"revision": "d10489e5d217ebe9c23470c4d0ba7081a6d1e799",
"revisionTime": "2016-12-25T12:04:19Z"
},
{
"checksumSHA1": "BqtlwAjgFuHsVVdnw+dGSe+CKLM=",
"path": "github.com/sethvargo/go-fastly",

View File

@ -8,8 +8,9 @@ description: |-
# postgresql\_database
The ``postgresql_database`` resource creates and manages a database instance on
a PostgreSQL server.
The ``postgresql_database`` resource creates and manages [database
objects](https://www.postgresql.org/docs/current/static/managing-databases.html)
within a PostgreSQL server instance.
## Usage

View File

@ -11,6 +11,15 @@ description: |-
The ``postgresql_role`` resource creates and manages a role on a PostgreSQL
server.
When a ``postgresql_role`` resource is removed, the provider automatically
runs a [`REASSIGN
OWNED`](https://www.postgresql.org/docs/current/static/sql-reassign-owned.html)
and a [`DROP
OWNED`](https://www.postgresql.org/docs/current/static/sql-drop-owned.html),
reassigning ownership to the `CURRENT_USER` (normally the connected user for
the provider). If the
specified PostgreSQL ROLE owns objects in multiple PostgreSQL databases in the
same PostgreSQL Cluster, one PostgreSQL provider per database must be created
and all but the final ``postgresql_role`` must specify a `skip_drop_role`.
## Usage
@ -82,6 +91,23 @@ resource "postgresql_role" "my_replication_role" {
datetime. If omitted or the magic value `NULL` is used, `valid_until` will be
set to `infinity`. Default is `NULL`, therefore `infinity`.
* `skip_drop_role` - (Optional) When a PostgreSQL ROLE exists in multiple
databases and the ROLE is dropped, the
[cleanup of ownership of objects](https://www.postgresql.org/docs/current/static/role-removal.html)
in each of the respective databases must occur before the ROLE can be dropped
from the catalog. Set this option to true when there are multiple databases
in a PostgreSQL cluster using the same PostgreSQL ROLE for object ownership.
This is the third and final step taken when removing a ROLE from a database.
* `skip_reassign_owned` - (Optional) When a PostgreSQL ROLE exists in multiple
databases and the ROLE is dropped, a
[`REASSIGN OWNED`](https://www.postgresql.org/docs/current/static/sql-reassign-owned.html)
must be executed on each of the respective databases before the `DROP ROLE`
can be executed to drop the ROLE from the catalog. These are the first and
second steps taken when removing a ROLE from a database (the second step being
an implicit
[`DROP OWNED`](https://www.postgresql.org/docs/current/static/sql-drop-owned.html)).
## Import Example
`postgresql_role` supports importing resources. Supposing the following

View File

@ -8,15 +8,48 @@ description: |-
# postgresql\_schema
The ``postgresql_schema`` resource creates and manages a schema within a
PostgreSQL database.
The ``postgresql_schema`` resource creates and manages [schema
objects](https://www.postgresql.org/docs/current/static/ddl-schemas.html) within
a PostgreSQL database.
## Usage
```
resource "postgresql_role" "app_www" {
name = "app_www"
}
resource "postgresql_role" "app_dba" {
name = "app_dba"
}
resource "postgresql_role" "app_releng" {
name = "app_releng"
}
resource "postgresql_schema" "my_schema" {
name = "my_schema"
name = "my_schema"
owner = "postgres"
policy {
usage = true
role = "${postgresql_role.app_www.name}"
}
# app_releng can create new objects in the schema. This is the role that
# migrations are executed as.
policy {
create = true
usage = true
role = "${postgresql_role.app_releng.name}"
}
policy {
create_with_grant = true
usage_with_grant = true
role = "${postgresql_role.app_dba.name}"
}
}
```
@ -24,6 +57,19 @@ resource "postgresql_schema" "my_schema" {
* `name` - (Required) The name of the schema. Must be unique in the PostgreSQL
database instance where it is configured.
* `owner` - (Optional) The ROLE who owns the schema.
* `policy` - (Optional) Can be specified multiple times for each policy. Each
policy block supports fields documented below.
The `policy` block supports:
* `create` - (Optional) Should the specified ROLE have CREATE privileges to the specified SCHEMA.
* `create_with_grant` - (Optional) Should the specified ROLE have CREATE privileges to the specified SCHEMA and the ability to GRANT the CREATE privilege to other ROLEs.
* `role` - (Optional) The ROLE who is receiving the policy. If this value is empty or not specified it implies the policy is referring to the [`PUBLIC` role](https://www.postgresql.org/docs/current/static/sql-grant.html).
* `usage` - (Optional) Should the specified ROLE have USAGE privileges to the specified SCHEMA.
* `usage_with_grant` - (Optional) Should the specified ROLE have USAGE privileges to the specified SCHEMA and the ability to GRANT the USAGE privilege to other ROLEs.
~> **NOTE on `policy`:** The permissions of a role specified in multiple policy blocks are cumulative. For example, if the same role is specified in two different `policy` blocks, each with different permissions (e.g. `create` and `usage_with_grant`, respectively), then the specified role will have both `create` and `usage_with_grant` privileges.
## Import Example
@ -31,14 +77,17 @@ resource "postgresql_schema" "my_schema" {
Terraform:
```
provider "postgresql" {
alias = "admindb"
resource "postgresql_schema" "public" {
name = "public"
}
resource "postgresql_schema" "schema_foo" {
provider = "postgresql.admindb"
name = "my_schema"
owner = "postgres"
name = "my_schema"
policy {
usage = true
}
}
```