provider/google: SQL instance & database tests & documentation

This commit is contained in:
Lars Wander 2015-10-23 10:10:41 -04:00
parent b19953e48c
commit 07de5e7260
35 changed files with 1949 additions and 53 deletions

View File

@ -18,6 +18,7 @@ import (
"google.golang.org/api/container/v1"
"google.golang.org/api/dns/v1"
"google.golang.org/api/storage/v1"
"google.golang.org/api/sqladmin/v1beta4"
)
// Config is the configuration structure used to instantiate the Google
@ -31,6 +32,7 @@ type Config struct {
clientContainer *container.Service
clientDns *dns.Service
clientStorage *storage.Service
clientSqlAdmin *sqladmin.Service
}
func (c *Config) loadAndValidate() error {
@ -149,6 +151,13 @@ func (c *Config) loadAndValidate() error {
}
c.clientStorage.UserAgent = userAgent
log.Printf("[INFO] Instantiating Google SqlAdmin Client...")
c.clientSqlAdmin, err = sqladmin.New(client)
if err != nil {
return err
}
c.clientSqlAdmin.UserAgent = userAgent
return nil
}

View File

@ -53,6 +53,8 @@ func Provider() terraform.ResourceProvider {
"google_dns_managed_zone": resourceDnsManagedZone(),
"google_dns_record_set": resourceDnsRecordSet(),
"google_compute_instance_group_manager": resourceComputeInstanceGroupManager(),
"google_sql_database": resourceSqlDatabase(),
"google_sql_database_instance": resourceSqlDatabaseInstance(),
"google_storage_bucket": resourceStorageBucket(),
"google_storage_bucket_acl": resourceStorageBucketAcl(),
"google_storage_bucket_object": resourceStorageBucketObject(),

View File

@ -0,0 +1,113 @@
package google
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/sqladmin/v1beta4"
)
func resourceSqlDatabase() *schema.Resource {
return &schema.Resource{
Create: resourceSqlDatabaseCreate,
Read: resourceSqlDatabaseRead,
Delete: resourceSqlDatabaseDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"instance": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
database_name := d.Get("name").(string)
instance_name := d.Get("instance").(string)
project := config.Project
db := &sqladmin.Database{
Name: database_name,
Instance: instance_name,
}
op, err := config.clientSqlAdmin.Databases.Insert(project, instance_name,
db).Do()
if err != nil {
return fmt.Errorf("Error, failed to insert "+
"database %s into instance %s: %s", database_name,
instance_name, err)
}
err = sqladminOperationWait(config, op, "Insert Database")
if err != nil {
return fmt.Errorf("Error, failure waiting for insertion of %s "+
"into %s: %s", database_name, instance_name, err)
}
return resourceSqlDatabaseRead(d, meta)
}
func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
database_name := d.Get("name").(string)
instance_name := d.Get("instance").(string)
project := config.Project
db, err := config.clientSqlAdmin.Databases.Get(project, instance_name,
database_name).Do()
if err != nil {
return fmt.Errorf("Error, failed to get"+
"database %s in instance %s: %s", database_name,
instance_name, err)
}
d.Set("self_link", db.SelfLink)
d.SetId(instance_name + ":" + database_name)
return nil
}
func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
database_name := d.Get("name").(string)
instance_name := d.Get("instance").(string)
project := config.Project
op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name,
database_name).Do()
if err != nil {
return fmt.Errorf("Error, failed to delete"+
"database %s in instance %s: %s", database_name,
instance_name, err)
}
err = sqladminOperationWait(config, op, "Delete Database")
if err != nil {
return fmt.Errorf("Error, failure waiting for deletion of %s "+
"in %s: %s", database_name, instance_name, err)
}
return nil
}

View File

@ -0,0 +1,943 @@
package google
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/sqladmin/v1beta4"
)
func resourceSqlDatabaseInstance() *schema.Resource {
return &schema.Resource{
Create: resourceSqlDatabaseInstanceCreate,
Read: resourceSqlDatabaseInstanceRead,
Update: resourceSqlDatabaseInstanceUpdate,
Delete: resourceSqlDatabaseInstanceDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"master_instance_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"database_version": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "MYSQL_5_5",
ForceNew: true,
},
"region": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"settings": &schema.Schema{
Type: schema.TypeList,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"version": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"tier": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"activation_policy": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"authorized_gae_applications": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"backup_configuration": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"binary_log_enabled": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
"enabled": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
"start_time": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"crash_safe_replication": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
"database_flags": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"value": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"ip_configuration": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"authorized_networks": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"expiration_time": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"value": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"ipv4_enabled": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
"require_ssl": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
},
},
},
"location_preference": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"follow_gae_application": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"pricing_plan": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"replication_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"replica_configuration": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ca_certificate": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"client_certificate": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"client_key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"connect_retry_interval": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"dump_file_path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"master_heartbeat_period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"password": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"ssl_cipher": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"username": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"verify_server_certificate": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
},
},
},
},
}
}
func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
name := d.Get("name").(string)
region := d.Get("region").(string)
databaseVersion := d.Get("database_version").(string)
_settingsList := d.Get("settings").([]interface{})
if len(_settingsList) > 1 {
return fmt.Errorf("At most one settings block is allowed")
}
_settings := _settingsList[0].(map[string]interface{})
settings := &sqladmin.Settings{
Tier: _settings["tier"].(string),
}
if v, ok := _settings["activation_policy"]; ok {
settings.ActivationPolicy = v.(string)
}
if v, ok := _settings["authorized_gae_applications"]; ok {
settings.AuthorizedGaeApplications = make([]string, 0)
for _, app := range v.([]interface{}) {
settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications,
app.(string))
}
}
if v, ok := _settings["backup_configuration"]; ok {
_backupConfigurationList := v.([]interface{})
if len(_backupConfigurationList) > 1 {
return fmt.Errorf("At most one backup_configuration block is allowed")
}
if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil {
settings.BackupConfiguration = &sqladmin.BackupConfiguration{}
_backupConfiguration := _backupConfigurationList[0].(map[string]interface{})
if vp, okp := _backupConfiguration["binary_log_enabled"]; okp {
settings.BackupConfiguration.BinaryLogEnabled = vp.(bool)
}
if vp, okp := _backupConfiguration["enabled"]; okp {
settings.BackupConfiguration.Enabled = vp.(bool)
}
if vp, okp := _backupConfiguration["start_time"]; okp {
settings.BackupConfiguration.StartTime = vp.(string)
}
}
}
if v, ok := _settings["crash_safe_replication"]; ok {
settings.CrashSafeReplicationEnabled = v.(bool)
}
if v, ok := _settings["database_flags"]; ok {
settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0)
_databaseFlagsList := v.([]interface{})
for _, _flag := range _databaseFlagsList {
_entry := _flag.(map[string]interface{})
flag := &sqladmin.DatabaseFlags{}
if vp, okp := _entry["name"]; okp {
flag.Name = vp.(string)
}
if vp, okp := _entry["value"]; okp {
flag.Value = vp.(string)
}
settings.DatabaseFlags = append(settings.DatabaseFlags, flag)
}
}
if v, ok := _settings["ip_configuration"]; ok {
_ipConfigurationList := v.([]interface{})
if len(_ipConfigurationList) > 1 {
return fmt.Errorf("At most one ip_configuration block is allowed")
}
if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil {
settings.IpConfiguration = &sqladmin.IpConfiguration{}
_ipConfiguration := _ipConfigurationList[0].(map[string]interface{})
if vp, okp := _ipConfiguration["ipv4_enabled"]; okp {
settings.IpConfiguration.Ipv4Enabled = vp.(bool)
}
if vp, okp := _ipConfiguration["require_ssl"]; okp {
settings.IpConfiguration.RequireSsl = vp.(bool)
}
if vp, okp := _ipConfiguration["authorized_networks"]; okp {
settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0)
_authorizedNetworksList := vp.([]interface{})
for _, _acl := range _authorizedNetworksList {
_entry := _acl.(map[string]interface{})
entry := &sqladmin.AclEntry{}
if vpp, okpp := _entry["expiration_time"]; okpp {
entry.ExpirationTime = vpp.(string)
}
if vpp, okpp := _entry["name"]; okpp {
entry.Name = vpp.(string)
}
if vpp, okpp := _entry["value"]; okpp {
entry.Value = vpp.(string)
}
settings.IpConfiguration.AuthorizedNetworks = append(
settings.IpConfiguration.AuthorizedNetworks, entry)
}
}
}
}
if v, ok := _settings["location_preference"]; ok {
_locationPreferenceList := v.([]interface{})
if len(_locationPreferenceList) > 1 {
return fmt.Errorf("At most one location_preference block is allowed")
}
if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil {
settings.LocationPreference = &sqladmin.LocationPreference{}
_locationPreference := _locationPreferenceList[0].(map[string]interface{})
if vp, okp := _locationPreference["follow_gae_application"]; okp {
settings.LocationPreference.FollowGaeApplication = vp.(string)
}
if vp, okp := _locationPreference["zone"]; okp {
settings.LocationPreference.Zone = vp.(string)
}
}
}
if v, ok := _settings["pricing_plan"]; ok {
settings.PricingPlan = v.(string)
}
if v, ok := _settings["replication_type"]; ok {
settings.ReplicationType = v.(string)
}
instance := &sqladmin.DatabaseInstance{
Name: name,
Region: region,
Settings: settings,
DatabaseVersion: databaseVersion,
}
if v, ok := d.GetOk("replica_configuration"); ok {
_replicaConfigurationList := v.([]interface{})
if len(_replicaConfigurationList) > 1 {
return fmt.Errorf("Only one replica_configuration block may be defined")
}
if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil {
replicaConfiguration := &sqladmin.ReplicaConfiguration{}
mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{}
_replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{})
if vp, okp := _replicaConfiguration["ca_certificate"]; okp {
mySqlReplicaConfiguration.CaCertificate = vp.(string)
}
if vp, okp := _replicaConfiguration["client_certificate"]; okp {
mySqlReplicaConfiguration.ClientCertificate = vp.(string)
}
if vp, okp := _replicaConfiguration["client_key"]; okp {
mySqlReplicaConfiguration.ClientKey = vp.(string)
}
if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp {
mySqlReplicaConfiguration.ConnectRetryInterval = int64(vp.(int))
}
if vp, okp := _replicaConfiguration["dump_file_path"]; okp {
mySqlReplicaConfiguration.DumpFilePath = vp.(string)
}
if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp {
mySqlReplicaConfiguration.MasterHeartbeatPeriod = int64(vp.(int))
}
if vp, okp := _replicaConfiguration["password"]; okp {
mySqlReplicaConfiguration.Password = vp.(string)
}
if vp, okp := _replicaConfiguration["ssl_cipher"]; okp {
mySqlReplicaConfiguration.SslCipher = vp.(string)
}
if vp, okp := _replicaConfiguration["username"]; okp {
mySqlReplicaConfiguration.Username = vp.(string)
}
if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp {
mySqlReplicaConfiguration.VerifyServerCertificate = vp.(bool)
}
replicaConfiguration.MysqlReplicaConfiguration = mySqlReplicaConfiguration
instance.ReplicaConfiguration = replicaConfiguration
}
}
if v, ok := d.GetOk("master_instance_name"); ok {
instance.MasterInstanceName = v.(string)
}
op, err := config.clientSqlAdmin.Instances.Insert(config.Project, instance).Do()
if err != nil {
return fmt.Errorf("Error, failed to create instance %s: %s", name, err)
}
err = sqladminOperationWait(config, op, "Create Instance")
if err != nil {
return err
}
return resourceSqlDatabaseInstanceRead(d, meta)
}
func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
instance, err := config.clientSqlAdmin.Instances.Get(config.Project,
d.Get("name").(string)).Do()
if err != nil {
return fmt.Errorf("Error retrieving instance %s: %s",
d.Get("name").(string), err)
}
_settingsList := d.Get("settings").([]interface{})
_settings := _settingsList[0].(map[string]interface{})
settings := instance.Settings
_settings["version"] = settings.SettingsVersion
_settings["tier"] = settings.Tier
// Take care to only update attributes that the user has defined explicitly
if v, ok := _settings["activation_policy"]; ok && len(v.(string)) > 0 {
_settings["activation_policy"] = settings.ActivationPolicy
}
if v, ok := _settings["authorized_gae_applications"]; ok && len(v.([]interface{})) > 0 {
_authorized_gae_applications := make([]interface{}, 0)
for _, app := range settings.AuthorizedGaeApplications {
_authorized_gae_applications = append(_authorized_gae_applications, app)
}
_settings["authorized_gae_applications"] = _authorized_gae_applications
}
if v, ok := _settings["backup_configuration"]; ok {
_backupConfigurationList := v.([]interface{})
if len(_backupConfigurationList) > 1 {
return fmt.Errorf("At most one backup_configuration block is allowed")
}
if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil {
_backupConfiguration := _backupConfigurationList[0].(map[string]interface{})
if vp, okp := _backupConfiguration["binary_log_enabled"]; okp && vp != nil {
_backupConfiguration["binary_log_enabled"] = settings.BackupConfiguration.BinaryLogEnabled
}
if vp, okp := _backupConfiguration["enabled"]; okp && vp != nil {
_backupConfiguration["enabled"] = settings.BackupConfiguration.Enabled
}
if vp, okp := _backupConfiguration["start_time"]; okp && vp != nil {
_backupConfiguration["start_time"] = settings.BackupConfiguration.StartTime
}
_backupConfigurationList[0] = _backupConfiguration
_settings["backup_configuration"] = _backupConfigurationList
}
}
if v, ok := _settings["crash_safe_replication"]; ok && v != nil {
_settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled
}
if v, ok := _settings["database_flags"]; ok && len(v.([]interface{})) > 0 {
_flag_map := make(map[string]string)
// First keep track of locally defined flag pairs
for _, _flag := range _settings["database_flags"].([]interface{}) {
_entry := _flag.(map[string]interface{})
_flag_map[_entry["name"].(string)] = _entry["value"].(string)
}
_database_flags := make([]interface{}, 0)
// Next read the flag pairs from the server, and reinsert those that
// correspond to ones defined locally
for _, entry := range settings.DatabaseFlags {
if _, okp := _flag_map[entry.Name]; okp {
_entry := make(map[string]interface{})
_entry["name"] = entry.Name
_entry["value"] = entry.Value
_database_flags = append(_database_flags, _entry)
}
}
_settings["database_flags"] = _database_flags
}
if v, ok := _settings["ip_configuration"]; ok {
_ipConfigurationList := v.([]interface{})
if len(_ipConfigurationList) > 1 {
return fmt.Errorf("At most one ip_configuration block is allowed")
}
if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil {
_ipConfiguration := _ipConfigurationList[0].(map[string]interface{})
if vp, okp := _ipConfiguration["ipv4_enabled"]; okp && vp != nil {
_ipConfiguration["ipv4_enabled"] = settings.IpConfiguration.Ipv4Enabled
}
if vp, okp := _ipConfiguration["require_ssl"]; okp && vp != nil {
_ipConfiguration["require_ssl"] = settings.IpConfiguration.RequireSsl
}
if vp, okp := _ipConfiguration["authorized_networks"]; okp && vp != nil {
_ipc_map := make(map[string]interface{})
// First keep track of locally defined authorized networks
for _, _ipc := range vp.([]interface{}) {
_entry := _ipc.(map[string]interface{})
if _entry["value"] == nil {
continue
}
_value := make(map[string]interface{})
_value["name"] = _entry["name"]
_value["expiration_time"] = _entry["expiration_time"]
// We key on value, since that is the only required part of
// this 3-tuple
_ipc_map[_entry["value"].(string)] = _value
}
_authorized_networks := make([]interface{}, 0)
// Next read the network tuples from the server, and reinsert those that
// correspond to ones defined locally
for _, entry := range settings.IpConfiguration.AuthorizedNetworks {
if _, okp := _ipc_map[entry.Value]; okp {
_entry := make(map[string]interface{})
_entry["value"] = entry.Value
_entry["name"] = entry.Name
_entry["expiration_time"] = entry.ExpirationTime
_authorized_networks = append(_authorized_networks, _entry)
}
}
_ipConfiguration["authorized_networks"] = _authorized_networks
}
_ipConfigurationList[0] = _ipConfiguration
_settings["ip_configuration"] = _ipConfigurationList
}
}
if v, ok := _settings["location_preference"]; ok && len(v.([]interface{})) > 0 {
_locationPreferenceList := v.([]interface{})
if len(_locationPreferenceList) > 1 {
return fmt.Errorf("At most one location_preference block is allowed")
}
if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil &&
settings.LocationPreference != nil {
_locationPreference := _locationPreferenceList[0].(map[string]interface{})
if vp, okp := _locationPreference["follow_gae_application"]; okp && vp != nil {
_locationPreference["follow_gae_application"] =
settings.LocationPreference.FollowGaeApplication
}
if vp, okp := _locationPreference["zone"]; okp && vp != nil {
_locationPreference["zone"] = settings.LocationPreference.Zone
}
_locationPreferenceList[0] = _locationPreference
_settings["location_preference"] = _locationPreferenceList[0]
}
}
if v, ok := _settings["pricing_plan"]; ok && len(v.(string)) > 0 {
_settings["pricing_plan"] = settings.PricingPlan
}
if v, ok := _settings["replication_type"]; ok && len(v.(string)) > 0 {
_settings["replication_type"] = settings.ReplicationType
}
_settingsList[0] = _settings
d.Set("settings", _settingsList)
if v, ok := d.GetOk("replica_configuration"); ok && v != nil {
_replicaConfigurationList := v.([]interface{})
if len(_replicaConfigurationList) > 1 {
return fmt.Errorf("Only one replica_configuration block may be defined")
}
if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil {
mySqlReplicaConfiguration := instance.ReplicaConfiguration.MysqlReplicaConfiguration
_replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{})
if vp, okp := _replicaConfiguration["ca_certificate"]; okp && vp != nil {
_replicaConfiguration["ca_certificate"] = mySqlReplicaConfiguration.CaCertificate
}
if vp, okp := _replicaConfiguration["client_certificate"]; okp && vp != nil {
_replicaConfiguration["client_certificate"] = mySqlReplicaConfiguration.ClientCertificate
}
if vp, okp := _replicaConfiguration["client_key"]; okp && vp != nil {
_replicaConfiguration["client_key"] = mySqlReplicaConfiguration.ClientKey
}
if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp && vp != nil {
_replicaConfiguration["connect_retry_interval"] = mySqlReplicaConfiguration.ConnectRetryInterval
}
if vp, okp := _replicaConfiguration["dump_file_path"]; okp && vp != nil {
_replicaConfiguration["dump_file_path"] = mySqlReplicaConfiguration.DumpFilePath
}
if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp && vp != nil {
_replicaConfiguration["master_heartbeat_period"] = mySqlReplicaConfiguration.MasterHeartbeatPeriod
}
if vp, okp := _replicaConfiguration["password"]; okp && vp != nil {
_replicaConfiguration["password"] = mySqlReplicaConfiguration.Password
}
if vp, okp := _replicaConfiguration["ssl_cipher"]; okp && vp != nil {
_replicaConfiguration["ssl_cipher"] = mySqlReplicaConfiguration.SslCipher
}
if vp, okp := _replicaConfiguration["username"]; okp && vp != nil {
_replicaConfiguration["username"] = mySqlReplicaConfiguration.Username
}
if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp && vp != nil {
_replicaConfiguration["verify_server_certificate"] = mySqlReplicaConfiguration.VerifyServerCertificate
}
_replicaConfigurationList[0] = _replicaConfiguration
d.Set("replica_configuration", _replicaConfigurationList)
}
}
if v, ok := d.GetOk("master_instance_name"); ok && v != nil {
d.Set("master_instance_name", instance.MasterInstanceName)
}
d.Set("self_link", instance.SelfLink)
d.SetId(instance.Name)
return nil
}
func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
d.Partial(true)
instance, err := config.clientSqlAdmin.Instances.Get(config.Project,
d.Get("name").(string)).Do()
if err != nil {
return fmt.Errorf("Error retrieving instance %s: %s",
d.Get("name").(string), err)
}
if d.HasChange("settings") {
_oListCast, _settingsListCast := d.GetChange("settings")
_oList := _oListCast.([]interface{})
_o := _oList[0].(map[string]interface{})
_settingsList := _settingsListCast.([]interface{})
if len(_settingsList) > 1 {
return fmt.Errorf("At most one settings block is allowed")
}
_settings := _settingsList[0].(map[string]interface{})
settings := &sqladmin.Settings{
Tier: _settings["tier"].(string),
SettingsVersion: instance.Settings.SettingsVersion,
}
if v, ok := _settings["activation_policy"]; ok {
settings.ActivationPolicy = v.(string)
}
if v, ok := _settings["authorized_gae_applications"]; ok {
settings.AuthorizedGaeApplications = make([]string, 0)
for _, app := range v.([]interface{}) {
settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications,
app.(string))
}
}
if v, ok := _settings["backup_configuration"]; ok {
_backupConfigurationList := v.([]interface{})
if len(_backupConfigurationList) > 1 {
return fmt.Errorf("At most one backup_configuration block is allowed")
}
if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil {
settings.BackupConfiguration = &sqladmin.BackupConfiguration{}
_backupConfiguration := _backupConfigurationList[0].(map[string]interface{})
if vp, okp := _backupConfiguration["binary_log_enabled"]; okp {
settings.BackupConfiguration.BinaryLogEnabled = vp.(bool)
}
if vp, okp := _backupConfiguration["enabled"]; okp {
settings.BackupConfiguration.Enabled = vp.(bool)
}
if vp, okp := _backupConfiguration["start_time"]; okp {
settings.BackupConfiguration.StartTime = vp.(string)
}
}
}
if v, ok := _settings["crash_safe_replication"]; ok {
settings.CrashSafeReplicationEnabled = v.(bool)
}
_oldDatabaseFlags := make([]interface{}, 0)
if ov, ook := _o["database_flags"]; ook {
_oldDatabaseFlags = ov.([]interface{})
}
if v, ok := _settings["database_flags"]; ok || len(_oldDatabaseFlags) > 0 {
oldDatabaseFlags := settings.DatabaseFlags
settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0)
_databaseFlagsList := make([]interface{}, 0)
if v != nil {
_databaseFlagsList = v.([]interface{})
}
_odbf_map := make(map[string]interface{})
for _, _dbf := range _oldDatabaseFlags {
_entry := _dbf.(map[string]interface{})
_odbf_map[_entry["name"].(string)] = true
}
// First read the flags from the server, and reinsert those that
// were not previously defined
for _, entry := range oldDatabaseFlags {
_, ok_old := _odbf_map[entry.Name]
if !ok_old {
settings.DatabaseFlags = append(
settings.DatabaseFlags, entry)
}
}
// finally, insert only those that were previously defined
// and are still defined.
for _, _flag := range _databaseFlagsList {
_entry := _flag.(map[string]interface{})
flag := &sqladmin.DatabaseFlags{}
if vp, okp := _entry["name"]; okp {
flag.Name = vp.(string)
}
if vp, okp := _entry["value"]; okp {
flag.Value = vp.(string)
}
settings.DatabaseFlags = append(settings.DatabaseFlags, flag)
}
}
if v, ok := _settings["ip_configuration"]; ok {
_ipConfigurationList := v.([]interface{})
if len(_ipConfigurationList) > 1 {
return fmt.Errorf("At most one ip_configuration block is allowed")
}
if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil {
settings.IpConfiguration = &sqladmin.IpConfiguration{}
_ipConfiguration := _ipConfigurationList[0].(map[string]interface{})
if vp, okp := _ipConfiguration["ipv4_enabled"]; okp {
settings.IpConfiguration.Ipv4Enabled = vp.(bool)
}
if vp, okp := _ipConfiguration["require_ssl"]; okp {
settings.IpConfiguration.RequireSsl = vp.(bool)
}
_oldAuthorizedNetworkList := make([]interface{}, 0)
if ov, ook := _o["ip_configuration"]; ook {
_oldIpConfList := ov.([]interface{})
if len(_oldIpConfList) > 0 {
_oldIpConf := _oldIpConfList[0].(map[string]interface{})
if ovp, ookp := _oldIpConf["authorized_networks"]; ookp {
_oldAuthorizedNetworkList = ovp.([]interface{})
}
}
}
if vp, okp := _ipConfiguration["authorized_networks"]; okp || len(_oldAuthorizedNetworkList) > 0 {
oldAuthorizedNetworks := settings.IpConfiguration.AuthorizedNetworks
settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0)
_authorizedNetworksList := make([]interface{}, 0)
if vp != nil {
_authorizedNetworksList = vp.([]interface{})
}
_oipc_map := make(map[string]interface{})
for _, _ipc := range _oldAuthorizedNetworkList {
_entry := _ipc.(map[string]interface{})
_oipc_map[_entry["value"].(string)] = true
}
// Next read the network tuples from the server, and reinsert those that
// were not previously defined
for _, entry := range oldAuthorizedNetworks {
_, ok_old := _oipc_map[entry.Value]
if !ok_old {
settings.IpConfiguration.AuthorizedNetworks = append(
settings.IpConfiguration.AuthorizedNetworks, entry)
}
}
// finally, insert only those that were previously defined
// and are still defined.
for _, _ipc := range _authorizedNetworksList {
_entry := _ipc.(map[string]interface{})
if _, ok_old := _oipc_map[_entry["value"].(string)]; ok_old {
entry := &sqladmin.AclEntry{}
if vpp, okpp := _entry["expiration_time"]; okpp {
entry.ExpirationTime = vpp.(string)
}
if vpp, okpp := _entry["name"]; okpp {
entry.Name = vpp.(string)
}
if vpp, okpp := _entry["value"]; okpp {
entry.Value = vpp.(string)
}
settings.IpConfiguration.AuthorizedNetworks = append(
settings.IpConfiguration.AuthorizedNetworks, entry)
}
}
}
}
}
if v, ok := _settings["location_preference"]; ok {
_locationPreferenceList := v.([]interface{})
if len(_locationPreferenceList) > 1 {
return fmt.Errorf("At most one location_preference block is allowed")
}
if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil {
settings.LocationPreference = &sqladmin.LocationPreference{}
_locationPreference := _locationPreferenceList[0].(map[string]interface{})
if vp, okp := _locationPreference["follow_gae_application"]; okp {
settings.LocationPreference.FollowGaeApplication = vp.(string)
}
if vp, okp := _locationPreference["zone"]; okp {
settings.LocationPreference.Zone = vp.(string)
}
}
}
if v, ok := _settings["pricing_plan"]; ok {
settings.PricingPlan = v.(string)
}
if v, ok := _settings["replication_type"]; ok {
settings.ReplicationType = v.(string)
}
instance.Settings = settings
}
d.Partial(false)
op, err := config.clientSqlAdmin.Instances.Update(config.Project, instance.Name, instance).Do()
if err != nil {
return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err)
}
err = sqladminOperationWait(config, op, "Create Instance")
if err != nil {
return err
}
return resourceSqlDatabaseInstanceRead(d, meta)
}
func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
op, err := config.clientSqlAdmin.Instances.Delete(config.Project, d.Get("name").(string)).Do()
if err != nil {
return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err)
}
err = sqladminOperationWait(config, op, "Delete Instance")
if err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,409 @@
package google
/**
* Note! You must run these tests one at a time. Google Cloud SQL does
* not allow you to reuse a database instance name for a short time after it
* has been used, and for this reason the tests will fail if the same config
* is used several times in short succession.
*/
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/sqladmin/v1beta4"
)
func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_settings,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_settings,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_settings,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func testAccCheckGoogleSqlDatabaseInstanceEquals(n string,
instance *sqladmin.DatabaseInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
attributes := rs.Primary.Attributes
server := instance.Name
local := attributes["name"]
if server != local {
return fmt.Errorf("Error name mismatch, (%s, %s)", server, local)
}
server = instance.Settings.Tier
local = attributes["settings.0.tier"]
if server != local {
return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local)
}
server = instance.MasterInstanceName
local = attributes["master_instance_name"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local)
}
server = instance.Settings.ActivationPolicy
local = attributes["settings.0.activation_policy"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.activation_policy mismatch, (%s, %s)", server, local)
}
if instance.Settings.BackupConfiguration != nil {
server = strconv.FormatBool(instance.Settings.BackupConfiguration.BinaryLogEnabled)
local = attributes["settings.0.backup_configuration.0.binary_log_enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.binary_log_enabled mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.Settings.BackupConfiguration.Enabled)
local = attributes["settings.0.backup_configuration.0.enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.enabled mismatch, (%s, %s)", server, local)
}
server = instance.Settings.BackupConfiguration.StartTime
local = attributes["settings.0.backup_configuration.0.start_time"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.start_time mismatch, (%s, %s)", server, local)
}
}
server = strconv.FormatBool(instance.Settings.CrashSafeReplicationEnabled)
local = attributes["settings.0.crash_safe_replication"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local)
}
if instance.Settings.IpConfiguration != nil {
server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled)
local = attributes["settings.0.ip_configuration.0.ipv4_enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.ip_configuration.ipv4_enabled mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.Settings.IpConfiguration.RequireSsl)
local = attributes["settings.0.ip_configuration.0.require_ssl"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.ip_configuration.require_ssl mismatch, (%s, %s)", server, local)
}
}
if instance.Settings.LocationPreference != nil {
server = instance.Settings.LocationPreference.FollowGaeApplication
local = attributes["settings.0.location_preference.0.follow_gae_application"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.location_preference.follow_gae_application mismatch, (%s, %s)", server, local)
}
server = instance.Settings.LocationPreference.Zone
local = attributes["settings.0.location_preference.0.zone"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.location_preference.zone mismatch, (%s, %s)", server, local)
}
}
server = instance.Settings.PricingPlan
local = attributes["settings.0.pricing_plan"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local)
}
if instance.ReplicaConfiguration != nil &&
instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil {
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate
local = attributes["replica_configuration.0.ca_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate
local = attributes["replica_configuration.0.client_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey
local = attributes["replica_configuration.0.client_key"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local)
}
server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10)
local = attributes["replica_configuration.0.connect_retry_interval"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath
local = attributes["replica_configuration.0.dump_file_path"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local)
}
server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10)
local = attributes["replica_configuration.0.master_heartbeat_period"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password
local = attributes["replica_configuration.0.password"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher
local = attributes["replica_configuration.0.ssl_cipher"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username
local = attributes["replica_configuration.0.username"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate)
local = attributes["replica_configuration.0.verify_server_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local)
}
}
return nil
}
}
func testAccCheckGoogleSqlDatabaseInstanceExists(n string,
instance *sqladmin.DatabaseInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
found, err := config.clientSqlAdmin.Instances.Get(config.Project,
rs.Primary.Attributes["name"]).Do()
if err != nil {
return fmt.Errorf("Not found: %s", n)
}
*instance = *found
return nil
}
}
func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
config := testAccProvider.Meta().(*Config)
if rs.Type != "google_sql_database_instance" {
continue
}
_, err := config.clientSqlAdmin.Instances.Get(config.Project,
rs.Primary.Attributes["name"]).Do()
if err == nil {
return fmt.Errorf("Database Instance still exists")
}
}
return nil
}
var databaseId = genRandInt()
var testGoogleSqlDatabaseInstance_basic = fmt.Sprintf(`
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
settings {
tier = "D0"
crash_safe_replication = false
}
}
`, databaseId)
var testGoogleSqlDatabaseInstance_settings = fmt.Sprintf(`
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
settings {
tier = "D0"
crash_safe_replication = false
replication_type = "ASYNCHRONOUS"
location_preference {
zone = "us-central1-f"
}
ip_configuration {
ipv4_enabled = "true"
authorized_networks {
value = "108.12.12.12"
name = "misc"
expiration_time = "2017-11-15T16:19:00.094Z"
}
}
backup_configuration {
enabled = "true"
start_time = "19:19"
}
activation_policy = "ON_DEMAND"
}
}
`, databaseId)
// Note - this test is not feasible to run unless we generate
// backups first.
var testGoogleSqlDatabaseInstance_replica = fmt.Sprintf(`
resource "google_sql_database_instance" "instance_master" {
name = "tf-lw-%d"
database_version = "MYSQL_5_6"
region = "us-east1"
settings {
tier = "D0"
crash_safe_replication = true
backup_configuration {
enabled = true
start_time = "00:00"
binary_log_enabled = true
}
}
}
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
database_version = "MYSQL_5_6"
region = "us-central"
settings {
tier = "D0"
}
master_instance_name = "${google_sql_database_instance.instance_master.name}"
replica_configuration {
ca_certificate = "${file("~/tmp/fake.pem")}"
client_certificate = "${file("~/tmp/fake.pem")}"
client_key = "${file("~/tmp/fake.pem")}"
connect_retry_interval = 100
master_heartbeat_period = 10000
password = "password"
username = "username"
ssl_cipher = "ALL"
verify_server_certificate = false
}
}
`, genRandInt(), genRandInt())

View File

@ -0,0 +1,113 @@
package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/sqladmin/v1beta4"
)
func TestAccGoogleSqlDatabase_basic(t *testing.T) {
var database sqladmin.Database
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabase_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseExists(
"google_sql_database.database", &database),
testAccCheckGoogleSqlDatabaseEquals(
"google_sql_database.database", &database),
),
},
},
})
}
func testAccCheckGoogleSqlDatabaseEquals(n string,
database *sqladmin.Database) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Resource not found: %s", n)
}
database_name := rs.Primary.Attributes["name"]
instance_name := rs.Primary.Attributes["instance"]
if database_name != database.Name {
return fmt.Errorf("Error name mismatch, (%s, %s)", database_name, database.Name)
}
if instance_name != database.Instance {
return fmt.Errorf("Error instance_name mismatch, (%s, %s)", instance_name, database.Instance)
}
return nil
}
}
func testAccCheckGoogleSqlDatabaseExists(n string,
database *sqladmin.Database) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Resource not found: %s", n)
}
database_name := rs.Primary.Attributes["name"]
instance_name := rs.Primary.Attributes["instance"]
found, err := config.clientSqlAdmin.Databases.Get(config.Project,
instance_name, database_name).Do()
if err != nil {
return fmt.Errorf("Not found: %s: %s", n, err)
}
*database = *found
return nil
}
}
func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
config := testAccProvider.Meta().(*Config)
if rs.Type != "google_sql_database" {
continue
}
database_name := rs.Primary.Attributes["name"]
instance_name := rs.Primary.Attributes["instance"]
_, err := config.clientSqlAdmin.Databases.Get(config.Project,
instance_name, database_name).Do()
if err == nil {
return fmt.Errorf("Database resource still exists")
}
}
return nil
}
var testGoogleSqlDatabase_basic = fmt.Sprintf(`
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
settings {
tier = "D0"
}
}
resource "google_sql_database" "database" {
name = "database1"
instance = "${google_sql_database_instance.instance.name}"
}
`, genRandInt())

View File

@ -2,9 +2,7 @@ package google
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@ -20,7 +18,7 @@ var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com"
var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com"
var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand.NewSource(time.Now().UnixNano())).Int())
var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", genRandInt())
func TestAccGoogleStorageBucketAcl_basic(t *testing.T) {
resource.Test(t, resource.TestCase{

View File

@ -3,9 +3,7 @@ package google
import (
"bytes"
"fmt"
"math/rand"
"testing"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@ -207,7 +205,7 @@ func testAccGoogleStorageDestroy(s *terraform.State) error {
return nil
}
var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
var randInt = genRandInt()
var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {

View File

@ -0,0 +1,80 @@
package google
import (
"bytes"
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"google.golang.org/api/sqladmin/v1beta4"
)
type SqlAdminOperationWaiter struct {
Service *sqladmin.Service
Op *sqladmin.Operation
Project string
}
func (w *SqlAdminOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
return func() (interface{}, string, error) {
var op *sqladmin.Operation
var err error
log.Printf("[DEBUG] self_link: %s", w.Op.SelfLink)
op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do()
if err != nil {
return nil, "", err
}
log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name)
return op, op.Status, nil
}
}
func (w *SqlAdminOperationWaiter) Conf() *resource.StateChangeConf {
return &resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: "DONE",
Refresh: w.RefreshFunc(),
}
}
// SqlAdminOperationError wraps sqladmin.OperationError and implements the
// error interface so it can be returned.
type SqlAdminOperationError sqladmin.OperationErrors
func (e SqlAdminOperationError) Error() string {
var buf bytes.Buffer
for _, err := range e.Errors {
buf.WriteString(err.Message + "\n")
}
return buf.String()
}
func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity string) error {
w := &SqlAdminOperationWaiter{
Service: config.clientSqlAdmin,
Op: op,
Project: config.Project,
}
state := w.Conf()
state.Timeout = 5 * time.Minute
state.MinTimeout = 2 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for %s: %s", activity, err)
}
op = opRaw.(*sqladmin.Operation)
if op.Error != nil {
return SqlAdminOperationError(*op.Error)
}
return nil
}

View File

@ -0,0 +1,11 @@
package google
import (
"time"
"math/rand"
)
func genRandInt() int {
return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
}

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_address"
sidebar_current: "docs-google-resource-address"
sidebar_current: "docs-google-compute-address"
description: |-
Creates a static IP address resource for Google Compute Engine.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_autoscaler"
sidebar_current: "docs-google-resource-compute-autoscaler"
sidebar_current: "docs-google-compute-autoscaler"
description: |-
Manages an Autoscaler within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_backend_service"
sidebar_current: "docs-google-resource-backend-service"
sidebar_current: "docs-google-compute-backend-service"
description: |-
Creates a Backend Service resource for Google Compute Engine.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_disk"
sidebar_current: "docs-google-resource-disk"
sidebar_current: "docs-google-compute-disk"
description: |-
Creates a new persistent disk within GCE, based on another disk.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_firewall"
sidebar_current: "docs-google-resource-firewall"
sidebar_current: "docs-google-compute-firewall"
description: |-
Manages a firewall resource within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_forwarding_rule"
sidebar_current: "docs-google-resource-forwarding_rule"
sidebar_current: "docs-google-compute-forwarding-rule"
description: |-
Manages a Target Pool within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_http_health_check"
sidebar_current: "docs-google-resource-http_health_check"
sidebar_current: "docs-google-compute-http-health-check"
description: |-
Manages an HTTP Health Check within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_instance"
sidebar_current: "docs-google-resource-instance"
sidebar_current: "docs-google-compute-instance"
description: |-
Manages a VM instance resource within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_instance_group_manager"
sidebar_current: "docs-google-resource-compute-instance_group_manager"
sidebar_current: "docs-google-compute-instance-group-manager"
description: |-
Manages an Instance Group within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_instance_template"
sidebar_current: "docs-google-resource-instance_template"
sidebar_current: "docs-google-compute-instance-template"
description: |-
Manages a VM instance template resource within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_network"
sidebar_current: "docs-google-resource-network"
sidebar_current: "docs-google-compute-network"
description: |-
Manages a network within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_project_metadata"
sidebar_current: "docs-google-resource-project-metadata"
sidebar_current: "docs-google-compute-project-metadata"
description: |-
Manages common instance metadata
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_route"
sidebar_current: "docs-google-resource-route"
sidebar_current: "docs-google-compute-route"
description: |-
Manages a network route within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_target_pool"
sidebar_current: "docs-google-resource-target_pool"
sidebar_current: "docs-google-compute-target-pool"
description: |-
Manages a Target Pool within GCE.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_vpn_gateway"
sidebar_current: "docs-google-resource-vpn-gateway"
sidebar_current: "docs-google-compute-vpn-gateway"
description: |-
Manages a VPN Gateway in the GCE network
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_compute_vpn_tunnel"
sidebar_current: "docs-google-resource-vpn-tunnel"
sidebar_current: "docs-google-compute-vpn-tunnel"
description: |-
Manages a VPN Tunnel to the GCE network
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_container_cluster"
sidebar_current: "docs-google-resource-container-cluster"
sidebar_current: "docs-google-container-cluster"
description: |-
Creates a GKE cluster.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_dns_managed_zone"
sidebar_current: "docs-google-resource-dns-managed-zone"
sidebar_current: "docs-google-dns-managed-zone"
description: |-
Manages a zone within Google Cloud DNS.
---

View File

@ -0,0 +1,45 @@
---
layout: "google"
page_title: "Google: google_sql_database"
sidebar_current: "docs-google-sql-database"
description: |-
Creates a new SQL database in Google Cloud SQL.
---
# google\_sql\_database
Creates a new Google SQL Database on a Google SQL Database Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/admin-api/v1beta4/databases).
## Example Usage
Example creating a SQL Database.
```
resource "google_sql_database_instance" "master" {
name = "master-instance"
settings {
tier = "D0"
}
}
resource "google_sql_database" "users" {
name = "image-store-bucket"
instance = "${google_sql_database_instance.master.name}"
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name of the database.
* `instance` - (Required) The name of the containing instance.
## Attributes Reference
The following attributes are exported:
* `self_link` - The URI of the created resource.

View File

@ -0,0 +1,147 @@
---
layout: "google"
page_title: "Google: google_sql_database_instance"
sidebar_current: "docs-google-sql-database-instance"
description: |-
Creates a new SQL database instance in Google Cloud SQL.
---
# google\_sql\_database\_instance
Creates a new Google SQL Database Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/admin-api/v1beta4/instances).
## Example Usage
Example creating a SQL Database Instance.
```
resource "google_sql_database_instance" "master" {
name = "master-instance"
settings {
tier = "D0"
}
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name of the instance.
* `region` - (Required) The region the instance will sit in. Note, this does
not line up with the Google Compute Engine (GCE) regions - your options are
`us-central`, `asia-west1`, `europe-west1`, and `us-east1`.
* `master_instance_name` - (Optional) The name of the instance that will act as
the master in the replication setup. Note, this requires the master to have
`binary_log_enabled` set, as well as existing backups.
* `database_version` - (Optional, Default: `MYSQL_5_5`) The MySQL version to
use. Can be either `MYSQL_5_5` or `MYSQL_5_6`.
* `pricing_plan` - (Optional) Pricing plan for this instance, can be one of
`PER_USE` or `PACKAGE`.
* `replication_type` - (Optional) Replication type for this instance, can be one of
`ASYNCHRONOUS` or `SYNCHRONOUS`.
The required `settings` block supports:
* `tier` - (Required) The machine tier to use. See
[pricing](https://cloud.google.com/sql/pricing) for more details and
supported versions.
* `activation_policy` - (Optional) This specifies when the instance should be
active. Can be one of `ALWAYS`, `NEVER`, or `ON_DEMAND`.
* `authorized_gae_applications` - (Optional) A list of Google App Engine (GAE) project names that
are allowed to access this instance.
* `crash_safe_replication` - (Optional) Specific to read instances, indicates
whether crash-safe replication flags are enabled.
The optional `settings.database_flags` sublist supports:
* `name` - (Optional) Name of the flag.
* `value` - (Optional) Value of the flag.
The optional `settings.backup_configuration` subblock supports:
* `binary_log_enabled` - (Optional) True iff binary logging is enabled. If
`enabled` is false, this must be false as well.
* `enabled` - (Optional) True iff backups are enabled.
* `start_time` - (Optional) `HH:MM` format time indicating when the backup
window starts.
The optional `settings.ip_configuration` subblock supports:
* `ipv4_enabled` - (Optional) True iff the instance should be assigned an IP
address.
* `require_ssl` - (Optional) True iff mysqld should default to `REQUIRE X509`
for users connecting over IP.
The optional `settings.ip_configuration.authorized_networks[]` sublist supports:
* `expiration_time` - (Optional) The [RFC
3339](https://tools.ietf.org/html/rfc3339) formatted date time string
indicating when this whitelist expires.
* `name` - (Optional) A name for this whitelist entry.
* `value` - (Optional) A CIDR notation IPv4 or IPv6 address that is allowed to
access this instance. Must be set even if the other two attributes are not,
for the whitelist entry to take effect.
The optional `settings.location_preference` subblock supports:
* `follow_gae_application` - (Optional) A Google App Engine application whose
zone this instance should follow. Must be in the same region as this instance.
* `zone` - (Optional) The preferred Compute Engine
[zone](https://cloud.google.com/compute/docs/zones?hl=en).
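Putting the `settings` sub-blocks above together, a fuller, purely illustrative configuration might look like the following; the flag name, CIDR, and zone are assumptions, not recommendations.
```
resource "google_sql_database_instance" "configured" {
    name   = "configured-instance"
    region = "us-central"

    settings {
        tier = "D0"

        database_flags {
            name  = "slow_query_log"    # hypothetical flag name
            value = "on"
        }

        backup_configuration {
            enabled            = true
            start_time         = "04:00"
            binary_log_enabled = true    # requires enabled = true
        }

        ip_configuration {
            ipv4_enabled = true
            require_ssl  = false

            authorized_networks {
                name  = "office"
                value = "203.0.113.0/24"    # illustrative CIDR
            }
        }

        location_preference {
            zone = "us-central1-a"    # must be in the instance's region
        }
    }
}
```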
The optional `replica_configuration` block requires `master_instance_name` to
be set, cannot be updated after creation, and supports:
* `ca_certificate` - (Optional) PEM representation of the trusted CA's x509
certificate.
* `client_certificate` - (Optional) PEM representation of the slave's x509
certificate.
* `client_key` - (Optional) PEM representation of the slave's private key. The
corresponding public key is encoded in `client_certificate`.
* `connect_retry_interval` - (Optional, Default: 60) The number of seconds
between connect retries.
* `dump_file_path` - (Optional) Path to a SQL file in GCS from which slave
instances are created. Format is `gs://bucket/filename`.
* `master_heartbeat_period` - (Optional) Time in ms between replication
heartbeats.
* `password` - (Optional) Password for the replication connection.
* `ssl_cipher` - (Optional) Permissible ciphers for use in SSL encryption.
* `username` - (Optional) Username for replication connection.
* `verify_server_certificate` - (Optional) True iff the master's common name
value is checked during the SSL handshake.
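For a read replica, a hedged sketch might look like this; it assumes the `master` instance above has backups and `binary_log_enabled` turned on, and omits the certificate/key fields, which would normally carry real PEM contents.
```
resource "google_sql_database_instance" "replica" {
    name                 = "replica-instance"
    region               = "us-central"
    master_instance_name = "${google_sql_database_instance.master.name}"

    settings {
        tier = "D0"
    }

    replica_configuration {
        connect_retry_interval    = 60
        verify_server_certificate = false
    }
}
```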
## Attributes Reference
The following attributes are exported:
* `self_link` - The URI of the created resource.
The `settings` block exports:
* `version` - Used to make sure changes to the `settings` block are atomic.

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_storage_bucket"
sidebar_current: "docs-google-resource-storage"
sidebar_current: "docs-google-storage-bucket"
description: |-
Creates a new bucket in Google Cloud Storage.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_storage_bucket_acl"
sidebar_current: "docs-google-resource-storage-acl"
sidebar_current: "docs-google-storage-bucket-acl"
description: |-
Creates a new bucket ACL in Google Cloud Storage.
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_storage_bucket_object"
sidebar_current: "docs-google-resource-storage-object"
sidebar_current: "docs-google-storage-bucket-object"
description: |-
Creates a new object inside a specified bucket
---

View File

@ -1,7 +1,7 @@
---
layout: "google"
page_title: "Google: google_storage_object_acl"
sidebar_current: "docs-google-resource-storage-acl"
sidebar_current: "docs-google-storage-object-acl"
description: |-
Creates a new object ACL in Google Cloud Storage.
---

View File

@ -10,98 +10,126 @@
<a href="/docs/providers/google/index.html">Google Provider</a>
</li>
<li<%= sidebar_current(/^docs-google-resource/) %>>
<a href="#">Resources</a>
<li<%= sidebar_current(/^docs-google-compute/) %>>
<a href="#">Google Compute Engine Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-google-resource-address") %>>
<li<%= sidebar_current("docs-google-compute-address") %>>
<a href="/docs/providers/google/r/compute_address.html">google_compute_address</a>
</li>
<li<%= sidebar_current("docs-google-resource-compute-autoscaler") %>>
<li<%= sidebar_current("docs-google-compute-autoscaler") %>>
<a href="/docs/providers/google/r/compute_autoscaler.html">google_compute_autoscaler</a>
</li>
<li<%= sidebar_current("docs-google-resource-backend-service") %>>
<li<%= sidebar_current("docs-google-compute-backend-service") %>>
<a href="/docs/providers/google/r/compute_backend_service.html">google_compute_backend_service</a>
</li>
<li<%= sidebar_current("docs-google-resource-disk") %>>
<li<%= sidebar_current("docs-google-compute-disk") %>>
<a href="/docs/providers/google/r/compute_disk.html">google_compute_disk</a>
</li>
<li<%= sidebar_current("docs-google-resource-firewall") %>>
<li<%= sidebar_current("docs-google-compute-firewall") %>>
<a href="/docs/providers/google/r/compute_firewall.html">google_compute_firewall</a>
</li>
<li<%= sidebar_current("docs-google-resource-forwarding-rule") %>>
<li<%= sidebar_current("docs-google-compute-forwarding-rule") %>>
<a href="/docs/providers/google/r/compute_forwarding_rule.html">google_compute_forwarding_rule</a>
</li>
<li<%= sidebar_current("docs-google-resource-http-health-check") %>>
<li<%= sidebar_current("docs-google-compute-http-health-check") %>>
<a href="/docs/providers/google/r/compute_http_health_check.html">google_compute_http_health_check</a>
</li>
<li<%= sidebar_current("docs-google-resource-instance") %>>
<li<%= sidebar_current("docs-google-compute-instance") %>>
<a href="/docs/providers/google/r/compute_instance.html">google_compute_instance</a>
</li>
<li<%= sidebar_current("docs-google-resource--compute-instance-group-manager") %>>
<li<%= sidebar_current("docs-google-compute-instance-group-manager") %>>
<a href="/docs/providers/google/r/compute_instance_group_manager.html">google_compute_instance_group_manager</a>
</li>
<li<%= sidebar_current("docs-google-resource-instance-template") %>>
<li<%= sidebar_current("docs-google-compute-instance-template") %>>
<a href="/docs/providers/google/r/compute_instance_template.html">google_compute_instance_template</a>
</li>
<li<%= sidebar_current("docs-google-resource-network") %>>
<li<%= sidebar_current("docs-google-compute-network") %>>
<a href="/docs/providers/google/r/compute_network.html">google_compute_network</a>
</li>
<li<%= sidebar_current("docs-google-resource-project-metadata") %>>
<li<%= sidebar_current("docs-google-compute-project-metadata") %>>
<a href="/docs/providers/google/r/compute_project_metadata.html">google_compute_project_metadata</a>
</li>
<li<%= sidebar_current("docs-google-resource-route") %>>
<li<%= sidebar_current("docs-google-compute-route") %>>
<a href="/docs/providers/google/r/compute_route.html">google_compute_route</a>
</li>
<li<%= sidebar_current("docs-google-resource-target-pool") %>>
<li<%= sidebar_current("docs-google-compute-target-pool") %>>
<a href="/docs/providers/google/r/compute_target_pool.html">google_compute_target_pool</a>
</li>
<li<%= sidebar_current("docs-google-resource-vpn-gateway") %>>
<li<%= sidebar_current("docs-google-compute-vpn-gateway") %>>
<a href="/docs/providers/google/r/compute_vpn_gateway.html">google_compute_vpn_gateway</a>
</li>
<li<%= sidebar_current("docs-google-resource-vpn-tunnel") %>>
<li<%= sidebar_current("docs-google-compute-vpn-tunnel") %>>
<a href="/docs/providers/google/r/compute_vpn_tunnel.html">google_compute_vpn_tunnel</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-google-resource-container-cluster") %>>
<li<%= sidebar_current(/^docs-google-container/) %>>
<a href="#">Google Container Engine Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-google-container-cluster") %>>
<a href="/docs/providers/google/r/container_cluster.html">google_container_cluster</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-google-resource-dns-managed-zone") %>>
<li<%= sidebar_current(/^docs-google-dns/) %>>
<a href="#">Google DNS Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-google-dns-managed-zone") %>>
<a href="/docs/providers/google/r/dns_managed_zone.html">google_dns_managed_zone</a>
</li>
<li<%= sidebar_current("docs-google-resource-dns-record-set") %>>
<li<%= sidebar_current("docs-google-dns-record-set") %>>
<a href="/docs/providers/google/r/dns_record_set.html">google_dns_record_set</a>
</li>
</ul>
</li>
<li<%= sidebar_current("docs-google-resource-storage-bucket") %>>
<li<%= sidebar_current(/^docs-google-sql/) %>>
<a href="#">Google SQL Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-google-sql-database") %>>
<a href="/docs/providers/google/r/sql_database.html">google_sql_database</a>
</li>
<li<%= sidebar_current("docs-google-sql-database-instance") %>>
<a href="/docs/providers/google/r/sql_database_instance.html">google_sql_database_instance</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-google-storage/) %>>
<a href="#">Google Storaege Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-google-storage-bucket") %>>
<a href="/docs/providers/google/r/storage_bucket.html">google_storage_bucket</a>
</li>
<li<%= sidebar_current("docs-google-resource-storage-bucket-acl") %>>
<li<%= sidebar_current("docs-google-storage-bucket-acl") %>>
<a href="/docs/providers/google/r/storage_bucket_acl.html">google_storage_bucket_acl</a>
</li>
<li<%= sidebar_current("docs-google-resource-storage-bucket-object") %>>
<li<%= sidebar_current("docs-google-storage-bucket-object") %>>
<a href="/docs/providers/google/r/storage_bucket_object.html">google_storage_bucket_object</a>
</li>
<li<%= sidebar_current("docs-google-resource-storage-object-acl") %>>
<li<%= sidebar_current("docs-google-storage-object-acl") %>>
<a href="/docs/providers/google/r/storage_object_acl.html">google_storage_object_acl</a>
</li>
</ul>