From 793ce368fcfb5110c1531d0d92f6b1eb400c6078 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sun, 2 Apr 2017 07:08:16 +0100 Subject: [PATCH 1/2] kubernetes: Make generatable name optional in metadataSchema --- .../resource_kubernetes_namespace.go | 2 +- .../providers/kubernetes/schema_metadata.go | 34 +++++++++++-------- builtin/providers/kubernetes/structures.go | 4 ++- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/builtin/providers/kubernetes/resource_kubernetes_namespace.go b/builtin/providers/kubernetes/resource_kubernetes_namespace.go index a26ca5751..0f1a7366e 100644 --- a/builtin/providers/kubernetes/resource_kubernetes_namespace.go +++ b/builtin/providers/kubernetes/resource_kubernetes_namespace.go @@ -25,7 +25,7 @@ func resourceKubernetesNamespace() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "metadata": metadataSchema("namespace"), + "metadata": metadataSchema("namespace", true), }, } } diff --git a/builtin/providers/kubernetes/schema_metadata.go b/builtin/providers/kubernetes/schema_metadata.go index 27644f83a..684acd6ae 100644 --- a/builtin/providers/kubernetes/schema_metadata.go +++ b/builtin/providers/kubernetes/schema_metadata.go @@ -26,13 +26,12 @@ func metadataFields(objectName string) map[string]*schema.Schema { ValidateFunc: validateLabels, }, "name": { - Type: schema.TypeString, - Description: fmt.Sprintf("Name of the %s, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", objectName), - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validateName, - ConflictsWith: []string{"metadata.generate_name"}, + Type: schema.TypeString, + Description: fmt.Sprintf("Name of the %s, must be unique. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", objectName), + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateName, }, "resource_version": { Type: schema.TypeString, @@ -52,15 +51,19 @@ func metadataFields(objectName string) map[string]*schema.Schema { } } -func metadataSchema(objectName string) *schema.Schema { +func metadataSchema(objectName string, generatableName bool) *schema.Schema { fields := metadataFields(objectName) - fields["generate_name"] = &schema.Schema{ - Type: schema.TypeString, - Description: "Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency", - Optional: true, - ForceNew: true, - ValidateFunc: validateGenerateName, - ConflictsWith: []string{"metadata.name"}, + + if generatableName { + fields["generate_name"] = &schema.Schema{ + Type: schema.TypeString, + Description: "Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. 
Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency", + Optional: true, + ForceNew: true, + ValidateFunc: validateGenerateName, + ConflictsWith: []string{"metadata.name"}, + } + fields["name"].ConflictsWith = []string{"metadata.generate_name"} } return &schema.Schema{ @@ -92,6 +95,7 @@ func namespacedMetadataSchema(objectName string, generatableName bool) *schema.S ValidateFunc: validateGenerateName, ConflictsWith: []string{"metadata.name"}, } + fields["name"].ConflictsWith = []string{"metadata.generate_name"} } return &schema.Schema{ diff --git a/builtin/providers/kubernetes/structures.go b/builtin/providers/kubernetes/structures.go index 878890d56..4c724ddbb 100644 --- a/builtin/providers/kubernetes/structures.go +++ b/builtin/providers/kubernetes/structures.go @@ -68,7 +68,9 @@ func expandStringMap(m map[string]interface{}) map[string]string { func flattenMetadata(meta api.ObjectMeta) []map[string]interface{} { m := make(map[string]interface{}) m["annotations"] = filterAnnotations(meta.Annotations) - m["generate_name"] = meta.GenerateName + if meta.GenerateName != "" { + m["generate_name"] = meta.GenerateName + } m["labels"] = meta.Labels m["name"] = meta.Name m["resource_version"] = meta.ResourceVersion From 71a05e6f672d5a2f07b1f65e366d18875400ccf4 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sun, 2 Apr 2017 07:08:55 +0100 Subject: [PATCH 2/2] provider/kubernetes: Add support for persistent_volume --- builtin/providers/kubernetes/provider.go | 7 +- .../resource_kubernetes_persistent_volume.go | 195 ++++ ...ource_kubernetes_persistent_volume_test.go | 368 +++++++ .../kubernetes/schema_volume_source.go | 557 ++++++++++ .../structure_persistent_volume_spec.go | 988 ++++++++++++++++++ builtin/providers/kubernetes/structures.go | 79 ++ builtin/providers/kubernetes/validators.go | 13 + .../r/persistent_volume.html.markdown | 256 +++++ website/source/layouts/kubernetes.erb | 3 + 9 files changed, 2463 
insertions(+), 3 deletions(-) create mode 100644 builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go create mode 100644 builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go create mode 100644 builtin/providers/kubernetes/schema_volume_source.go create mode 100644 builtin/providers/kubernetes/structure_persistent_volume_spec.go create mode 100644 website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown diff --git a/builtin/providers/kubernetes/provider.go b/builtin/providers/kubernetes/provider.go index 46a472f78..61c18e80a 100644 --- a/builtin/providers/kubernetes/provider.go +++ b/builtin/providers/kubernetes/provider.go @@ -86,9 +86,10 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "kubernetes_config_map": resourceKubernetesConfigMap(), - "kubernetes_namespace": resourceKubernetesNamespace(), - "kubernetes_secret": resourceKubernetesSecret(), + "kubernetes_config_map": resourceKubernetesConfigMap(), + "kubernetes_namespace": resourceKubernetesNamespace(), + "kubernetes_persistent_volume": resourceKubernetesPersistentVolume(), + "kubernetes_secret": resourceKubernetesSecret(), }, ConfigureFunc: providerConfigure, } diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go new file mode 100644 index 000000000..8a74c9843 --- /dev/null +++ b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go @@ -0,0 +1,195 @@ +package kubernetes + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + pkgApi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + api "k8s.io/kubernetes/pkg/api/v1" + kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" +) + +func resourceKubernetesPersistentVolume() *schema.Resource { + return &schema.Resource{ + 
Create: resourceKubernetesPersistentVolumeCreate, + Read: resourceKubernetesPersistentVolumeRead, + Exists: resourceKubernetesPersistentVolumeExists, + Update: resourceKubernetesPersistentVolumeUpdate, + Delete: resourceKubernetesPersistentVolumeDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "metadata": metadataSchema("persistent volume", false), + "spec": { + Type: schema.TypeList, + Description: "Spec of the persistent volume owned by the cluster", + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_modes": { + Type: schema.TypeSet, + Description: "Contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "capacity": { + Type: schema.TypeMap, + Description: "A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity", + Required: true, + Elem: schema.TypeString, + ValidateFunc: validateResourceList, + }, + "persistent_volume_reclaim_policy": { + Type: schema.TypeString, + Description: "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy", + Optional: true, + Default: "Retain", + }, + "persistent_volume_source": { + Type: schema.TypeList, + Description: "The specification of a persistent volume.", + Required: true, + MaxItems: 1, + Elem: persistentVolumeSourceSchema(), + }, + }, + }, + }, + }, + } +} + +func resourceKubernetesPersistentVolumeCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + spec, err := expandPersistentVolumeSpec(d.Get("spec").([]interface{})) + if err != nil { + return err + } + volume := api.PersistentVolume{ + ObjectMeta: metadata, + Spec: spec, + } + + log.Printf("[INFO] Creating new persistent volume: %#v", volume) + out, err := conn.CoreV1().PersistentVolumes().Create(&volume) + if err != nil { + return err + } + log.Printf("[INFO] Submitted new persistent volume: %#v", out) + + stateConf := &resource.StateChangeConf{ + Target: []string{"Available", "Bound"}, + Pending: []string{"Pending"}, + Timeout: 5 * time.Minute, + Refresh: func() (interface{}, string, error) { + out, err := conn.CoreV1().PersistentVolumes().Get(metadata.Name) + if err != nil { + log.Printf("[ERROR] Received error: %#v", err) + return out, "Error", err + } + + statusPhase := fmt.Sprintf("%v", out.Status.Phase) + log.Printf("[DEBUG] Persistent volume %s status received: %#v", out.Name, statusPhase) + return out, statusPhase, nil + }, + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + log.Printf("[INFO] Persistent volume %s created", out.Name) + + d.SetId(out.Name) + + return resourceKubernetesPersistentVolumeRead(d, meta) +} + +func resourceKubernetesPersistentVolumeRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + name := d.Id() + log.Printf("[INFO] Reading persistent volume %s", name) + volume, err := 
conn.CoreV1().PersistentVolumes().Get(name) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return err + } + log.Printf("[INFO] Received persistent volume: %#v", volume) + err = d.Set("metadata", flattenMetadata(volume.ObjectMeta)) + if err != nil { + return err + } + err = d.Set("spec", flattenPersistentVolumeSpec(volume.Spec)) + if err != nil { + return err + } + + return nil +} + +func resourceKubernetesPersistentVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + ops := patchMetadata("metadata.0.", "/metadata/", d) + if d.HasChange("spec") { + specOps, err := patchPersistentVolumeSpec("/spec", "spec", d) + if err != nil { + return err + } + ops = append(ops, specOps...) + } + data, err := ops.MarshalJSON() + if err != nil { + return fmt.Errorf("Failed to marshal update operations: %s", err) + } + + log.Printf("[INFO] Updating persistent volume %s: %s", d.Id(), ops) + out, err := conn.CoreV1().PersistentVolumes().Patch(d.Id(), pkgApi.JSONPatchType, data) + if err != nil { + return err + } + log.Printf("[INFO] Submitted updated persistent volume: %#v", out) + d.SetId(out.Name) + + return resourceKubernetesPersistentVolumeRead(d, meta) +} + +func resourceKubernetesPersistentVolumeDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + name := d.Id() + log.Printf("[INFO] Deleting persistent volume: %#v", name) + err := conn.CoreV1().PersistentVolumes().Delete(name, &api.DeleteOptions{}) + if err != nil { + return err + } + + log.Printf("[INFO] Persistent volume %s deleted", name) + + d.SetId("") + return nil +} + +func resourceKubernetesPersistentVolumeExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*kubernetes.Clientset) + + name := d.Id() + log.Printf("[INFO] Checking persistent volume %s", name) + _, err := conn.CoreV1().PersistentVolumes().Get(name) + if err != nil { + if statusErr, ok := 
err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { + return false, nil + } + log.Printf("[DEBUG] Received error: %#v", err) + } + return true, err +} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go new file mode 100644 index 000000000..17fb4034c --- /dev/null +++ b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go @@ -0,0 +1,368 @@ +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + api "k8s.io/kubernetes/pkg/api/v1" + kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" +) + +func TestAccKubernetesPersistentVolume_basic(t *testing.T) { + var conf api.PersistentVolume + randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := fmt.Sprintf("tf-acc-test-%s", randString) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeConfig_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "2"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationTwo", "two"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), + 
resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelTwo", "two"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelThree", "three"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.pd_name", "test123"), + ), + }, + { + Config: testAccKubernetesPersistentVolumeConfig_modified(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), + 
resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "2"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationTwo", "two"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelTwo", "two"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelThree", "three"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "42Mi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "2"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1245328686", "ReadWriteOnce"), + 
resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.fs_type", "ntfs"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.pd_name", "test123"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.read_only", "true"), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolume_importBasic(t *testing.T) { + resourceName := "kubernetes_persistent_volume.test" + randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := fmt.Sprintf("tf-acc-test-import-%s", randString) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeConfig_basic(name), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccKubernetesPersistentVolume_volumeSource(t *testing.T) { + var conf api.PersistentVolume + randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := fmt.Sprintf("tf-acc-test-%s", randString) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeConfig_volumeSource(name), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.aws_elastic_block_store.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.aws_elastic_block_store.0.volume_id", "vol-12345678"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.aws_elastic_block_store.0.fs_type", "ntfs"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.aws_elastic_block_store.0.partition", "1"), + 
resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.aws_elastic_block_store.0.read_only", "true"), + ), + }, + { + Config: testAccKubernetesPersistentVolumeConfig_volumeSource_modified(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.host_path.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.host_path.0.path", "/custom/testing/path"), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolume_cephFsSecretRef(t 
*testing.T) { + var conf api.PersistentVolume + randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := fmt.Sprintf("tf-acc-test-%s", randString) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeConfig_cephFsSecretRef(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "2Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", 
"spec.0.persistent_volume_source.0.ceph_fs.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.#", "2"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.2848821021", "10.16.154.78:6789"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.4263435410", "10.16.154.82:6789"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.secret_ref.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.secret_ref.0.name", "ceph-secret"), + ), + }, + }, + }) +} + +func testAccCheckKubernetesPersistentVolumeDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*kubernetes.Clientset) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "kubernetes_persistent_volume" { + continue + } + name := rs.Primary.ID + resp, err := conn.CoreV1().PersistentVolumes().Get(name) + if err == nil { + if resp.Name == rs.Primary.ID { + return fmt.Errorf("Persistent Volume still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func testAccCheckKubernetesPersistentVolumeExists(n string, obj *api.PersistentVolume) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*kubernetes.Clientset) + name := rs.Primary.ID + out, err := conn.CoreV1().PersistentVolumes().Get(name) + if err != nil { + return err + } + + *obj = *out + return nil + } +} + +func testAccKubernetesPersistentVolumeConfig_basic(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + annotations { + TestAnnotationOne = "one" + TestAnnotationTwo = 
"two" + } + labels { + TestLabelOne = "one" + TestLabelTwo = "two" + TestLabelThree = "three" + } + name = "%s" + } + spec { + capacity { + storage = "123Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +}`, name) +} + +func testAccKubernetesPersistentVolumeConfig_modified(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + annotations { + TestAnnotationOne = "one" + TestAnnotationTwo = "two" + } + labels { + TestLabelOne = "one" + TestLabelTwo = "two" + TestLabelThree = "three" + } + name = "%s" + } + spec { + capacity { + storage = "42Mi" + } + access_modes = ["ReadWriteMany", "ReadWriteOnce"] + persistent_volume_source { + gce_persistent_disk { + fs_type = "ntfs" + pd_name = "test123" + read_only = true + } + } + } +}`, name) +} + +func testAccKubernetesPersistentVolumeConfig_volumeSource(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "123Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + aws_elastic_block_store { + volume_id = "vol-12345678" + fs_type = "ntfs" + partition = 1 + read_only = true + } + } + } +}`, name) +} + +func testAccKubernetesPersistentVolumeConfig_volumeSource_modified(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "123Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + host_path { + path = "/custom/testing/path" + } + } + } +}`, name) +} + +func testAccKubernetesPersistentVolumeConfig_cephFsSecretRef(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "2Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + 
ceph_fs { + monitors = ["10.16.154.78:6789", "10.16.154.82:6789"] + secret_ref { + name = "ceph-secret" + } + } + } + } +}`, name) +} diff --git a/builtin/providers/kubernetes/schema_volume_source.go b/builtin/providers/kubernetes/schema_volume_source.go new file mode 100644 index 000000000..408a46850 --- /dev/null +++ b/builtin/providers/kubernetes/schema_volume_source.go @@ -0,0 +1,557 @@ +package kubernetes + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func persistentVolumeSourceSchema() *schema.Resource { + volumeSources["host_path"] = &schema.Schema{ + Type: schema.TypeList, + Description: "Represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Description: "Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + Optional: true, + }, + }, + }, + } + return &schema.Resource{ + Schema: volumeSources, + } +} + +// Common volume sources between Persistent Volumes and Pod Volumes +var volumeSources = map[string]*schema.Schema{ + "aws_elastic_block_store": { + Type: schema.TypeList, + Description: "Represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + Optional: true, + }, + "partition": { + Type: schema.TypeInt, + Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to set the read-only property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + Optional: true, + }, + "volume_id": { + Type: schema.TypeString, + Description: "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + Required: true, + }, + }, + }, + }, + "azure_disk": { + Type: schema.TypeList, + Description: "Represents an Azure Data Disk mount on the host and bind mount to the pod.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "caching_mode": { + Type: schema.TypeString, + Description: "Host Caching mode: None, Read Only, Read Write.", + Required: true, + }, + "data_disk_uri": { + Type: schema.TypeString, + Description: "The URI the data disk in the blob storage", + Required: true, + }, + "disk_name": { + Type: schema.TypeString, + Description: "The Name of the data disk in the blob storage", + Required: true, + }, + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", + Optional: true, + Default: false, + }, + }, + }, + }, + "azure_file": { + Type: schema.TypeList, + Description: "Represents an Azure File Service mount on the host and bind mount to the pod.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", + Optional: true, + }, + "secret_name": { + Type: schema.TypeString, + Description: "The name of secret that contains Azure Storage Account Name and Key", + Required: true, + }, + "share_name": { + Type: schema.TypeString, + Description: "Share Name", + Required: true, + }, + }, + }, + }, + "ceph_fs": { + Type: schema.TypeList, + Description: "Represents a Ceph FS mount on the host that shares a pod's lifetime", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "monitors": { + Type: schema.TypeSet, + Description: "Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "path": { + Type: schema.TypeString, + Description: "Used as the mounted root, rather than the full Ceph tree, default is /", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to `false` (read/write). 
More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + Optional: true, + }, + "secret_file": { + Type: schema.TypeString, + Description: "The path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + Optional: true, + }, + "secret_ref": { + Type: schema.TypeList, + Description: "Reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Optional: true, + }, + }, + }, + }, + "user": { + Type: schema.TypeString, + Description: "User is the rados user name, default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + Optional: true, + }, + }, + }, + }, + "cinder": { + Type: schema.TypeList, + Description: "Represents a cinder volume attached and mounted on kubelets host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). 
More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + Optional: true, + }, + "volume_id": { + Type: schema.TypeString, + Description: "Volume ID used to identify the volume in Cinder. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + Required: true, + }, + }, + }, + }, + "fc": { + Type: schema.TypeList, + Description: "Represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Optional: true, + }, + "lun": { + Type: schema.TypeInt, + Description: "FC target lun number", + Required: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", + Optional: true, + }, + "target_ww_ns": { + Type: schema.TypeSet, + Description: "FC target worldwide names (WWNs)", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + "flex_volume": { + Type: schema.TypeList, + Description: "Represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver": { + Type: schema.TypeString, + Description: "Driver is the name of the driver to use for this volume.", + Required: true, + }, + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.", + Optional: true, + }, + "options": { + Type: schema.TypeMap, + Description: "Extra command options if any.", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the ReadOnly setting in VolumeMounts. Defaults to false (read/write).", + Optional: true, + }, + "secret_ref": { + Type: schema.TypeList, + Description: "Reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "flocker": { + Type: schema.TypeList, + Description: "Represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_name": { + Type: schema.TypeString, + Description: "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + Optional: true, + }, + "dataset_uuid": { + Type: schema.TypeString, + Description: "UUID of the dataset. This is unique identifier of a Flocker dataset", + Optional: true, + }, + }, + }, + }, + "gce_persistent_disk": { + Type: schema.TypeList, + Description: "Represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + Optional: true, + }, + "partition": { + Type: schema.TypeInt, + Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + Optional: true, + }, + "pd_name": { + Type: schema.TypeString, + Description: "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + Required: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + Optional: true, + }, + }, + }, + }, + "glusterfs": { + Type: schema.TypeList, + Description: "Represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoints_name": { + Type: schema.TypeString, + Description: "The endpoint name that details Glusterfs topology. 
More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + Required: true, + }, + "path": { + Type: schema.TypeString, + Description: "The Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + Required: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + Optional: true, + }, + }, + }, + }, + "iscsi": { + Type: schema.TypeList, + Description: "Represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", + Optional: true, + }, + "iqn": { + Type: schema.TypeString, + Description: "Target iSCSI Qualified Name.", + Required: true, + }, + "iscsi_interface": { + Type: schema.TypeString, + Description: "iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp).", + Optional: true, + Default: "default", + }, + "lun": { + Type: schema.TypeInt, + Description: "iSCSI target lun number.", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false.", + Optional: true, + }, + "target_portal": { + Type: schema.TypeString, + Description: "iSCSI target portal. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Required: true, + }, + }, + }, + }, + "nfs": { + Type: schema.TypeList, + Description: "Represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Description: "Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + Required: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + Optional: true, + }, + "server": { + Type: schema.TypeString, + Description: "Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + Required: true, + }, + }, + }, + }, + "photon_persistent_disk": { + Type: schema.TypeList, + Description: "Represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + Optional: true, + }, + "pd_id": { + Type: schema.TypeString, + Description: "ID that identifies Photon Controller persistent disk", + Required: true, + }, + }, + }, + }, + "quobyte": { + Type: schema.TypeList, + Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Description: "Group to map volume access to Default is no group", + Optional: true, + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + Optional: true, + }, + "registry": { + Type: schema.TypeString, + Description: "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + Required: true, + }, + "user": { + Type: schema.TypeString, + Description: "User to map volume access to Defaults to serivceaccount user", + Optional: true, + }, + "volume": { + Type: schema.TypeString, + Description: "Volume is a string that references an already created Quobyte volume by name.", + Required: true, + }, + }, + }, + }, + "rbd": { + Type: schema.TypeList, + Description: "Represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ceph_monitors": { + Type: schema.TypeSet, + Description: "A collection of Ceph monitors. 
More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd", + Optional: true, + }, + "keyring": { + Type: schema.TypeString, + Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Optional: true, + Computed: true, + }, + "rados_user": { + Type: schema.TypeString, + Description: "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Optional: true, + Default: "admin", + }, + "rbd_image": { + Type: schema.TypeString, + Description: "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Required: true, + }, + "rbd_pool": { + Type: schema.TypeString, + Description: "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.", + Optional: true, + Default: "rbd", + }, + "read_only": { + Type: schema.TypeBool, + Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Optional: true, + Default: false, + }, + "secret_ref": { + Type: schema.TypeList, + Description: "Name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "vsphere_volume": { + Type: schema.TypeList, + Description: "Represents a vSphere volume attached and mounted on kubelets host machine", + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fs_type": { + Type: schema.TypeString, + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Optional: true, + }, + "volume_path": { + Type: schema.TypeString, + Description: "Path that identifies vSphere volume vmdk", + Required: true, + }, + }, + }, + }, +} diff --git a/builtin/providers/kubernetes/structure_persistent_volume_spec.go b/builtin/providers/kubernetes/structure_persistent_volume_spec.go new file mode 100644 index 000000000..f5463d2ac --- /dev/null +++ b/builtin/providers/kubernetes/structure_persistent_volume_spec.go @@ -0,0 +1,988 @@ +package kubernetes + +import ( + "k8s.io/kubernetes/pkg/api/v1" + + "github.com/hashicorp/terraform/helper/schema" +) + +// Flatteners + +func flattenAWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["volume_id"] = in.VolumeID + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.Partition != 0 { + att["partition"] = in.Partition + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenAzureDiskVolumeSource(in *v1.AzureDiskVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["disk_name"] = in.DiskName + 
att["data_disk_uri"] = in.DataDiskURI + att["caching_mode"] = string(*in.CachingMode) + if in.FSType != nil { + att["fs_type"] = *in.FSType + } + if in.ReadOnly != nil { + att["read_only"] = *in.ReadOnly + } + return []interface{}{att} +} + +func flattenAzureFileVolumeSource(in *v1.AzureFileVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["secret_name"] = in.SecretName + att["share_name"] = in.ShareName + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenCephFSVolumeSource(in *v1.CephFSVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["monitors"] = newStringSet(schema.HashString, in.Monitors) + if in.Path != "" { + att["path"] = in.Path + } + if in.User != "" { + att["user"] = in.User + } + if in.SecretFile != "" { + att["secret_file"] = in.SecretFile + } + if in.SecretRef != nil { + att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenCinderVolumeSource(in *v1.CinderVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["volume_id"] = in.VolumeID + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenFCVolumeSource(in *v1.FCVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["target_ww_ns"] = newStringSet(schema.HashString, in.TargetWWNs) + att["lun"] = *in.Lun + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenFlexVolumeSource(in *v1.FlexVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["driver"] = in.Driver + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.SecretRef != nil { + att["secret_ref"] = 
flattenLocalObjectReference(in.SecretRef) + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + if len(in.Options) > 0 { + att["options"] = in.Options + } + return []interface{}{att} +} + +func flattenFlockerVolumeSource(in *v1.FlockerVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["dataset_name"] = in.DatasetName + att["dataset_uuid"] = in.DatasetUUID + return []interface{}{att} +} + +func flattenGCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["pd_name"] = in.PDName + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.Partition != 0 { + att["partition"] = in.Partition + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenGlusterfsVolumeSource(in *v1.GlusterfsVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["endpoints_name"] = in.EndpointsName + att["path"] = in.Path + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenHostPathVolumeSource(in *v1.HostPathVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["path"] = in.Path + return []interface{}{att} +} + +func flattenISCSIVolumeSource(in *v1.ISCSIVolumeSource) []interface{} { + att := make(map[string]interface{}) + if in.TargetPortal != "" { + att["target_portal"] = in.TargetPortal + } + if in.IQN != "" { + att["iqn"] = in.IQN + } + if in.Lun != 0 { + att["lun"] = in.Lun + } + if in.ISCSIInterface != "" { + att["iscsi_interface"] = in.ISCSIInterface + } + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenLocalObjectReference(in *v1.LocalObjectReference) []interface{} { + att := make(map[string]interface{}) + if in.Name != "" { + att["name"] = in.Name + } + return []interface{}{att} +} + +func 
flattenNFSVolumeSource(in *v1.NFSVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["server"] = in.Server + att["path"] = in.Path + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenPersistentVolumeSource(in v1.PersistentVolumeSource) []interface{} { + att := make(map[string]interface{}) + if in.GCEPersistentDisk != nil { + att["gce_persistent_disk"] = flattenGCEPersistentDiskVolumeSource(in.GCEPersistentDisk) + } + if in.AWSElasticBlockStore != nil { + att["aws_elastic_block_store"] = flattenAWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore) + } + if in.HostPath != nil { + att["host_path"] = flattenHostPathVolumeSource(in.HostPath) + } + if in.Glusterfs != nil { + att["glusterfs"] = flattenGlusterfsVolumeSource(in.Glusterfs) + } + if in.NFS != nil { + att["nfs"] = flattenNFSVolumeSource(in.NFS) + } + if in.RBD != nil { + att["rbd"] = flattenRBDVolumeSource(in.RBD) + } + if in.ISCSI != nil { + att["iscsi"] = flattenISCSIVolumeSource(in.ISCSI) + } + if in.Cinder != nil { + att["cinder"] = flattenCinderVolumeSource(in.Cinder) + } + if in.CephFS != nil { + att["ceph_fs"] = flattenCephFSVolumeSource(in.CephFS) + } + if in.FC != nil { + att["fc"] = flattenFCVolumeSource(in.FC) + } + if in.Flocker != nil { + att["flocker"] = flattenFlockerVolumeSource(in.Flocker) + } + if in.FlexVolume != nil { + att["flex_volume"] = flattenFlexVolumeSource(in.FlexVolume) + } + if in.AzureFile != nil { + att["azure_file"] = flattenAzureFileVolumeSource(in.AzureFile) + } + if in.VsphereVolume != nil { + att["vsphere_volume"] = flattenVsphereVirtualDiskVolumeSource(in.VsphereVolume) + } + if in.Quobyte != nil { + att["quobyte"] = flattenQuobyteVolumeSource(in.Quobyte) + } + if in.AzureDisk != nil { + att["azure_disk"] = flattenAzureDiskVolumeSource(in.AzureDisk) + } + if in.PhotonPersistentDisk != nil { + att["photon_persistent_disk"] = flattenPhotonPersistentDiskVolumeSource(in.PhotonPersistentDisk) + 
} + return []interface{}{att} +} + +func flattenPersistentVolumeSpec(in v1.PersistentVolumeSpec) []interface{} { + att := make(map[string]interface{}) + if len(in.Capacity) > 0 { + att["capacity"] = flattenResourceList(in.Capacity) + } + + att["persistent_volume_source"] = flattenPersistentVolumeSource(in.PersistentVolumeSource) + if len(in.AccessModes) > 0 { + att["access_modes"] = flattenPersistentVolumeAccessModes(in.AccessModes) + } + if in.PersistentVolumeReclaimPolicy != "" { + att["persistent_volume_reclaim_policy"] = in.PersistentVolumeReclaimPolicy + } + return []interface{}{att} +} + +func flattenPhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["pd_id"] = in.PdID + if in.FSType != "" { + att["fs_type"] = in.FSType + } + return []interface{}{att} +} + +func flattenQuobyteVolumeSource(in *v1.QuobyteVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["registry"] = in.Registry + att["volume"] = in.Volume + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + if in.User != "" { + att["user"] = in.User + } + if in.Group != "" { + att["group"] = in.Group + } + return []interface{}{att} +} + +func flattenRBDVolumeSource(in *v1.RBDVolumeSource) []interface{} { + att := make(map[string]interface{}) + att["ceph_monitors"] = newStringSet(schema.HashString, in.CephMonitors) + att["rbd_image"] = in.RBDImage + if in.FSType != "" { + att["fs_type"] = in.FSType + } + if in.RBDPool != "" { + att["rbd_pool"] = in.RBDPool + } + if in.RadosUser != "" { + att["rados_user"] = in.RadosUser + } + if in.Keyring != "" { + att["keyring"] = in.Keyring + } + if in.SecretRef != nil { + att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) + } + if in.ReadOnly != false { + att["read_only"] = in.ReadOnly + } + return []interface{}{att} +} + +func flattenVsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource) []interface{} { + att := 
make(map[string]interface{}) + att["volume_path"] = in.VolumePath + if in.FSType != "" { + att["fs_type"] = in.FSType + } + return []interface{}{att} +} + +// Expanders + +func expandAWSElasticBlockStoreVolumeSource(l []interface{}) *v1.AWSElasticBlockStoreVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.AWSElasticBlockStoreVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: in["volume_id"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["partition"].(int); ok { + obj.Partition = int32(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandAzureDiskVolumeSource(l []interface{}) *v1.AzureDiskVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.AzureDiskVolumeSource{} + } + in := l[0].(map[string]interface{}) + cachingMode := v1.AzureDataDiskCachingMode(in["caching_mode"].(string)) + obj := &v1.AzureDiskVolumeSource{ + CachingMode: &cachingMode, + DiskName: in["disk_name"].(string), + DataDiskURI: in["data_disk_uri"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = ptrToString(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = ptrToBool(v) + } + return obj +} + +func expandAzureFileVolumeSource(l []interface{}) *v1.AzureFileVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.AzureFileVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.AzureFileVolumeSource{ + SecretName: in["secret_name"].(string), + ShareName: in["share_name"].(string), + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandCephFSVolumeSource(l []interface{}) *v1.CephFSVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.CephFSVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.CephFSVolumeSource{ + Monitors: sliceOfString(in["monitors"].(*schema.Set).List()), + } + if v, ok := 
in["path"].(string); ok { + obj.Path = v + } + if v, ok := in["user"].(string); ok { + obj.User = v + } + if v, ok := in["secret_file"].(string); ok { + obj.SecretFile = v + } + if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { + obj.SecretRef = expandLocalObjectReference(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandCinderVolumeSource(l []interface{}) *v1.CinderVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.CinderVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.CinderVolumeSource{ + VolumeID: in["volume_id"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandFCVolumeSource(l []interface{}) *v1.FCVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.FCVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.FCVolumeSource{ + TargetWWNs: sliceOfString(in["target_ww_ns"].(*schema.Set).List()), + Lun: ptrToInt32(int32(in["lun"].(int))), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandFlexVolumeSource(l []interface{}) *v1.FlexVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.FlexVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.FlexVolumeSource{ + Driver: in["driver"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { + obj.SecretRef = expandLocalObjectReference(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + if v, ok := in["options"].(map[string]interface{}); ok && len(v) > 0 { + obj.Options = expandStringMap(v) + } + return obj +} + +func expandFlockerVolumeSource(l []interface{}) *v1.FlockerVolumeSource { + if len(l) == 0 || l[0] == nil { + return 
&v1.FlockerVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.FlockerVolumeSource{ + DatasetName: in["dataset_name"].(string), + DatasetUUID: in["dataset_uuid"].(string), + } + return obj +} + +func expandGCEPersistentDiskVolumeSource(l []interface{}) *v1.GCEPersistentDiskVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.GCEPersistentDiskVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.GCEPersistentDiskVolumeSource{ + PDName: in["pd_name"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["partition"].(int); ok { + obj.Partition = int32(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandGlusterfsVolumeSource(l []interface{}) *v1.GlusterfsVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.GlusterfsVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.GlusterfsVolumeSource{ + EndpointsName: in["endpoints_name"].(string), + Path: in["path"].(string), + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandHostPathVolumeSource(l []interface{}) *v1.HostPathVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.HostPathVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.HostPathVolumeSource{ + Path: in["path"].(string), + } + return obj +} + +func expandISCSIVolumeSource(l []interface{}) *v1.ISCSIVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.ISCSIVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.ISCSIVolumeSource{ + TargetPortal: in["target_portal"].(string), + IQN: in["iqn"].(string), + } + if v, ok := in["lun"].(int); ok { + obj.Lun = int32(v) + } + if v, ok := in["iscsi_interface"].(string); ok { + obj.ISCSIInterface = v + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func 
expandLocalObjectReference(l []interface{}) *v1.LocalObjectReference { + if len(l) == 0 || l[0] == nil { + return &v1.LocalObjectReference{} + } + in := l[0].(map[string]interface{}) + obj := &v1.LocalObjectReference{} + if v, ok := in["name"].(string); ok { + obj.Name = v + } + return obj +} + +func expandNFSVolumeSource(l []interface{}) *v1.NFSVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.NFSVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.NFSVolumeSource{ + Server: in["server"].(string), + Path: in["path"].(string), + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandPersistentVolumeSource(l []interface{}) v1.PersistentVolumeSource { + if len(l) == 0 || l[0] == nil { + return v1.PersistentVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := v1.PersistentVolumeSource{} + if v, ok := in["gce_persistent_disk"].([]interface{}); ok && len(v) > 0 { + obj.GCEPersistentDisk = expandGCEPersistentDiskVolumeSource(v) + } + if v, ok := in["aws_elastic_block_store"].([]interface{}); ok && len(v) > 0 { + obj.AWSElasticBlockStore = expandAWSElasticBlockStoreVolumeSource(v) + } + if v, ok := in["host_path"].([]interface{}); ok && len(v) > 0 { + obj.HostPath = expandHostPathVolumeSource(v) + } + if v, ok := in["glusterfs"].([]interface{}); ok && len(v) > 0 { + obj.Glusterfs = expandGlusterfsVolumeSource(v) + } + if v, ok := in["nfs"].([]interface{}); ok && len(v) > 0 { + obj.NFS = expandNFSVolumeSource(v) + } + if v, ok := in["rbd"].([]interface{}); ok && len(v) > 0 { + obj.RBD = expandRBDVolumeSource(v) + } + if v, ok := in["iscsi"].([]interface{}); ok && len(v) > 0 { + obj.ISCSI = expandISCSIVolumeSource(v) + } + if v, ok := in["cinder"].([]interface{}); ok && len(v) > 0 { + obj.Cinder = expandCinderVolumeSource(v) + } + if v, ok := in["ceph_fs"].([]interface{}); ok && len(v) > 0 { + obj.CephFS = expandCephFSVolumeSource(v) + } + if v, ok := in["fc"].([]interface{}); ok && len(v) 
> 0 { + obj.FC = expandFCVolumeSource(v) + } + if v, ok := in["flocker"].([]interface{}); ok && len(v) > 0 { + obj.Flocker = expandFlockerVolumeSource(v) + } + if v, ok := in["flex_volume"].([]interface{}); ok && len(v) > 0 { + obj.FlexVolume = expandFlexVolumeSource(v) + } + if v, ok := in["azure_file"].([]interface{}); ok && len(v) > 0 { + obj.AzureFile = expandAzureFileVolumeSource(v) + } + if v, ok := in["vsphere_volume"].([]interface{}); ok && len(v) > 0 { + obj.VsphereVolume = expandVsphereVirtualDiskVolumeSource(v) + } + if v, ok := in["quobyte"].([]interface{}); ok && len(v) > 0 { + obj.Quobyte = expandQuobyteVolumeSource(v) + } + if v, ok := in["azure_disk"].([]interface{}); ok && len(v) > 0 { + obj.AzureDisk = expandAzureDiskVolumeSource(v) + } + if v, ok := in["photon_persistent_disk"].([]interface{}); ok && len(v) > 0 { + obj.PhotonPersistentDisk = expandPhotonPersistentDiskVolumeSource(v) + } + return obj +} + +func expandPersistentVolumeSpec(l []interface{}) (v1.PersistentVolumeSpec, error) { + if len(l) == 0 || l[0] == nil { + return v1.PersistentVolumeSpec{}, nil + } + in := l[0].(map[string]interface{}) + obj := v1.PersistentVolumeSpec{} + if v, ok := in["capacity"].(map[string]interface{}); ok && len(v) > 0 { + var err error + obj.Capacity, err = expandMapToResourceList(v) + if err != nil { + return obj, err + } + } + if v, ok := in["persistent_volume_source"].([]interface{}); ok && len(v) > 0 { + obj.PersistentVolumeSource = expandPersistentVolumeSource(v) + } + if v, ok := in["access_modes"].(*schema.Set); ok && v.Len() > 0 { + obj.AccessModes = expandPersistentVolumeAccessModes(v.List()) + } + if v, ok := in["persistent_volume_reclaim_policy"].(string); ok { + obj.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(v) + } + return obj, nil +} + +func expandPhotonPersistentDiskVolumeSource(l []interface{}) *v1.PhotonPersistentDiskVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.PhotonPersistentDiskVolumeSource{} + } + 
in := l[0].(map[string]interface{}) + obj := &v1.PhotonPersistentDiskVolumeSource{ + PdID: in["pd_id"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + return obj +} + +func expandQuobyteVolumeSource(l []interface{}) *v1.QuobyteVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.QuobyteVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.QuobyteVolumeSource{ + Registry: in["registry"].(string), + Volume: in["volume"].(string), + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + if v, ok := in["user"].(string); ok { + obj.User = v + } + if v, ok := in["group"].(string); ok { + obj.Group = v + } + return obj +} + +func expandRBDVolumeSource(l []interface{}) *v1.RBDVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.RBDVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.RBDVolumeSource{ + CephMonitors: expandStringSlice(in["ceph_monitors"].(*schema.Set).List()), + RBDImage: in["rbd_image"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + if v, ok := in["rbd_pool"].(string); ok { + obj.RBDPool = v + } + if v, ok := in["rados_user"].(string); ok { + obj.RadosUser = v + } + if v, ok := in["keyring"].(string); ok { + obj.Keyring = v + } + if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { + obj.SecretRef = expandLocalObjectReference(v) + } + if v, ok := in["read_only"].(bool); ok { + obj.ReadOnly = v + } + return obj +} + +func expandVsphereVirtualDiskVolumeSource(l []interface{}) *v1.VsphereVirtualDiskVolumeSource { + if len(l) == 0 || l[0] == nil { + return &v1.VsphereVirtualDiskVolumeSource{} + } + in := l[0].(map[string]interface{}) + obj := &v1.VsphereVirtualDiskVolumeSource{ + VolumePath: in["volume_path"].(string), + } + if v, ok := in["fs_type"].(string); ok { + obj.FSType = v + } + return obj +} + +func patchPersistentVolumeSpec(pathPrefix, prefix string, d *schema.ResourceData) (PatchOperations, error) { + ops := 
make([]PatchOperation, 0) + prefix += ".0." + + if d.HasChange(prefix + "capacity") { + v := d.Get(prefix + "capacity").(map[string]interface{}) + capacity, err := expandMapToResourceList(v) + if err != nil { + return ops, err + } + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/capacity", + Value: capacity, + }) + } + + if d.HasChange(prefix + "persistent_volume_source") { + ops = append(ops, patchPersistentVolumeSource( + pathPrefix, + prefix+"persistent_volume_source.0.", + d, + )...) + } + + if d.HasChange(prefix + "access_modes") { + v := d.Get(prefix + "access_modes").(*schema.Set) + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/accessModes", + Value: expandPersistentVolumeAccessModes(v.List()), + }) + } + if d.HasChange(prefix + "persistent_volume_reclaim_policy") { + v := d.Get(prefix + "persistent_volume_reclaim_policy").(string) + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/persistentVolumeReclaimPolicy", + Value: v1.PersistentVolumeReclaimPolicy(v), + }) + } + + return ops, nil +} + +func patchPersistentVolumeSource(pathPrefix, prefix string, d *schema.ResourceData) []PatchOperation { + ops := make([]PatchOperation, 0) + + if d.HasChange(prefix + "gce_persistent_disk") { + oldIn, newIn := d.GetChange(prefix + "gce_persistent_disk") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/gcePersistentDisk", + Value: expandGCEPersistentDiskVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/gcePersistentDisk"}) + } + } + + if d.HasChange(prefix + "aws_elastic_block_store") { + oldIn, newIn := d.GetChange(prefix + "aws_elastic_block_store") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/awsElasticBlockStore", + Value: expandAWSElasticBlockStoreVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && 
len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/awsElasticBlockStore"}) + } + } + + if d.HasChange(prefix + "host_path") { + oldIn, newIn := d.GetChange(prefix + "host_path") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/hostPath", + Value: expandHostPathVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/hostPath"}) + } + } + + if d.HasChange(prefix + "glusterfs") { + oldIn, newIn := d.GetChange(prefix + "glusterfs") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/glusterfs", + Value: expandGlusterfsVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/glusterfs"}) + } + } + + if d.HasChange(prefix + "nfs") { + oldIn, newIn := d.GetChange(prefix + "nfs") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/nfs", + Value: expandNFSVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/nfs"}) + } + } + + if d.HasChange(prefix + "rbd") { + oldIn, newIn := d.GetChange(prefix + "rbd") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/rbd", + Value: expandRBDVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/rbd"}) + } + } + + if d.HasChange(prefix + "iscsi") { + oldIn, newIn := d.GetChange(prefix + "iscsi") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/iscsi", + Value: expandISCSIVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && 
len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/iscsi"}) + } + } + + if d.HasChange(prefix + "cinder") { + oldIn, newIn := d.GetChange(prefix + "cinder") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/cinder", + Value: expandCinderVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/cinder"}) + } + } + + if d.HasChange(prefix + "ceph_fs") { + oldIn, newIn := d.GetChange(prefix + "ceph_fs") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/cephfs", + Value: expandCephFSVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/cephfs"}) + } + } + + if d.HasChange(prefix + "fc") { + oldIn, newIn := d.GetChange(prefix + "fc") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/fc", + Value: expandFCVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/fc"}) + } + } + + if d.HasChange(prefix + "flocker") { + oldIn, newIn := d.GetChange(prefix + "flocker") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/flocker", + Value: expandFlockerVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/flocker"}) + } + } + + if d.HasChange(prefix + "flex_volume") { + oldIn, newIn := d.GetChange(prefix + "flex_volume") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/flexVolume", + Value: expandFlexVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 
{ + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/flexVolume"}) + } + } + + if d.HasChange(prefix + "azure_file") { + oldIn, newIn := d.GetChange(prefix + "azure_file") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/azureFile", + Value: expandAzureFileVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/azureFile"}) + } + } + + if d.HasChange(prefix + "vsphere_volume") { + oldIn, newIn := d.GetChange(prefix + "vsphere_volume") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/vsphereVolume", + Value: expandVsphereVirtualDiskVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/vsphereVolume"}) + } + } + + if d.HasChange(prefix + "quobyte") { + oldIn, newIn := d.GetChange(prefix + "quobyte") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/quobyte", + Value: expandQuobyteVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/quobyte"}) + } + } + + if d.HasChange(prefix + "azure_disk") { + oldIn, newIn := d.GetChange(prefix + "azure_disk") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + "/azureDisk", + Value: expandAzureDiskVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/azureDisk"}) + } + } + + if d.HasChange(prefix + "photon_persistent_disk") { + oldIn, newIn := d.GetChange(prefix + "photon_persistent_disk") + if v, ok := newIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &ReplaceOperation{ + Path: pathPrefix + 
"/photonPersistentDisk", + Value: expandPhotonPersistentDiskVolumeSource(v), + }) + } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { + ops = append(ops, &RemoveOperation{Path: pathPrefix + "/photonPersistentDisk"}) + } + } + + return ops +} diff --git a/builtin/providers/kubernetes/structures.go b/builtin/providers/kubernetes/structures.go index 4c724ddbb..19b73aabd 100644 --- a/builtin/providers/kubernetes/structures.go +++ b/builtin/providers/kubernetes/structures.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "github.com/hashicorp/terraform/helper/schema" + "k8s.io/kubernetes/pkg/api/resource" api "k8s.io/kubernetes/pkg/api/v1" ) @@ -65,6 +66,14 @@ func expandStringMap(m map[string]interface{}) map[string]string { return result } +func expandStringSlice(s []interface{}) []string { + result := make([]string, len(s), len(s)) + for k, v := range s { + result[k] = v.(string) + } + return result +} + func flattenMetadata(meta api.ObjectMeta) []map[string]interface{} { m := make(map[string]interface{}) m["annotations"] = filterAnnotations(meta.Annotations) @@ -111,6 +120,30 @@ func byteMapToStringMap(m map[string][]byte) map[string]string { return result } +func ptrToString(s string) *string { + return &s +} + +func ptrToInt(i int) *int { + return &i +} + +func ptrToBool(b bool) *bool { + return &b +} + +func ptrToInt32(i int32) *int32 { + return &i +} + +func sliceOfString(slice []interface{}) []string { + result := make([]string, len(slice), len(slice)) + for i, s := range slice { + result[i] = s.(string) + } + return result +} + func base64EncodeStringMap(m map[string]interface{}) map[string]interface{} { result := make(map[string]interface{}) for k, v := range m { @@ -119,3 +152,49 @@ func base64EncodeStringMap(m map[string]interface{}) map[string]interface{} { } return result } + +func flattenResourceList(l api.ResourceList) map[string]string { + m := make(map[string]string) + for k, v := range l { + m[string(k)] = v.String() + } + return m +} + +func 
expandMapToResourceList(m map[string]interface{}) (api.ResourceList, error) { + out := make(map[api.ResourceName]resource.Quantity) + for stringKey, v := range m { + key := api.ResourceName(stringKey) + value, err := resource.ParseQuantity(v.(string)) + if err != nil { + return out, err + } + + out[key] = value + } + return out, nil +} + +func flattenPersistentVolumeAccessModes(in []api.PersistentVolumeAccessMode) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + out[i] = string(v) + } + return schema.NewSet(schema.HashString, out) +} + +func expandPersistentVolumeAccessModes(s []interface{}) []api.PersistentVolumeAccessMode { + out := make([]api.PersistentVolumeAccessMode, len(s), len(s)) + for i, v := range s { + out[i] = api.PersistentVolumeAccessMode(v.(string)) + } + return out +} + +func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + out[i] = v + } + return schema.NewSet(f, out) +} diff --git a/builtin/providers/kubernetes/validators.go b/builtin/providers/kubernetes/validators.go index 22309a34e..f1dde2029 100644 --- a/builtin/providers/kubernetes/validators.go +++ b/builtin/providers/kubernetes/validators.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "k8s.io/kubernetes/pkg/api/resource" apiValidation "k8s.io/kubernetes/pkg/api/validation" utilValidation "k8s.io/kubernetes/pkg/util/validation" ) @@ -58,3 +59,15 @@ func validateLabels(value interface{}, key string) (ws []string, es []error) { } return } + +func validateResourceList(value interface{}, key string) (ws []string, es []error) { + m := value.(map[string]interface{}) + for k, v := range m { + val := v.(string) + _, err := resource.ParseQuantity(val) + if err != nil { + es = append(es, fmt.Errorf("%s.%s (%q): %s", key, k, val, err)) + } + } + return +} diff --git a/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown 
b/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown new file mode 100644 index 000000000..04a8c43f9 --- /dev/null +++ b/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown @@ -0,0 +1,256 @@ +--- +layout: "kubernetes" +page_title: "Kubernetes: kubernetes_persistent_volume" +sidebar_current: "docs-kubernetes-resource-persistent-volume" +description: |- + A Persistent Volume (PV) is a piece of networked storage in the cluster that has been provisioned by an administrator. +--- + +# kubernetes_persistent_volume + +The resource provides a piece of networked storage in the cluster provisioned by an administrator. It is a resource in the cluster just like a node is a cluster resource. Persistent Volumes have a lifecycle independent of any individual pod that uses the PV. + +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + +## Example Usage + +```hcl +resource "kubernetes_persistent_volume" "example" { + metadata { + name = "terraform-example" + } + spec { + capacity { + storage = "2Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + vsphere_volume { + volume_path = "/absolute/path" + } + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `metadata` - (Required) Standard persistent volume's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata +* `spec` - (Required) Spec of the persistent volume owned by the cluster. See below. + +## Nested Blocks + +### `spec` + +#### Arguments + +* `access_modes` - (Required) Contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes +* `capacity` - (Required) A description of the persistent volume's resources and capacity. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity +* `persistent_volume_reclaim_policy` - (Optional) What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy +* `persistent_volume_source` - (Required) The specification of a persistent volume. + +### `persistent_volume_source` + +#### Arguments + +* `aws_elastic_block_store` - (Optional) Represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore +* `azure_disk` - (Optional) Represents an Azure Data Disk mount on the host and bind mount to the pod. +* `azure_file` - (Optional) Represents an Azure File Service mount on the host and bind mount to the pod. +* `ceph_fs` - (Optional) Represents a Ceph FS mount on the host that shares a pod's lifetime +* `cinder` - (Optional) Represents a cinder volume attached and mounted on kubelets host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md +* `fc` - (Optional) Represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. +* `flex_volume` - (Optional) Represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +* `flocker` - (Optional) Represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running +* `gce_persistent_disk` - (Optional) Represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk +* `glusterfs` - (Optional) Represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md +* `host_path` - (Optional) Represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath +* `iscsi` - (Optional) Represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. +* `nfs` - (Optional) Represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs +* `photon_persistent_disk` - (Optional) Represents a PhotonController persistent disk attached and mounted on kubelets host machine +* `quobyte` - (Optional) Quobyte represents a Quobyte mount on the host that shares a pod's lifetime +* `rbd` - (Optional) Represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md +* `vsphere_volume` - (Optional) Represents a vSphere volume attached and mounted on kubelets host machine + + +### `aws_elastic_block_store` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore +* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". 
Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). +* `read_only` - (Optional) Whether to set the read-only property in VolumeMounts to "true". If omitted, the default is "false". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore +* `volume_id` - (Required) Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + +### `azure_disk` + +#### Arguments + +* `caching_mode` - (Required) Host Caching mode: None, Read Only, Read Write. +* `data_disk_uri` - (Required) The URI the data disk in the blob storage +* `disk_name` - (Required) The Name of the data disk in the blob storage +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). + +### `azure_file` + +#### Arguments + +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). +* `secret_name` - (Required) The name of secret that contains Azure Storage Account Name and Key +* `share_name` - (Required) Share Name + +### `ceph_fs` + +#### Arguments + +* `monitors` - (Required) Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it +* `path` - (Optional) Used as the mounted root, rather than the full Ceph tree, default is / +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to `false` (read/write). 
More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it +* `secret_file` - (Optional) The path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it +* `secret_ref` - (Optional) Reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it +* `user` - (Optional) User is the rados user name, default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + +### `cinder` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md +* `volume_id` - (Required) Volume ID used to identify the volume in Cinder. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + +### `fc` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. +* `lun` - (Required) FC target lun number +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). +* `target_ww_ns` - (Required) FC target worldwide names (WWNs) + +### `flex_volume` + +#### Arguments + +* `driver` - (Required) Driver is the name of the driver to use for this volume. +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
The default filesystem depends on FlexVolume script. +* `options` - (Optional) Extra command options if any. +* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false (read/write). +* `secret_ref` - (Optional) Reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. + +### `flocker` + +#### Arguments + +* `dataset_name` - (Optional) Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated +* `dataset_uuid` - (Optional) UUID of the dataset. This is unique identifier of a Flocker dataset + +### `gce_persistent_disk` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk +* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk +* `pd_name` - (Required) Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk +* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + +### `glusterfs` + +#### Arguments + +* `endpoints_name` - (Required) The endpoint name that details Glusterfs topology. 
More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod +* `path` - (Required) The Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod +* `read_only` - (Optional) Whether to force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + +### `host_path` + +#### Arguments + +* `path` - (Optional) Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + +### `iscsi` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi +* `iqn` - (Required) Target iSCSI Qualified Name. +* `iscsi_interface` - (Optional) iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp). +* `lun` - (Optional) iSCSI target lun number. +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false. +* `target_portal` - (Required) iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + +### `metadata` + +#### Arguments + +* `annotations` - (Optional) An unstructured key value map stored with the persistent volume that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations +* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the persistent volume. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels +* `name` - (Optional) Name of the persistent volume, must be unique. 
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + +#### Attributes + + +* `generation` - A sequence number representing a specific generation of the desired state. +* `resource_version` - An opaque value that represents the internal version of this persistent volume that can be used by clients to determine when persistent volume has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency +* `self_link` - A URL representing this persistent volume. +* `uid` - The unique in time and space value for this persistent volume. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + +### `nfs` + +#### Arguments + +* `path` - (Required) Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs +* `read_only` - (Optional) Whether to force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs +* `server` - (Required) Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs + +### `photon_persistent_disk` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. +* `pd_id` - (Required) ID that identifies Photon Controller persistent disk + +### `quobyte` + +#### Arguments + +* `group` - (Optional) Group to map volume access to. Default is no group. +* `read_only` - (Optional) Whether to force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
+* `registry` - (Required) Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes +* `user` - (Optional) User to map volume access to. Defaults to serviceaccount user. +* `volume` - (Required) Volume is a string that references an already created Quobyte volume by name. + +### `rbd` + +#### Arguments + +* `ceph_monitors` - (Required) A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it +* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd +* `keyring` - (Optional) Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it +* `rados_user` - (Optional) The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it +* `rbd_image` - (Required) The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it +* `rbd_pool` - (Optional) The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. +* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it +* `secret_ref` - (Optional) Name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + +### `secret_ref` + +#### Arguments + +* `name` - (Optional) Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names + +### `vsphere_volume` + +#### Arguments + +* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. +* `volume_path` - (Required) Path that identifies vSphere volume vmdk + +## Import + +Persistent Volume can be imported using its name, e.g. + +``` +$ terraform import kubernetes_persistent_volume.example terraform-example +``` diff --git a/website/source/layouts/kubernetes.erb b/website/source/layouts/kubernetes.erb index aa31b5b5a..39fcd0815 100644 --- a/website/source/layouts/kubernetes.erb +++ b/website/source/layouts/kubernetes.erb @@ -19,6 +19,9 @@ > kubernetes_namespace + > + kubernetes_persistent_volume + > kubernetes_secret