From 914e86d087cb9085df7890795dc1c425b8e02494 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 1 Mar 2017 09:30:02 +0000 Subject: [PATCH] vendor: k8s.io/kubernetes/pkg/api/validation@v1.5.3 (#12267) --- .../github.com/exponent-io/jsonpath/LICENSE | 21 + .../github.com/exponent-io/jsonpath/README.md | 66 + .../exponent-io/jsonpath/decoder.go | 210 + .../github.com/exponent-io/jsonpath/path.go | 67 + .../exponent-io/jsonpath/pathaction.go | 61 + vendor/k8s.io/apiserver/LICENSE | 202 + .../apiserver/pkg/features/kube_features.go | 47 + .../pkg/util/feature/feature_gate.go | 237 ++ vendor/k8s.io/kubernetes/LICENSE | 2 +- .../k8s.io/kubernetes/pkg/api/endpoints/BUILD | 34 + .../kubernetes/pkg/api/endpoints/util.go | 238 ++ vendor/k8s.io/kubernetes/pkg/api/pod/BUILD | 32 + vendor/k8s.io/kubernetes/pkg/api/pod/util.go | 61 + .../k8s.io/kubernetes/pkg/api/service/BUILD | 36 + .../kubernetes/pkg/api/service/annotations.go | 111 + .../k8s.io/kubernetes/pkg/api/service/util.go | 68 + .../pkg/api/unversioned/validation/BUILD | 30 + .../api/unversioned/validation/validation.go | 74 + vendor/k8s.io/kubernetes/pkg/api/util/BUILD | 25 + .../kubernetes/pkg/api/util/group_version.go | 48 + .../kubernetes/pkg/api/validation/BUILD | 87 + .../kubernetes/pkg/api/validation/doc.go | 19 + .../kubernetes/pkg/api/validation/events.go | 80 + .../kubernetes/pkg/api/validation/schema.go | 435 ++ .../pkg/api/validation/validation.go | 3737 +++++++++++++++++ .../k8s.io/kubernetes/pkg/capabilities/BUILD | 30 + .../pkg/capabilities/capabilities.go | 94 + .../k8s.io/kubernetes/pkg/capabilities/doc.go | 18 + vendor/k8s.io/kubernetes/pkg/features/BUILD | 31 + .../kubernetes/pkg/features/kube_features.go | 97 + .../kubernetes/pkg/security/apparmor/BUILD | 52 + .../pkg/security/apparmor/helpers.go | 77 + .../pkg/security/apparmor/validate.go | 228 + .../security/apparmor/validate_disabled.go | 24 + .../k8s.io/kubernetes/pkg/util/config/BUILD | 40 + .../kubernetes/pkg/util/config/config.go | 140 + .../pkg/util/config/configuration_map.go | 53 + .../k8s.io/kubernetes/pkg/util/config/doc.go | 20 + .../pkg/util/config/feature_gate.go | 273 ++ .../pkg/util/config/namedcertkey_flag.go | 113 + vendor/k8s.io/kubernetes/pkg/util/hash/BUILD | 26 + .../k8s.io/kubernetes/pkg/util/hash/hash.go | 37 + .../k8s.io/kubernetes/pkg/util/net/sets/BUILD | 28 + .../kubernetes/pkg/util/net/sets/doc.go | 28 + .../kubernetes/pkg/util/net/sets/ipnet.go | 119 + vendor/vendor.json | 108 + 46 files changed, 7663 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/exponent-io/jsonpath/LICENSE create mode 100644 vendor/github.com/exponent-io/jsonpath/README.md create mode 100644 vendor/github.com/exponent-io/jsonpath/decoder.go create mode 100644 vendor/github.com/exponent-io/jsonpath/path.go create mode 100644 vendor/github.com/exponent-io/jsonpath/pathaction.go create mode 100644 vendor/k8s.io/apiserver/LICENSE create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/pod/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/pod/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/service/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/service/annotations.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/service/util.go create mode 
100644 vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/util/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/util/group_version.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/events.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/schema.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/validation.go create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/features/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/hash/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/hash/hash.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE new file mode 100644 index 000000000..541977250 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Exponent Labs LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md
new file mode 100644
index 000000000..382fb3138
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/README.md
@@ -0,0 +1,66 @@
+[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
+[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
+
+# jsonpath
+
+This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
+
+This Decoder has the following enhancements...
+ * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
+ * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
+ * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
+ * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
+
+## Installation
+
+    go get -u github.com/exponent-io/jsonpath
+
+## Example Usage
+
+#### SeekTo
+
+```go
+import (
+    "bytes"
+
+    "github.com/exponent-io/jsonpath"
+)
+
+var j = []byte(`[
+    {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+    {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+]`)
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+var v interface{}
+
+w.SeekTo(1, "Point", "G")
+w.Decode(&v) // v is 218
+```
+
+#### Scan with PathActions
+
+```go
+var j = []byte(`{"colors":[
+    {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
+    {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
+]}`)
+
+var actions jsonpath.PathActions
+
+// Extract the value at Point.A
+actions.Add(func(d *jsonpath.Decoder) error {
+    var alpha int
+    err := d.Decode(&alpha)
+    fmt.Printf("Alpha: %v\n", alpha)
+    return err
+}, "Point", "A")
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+w.SeekTo("colors", 0)
+
+var ok = true
+var err error
+for ok {
+    ok, err = w.Scan(&actions)
+    if err != nil && err != io.EOF {
+        panic(err)
+    }
+}
+```
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
new file mode 100644
index 000000000..31de46c73
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -0,0 +1,210 @@
+package jsonpath
+
+import (
+    "encoding/json"
+    "io"
+)
+
+// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
+type KeyString string
+
+// Decoder extends the Go runtime's encoding/json.Decoder to support navigating a stream of JSON tokens.
+type Decoder struct {
+    json.Decoder
+
+    path    JsonPath
+    context jsonContext
+}
+
+// NewDecoder creates a new instance of the extended JSON Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+    return &Decoder{Decoder: *json.NewDecoder(r)}
+}
+
+// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
+//
+// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+//
+// Consider the JSON structure
+//
+//  { "a": [0,"s",12e4,{"b":0,"v":35} ] }
+//
+// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
+// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
+// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
+//
+// SeekTo returns a boolean value indicating whether a match was found.
+//
+// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
+func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
+
+    if len(path) == 0 {
+        return len(d.path) == 0, nil
+    }
+    last := len(path) - 1
+    if i, ok := path[last].(int); ok {
+        path[last] = i - 1
+    }
+
+    for {
+        if d.path.Equal(path) {
+            return true, nil
+        }
+        _, err := d.Token()
+        if err == io.EOF {
+            return false, nil
+        } else if err != nil {
+            return false, err
+        }
+    }
+}
+
+// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
+// equivalent to encoding/json.Decode().
+func (d *Decoder) Decode(v interface{}) error {
+    switch d.context {
+    case objValue:
+        d.context = objKey
+        break
+    case arrValue:
+        d.path.incTop()
+        break
+    }
+    return d.Decoder.Decode(v)
+}
+
+// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
+// position of the most-recently parsed token.
+func (d *Decoder) Path() JsonPath {
+    p := make(JsonPath, len(d.path))
+    copy(p, d.path)
+    return p
+}
+
+// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
+// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
+// KeyString rather than as a native string.
+func (d *Decoder) Token() (json.Token, error) {
+    t, err := d.Decoder.Token()
+    if err != nil {
+        return t, err
+    }
+
+    if t == nil {
+        switch d.context {
+        case objValue:
+            d.context = objKey
+            break
+        case arrValue:
+            d.path.incTop()
+            break
+        }
+        return t, err
+    }
+
+    switch t := t.(type) {
+    case json.Delim:
+        switch t {
+        case json.Delim('{'):
+            if d.context == arrValue {
+                d.path.incTop()
+            }
+            d.path.push("")
+            d.context = objKey
+            break
+        case json.Delim('}'):
+            d.path.pop()
+            d.context = d.path.inferContext()
+            break
+        case json.Delim('['):
+            if d.context == arrValue {
+                d.path.incTop()
+            }
+            d.path.push(-1)
+            d.context = arrValue
+            break
+        case json.Delim(']'):
+            d.path.pop()
+            d.context = d.path.inferContext()
+            break
+        }
+    case float64, json.Number, bool:
+        switch d.context {
+        case objValue:
+            d.context = objKey
+            break
+        case arrValue:
+            d.path.incTop()
+            break
+        }
+        break
+    case string:
+        switch d.context {
+        case objKey:
+            d.path.nameTop(t)
+            d.context = objValue
+            return KeyString(t), err
+        case objValue:
+            d.context = objKey
+        case arrValue:
+            d.path.incTop()
+        }
+        break
+    }
+
+    return t, err
+}
+
+// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
+// invoking each matching PathAction along the way.
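+// Matching is performed against paths relative to the decoder's position when the scan begins.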
+//
+// Scan returns true if there are more contiguous values to scan (for example in an array).
+func (d *Decoder) Scan(ext *PathActions) (bool, error) {
+
+    rootPath := d.Path()
+
+    // If this is an array path, increment the root path in our local copy.
+    if rootPath.inferContext() == arrValue {
+        rootPath.incTop()
+    }
+
+    for {
+        // advance the token position
+        _, err := d.Token()
+        if err != nil {
+            return false, err
+        }
+
+    match:
+        var relPath JsonPath
+
+        // capture the new JSON path
+        path := d.Path()
+
+        if len(path) > len(rootPath) {
+            // capture the path relative to where the scan started
+            relPath = path[len(rootPath):]
+        } else {
+            // if the path is not longer than the root, then we are done with this scan
+            // return boolean flag indicating if there are more items to scan at the same level
+            return d.Decoder.More(), nil
+        }
+
+        // match the relative path against the path actions
+        if node := ext.node.match(relPath); node != nil {
+            if node.action != nil {
+                // we have a match so execute the action
+                err = node.action(d)
+                if err != nil {
+                    return d.Decoder.More(), err
+                }
+                // The action may have advanced the decoder. If we are in an array, advancing it further would
+                // skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
+                if d.path.inferContext() == arrValue && d.Decoder.More() {
+                    goto match
+                }
+            }
+        }
+    }
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go
new file mode 100644
index 000000000..d7db2ad33
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/path.go
@@ -0,0 +1,67 @@
+// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens.
+package jsonpath
+
+import "fmt"
+
+type jsonContext int
+
+const (
+    none jsonContext = iota
+    objKey
+    objValue
+    arrValue
+)
+
+// AnyIndex can be used in a pattern to match any array index.
+const AnyIndex = -2
+
+// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+type JsonPath []interface{}
+
+func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
+func (p *JsonPath) pop()               { *p = (*p)[:len(*p)-1] }
+
+// increment the index at the top of the stack (must be an array index)
+func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
+
+// name the key at the top of the stack (must be an object key)
+func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
+
+// infer the context from the item at the top of the stack
+func (p *JsonPath) inferContext() jsonContext {
+    if len(*p) == 0 {
+        return none
+    }
+    t := (*p)[len(*p)-1]
+    switch t.(type) {
+    case string:
+        return objKey
+    case int:
+        return arrValue
+    default:
+        panic(fmt.Sprintf("Invalid stack type %T", t))
+    }
+}
+
+// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool { + if len(*p) != len(o) { + return false + } + for i, v := range *p { + if v != o[i] { + return false + } + } + return true +} + +func (p *JsonPath) HasPrefix(o JsonPath) bool { + for i, v := range o { + if v != (*p)[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go new file mode 100644 index 000000000..497ed686c --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go @@ -0,0 +1,61 @@ +package jsonpath + +// pathNode is used to construct a trie of paths to be matched +type pathNode struct { + matchOn interface{} // string, or integer + childNodes []pathNode + action DecodeAction +} + +// match climbs the trie to find a node that matches the given JSON path. +func (n *pathNode) match(path JsonPath) *pathNode { + var node *pathNode = n + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + return nil + } + } + return node +} + +// PathActions represents a collection of DecodeAction functions that should be called at certain path positions +// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. +type PathActions struct { + node pathNode +} + +// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. +type DecodeAction func(d *Decoder) error + +// Add specifies an action to call on the Decoder when the specified path is encountered. +func (je *PathActions) Add(action DecodeAction, path ...interface{}) { + + var node *pathNode = &je.node + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) + node = &node.childNodes[len(node.childNodes)-1] + } + } + node.action = action +} diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go new file mode 100644 index 000000000..8ab10fa20 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -0,0 +1,47 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: timstclair + // alpha: v1.5 + // + // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) + // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). + StreamingProxyRedirects utilfeature.Feature = "StreamingProxyRedirects" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, +} diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go new file mode 100644 index 000000000..ebb81425d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -0,0 +1,237 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/glog" + "github.com/spf13/pflag" +) + +type Feature string + +const ( + flagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate Feature = "AllAlpha" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]FeatureSpec{ + allAlphaGate: {Default: false, PreRelease: Alpha}, + } + + // Special handling for a few gates. + specialFeatures = map[Feature]func(f *featureGate, val bool){ + allAlphaGate: setUnsetAlphaGates, + } + + // DefaultFeatureGate is a shared global FeatureGate. + DefaultFeatureGate = &featureGate{ + known: defaultFeatures, + special: specialFeatures, + } +) + +type FeatureSpec struct { + Default bool + PreRelease prerelease +} + +type prerelease string + +const ( + // Values for PreRelease. 
+    Alpha = prerelease("ALPHA")
+    Beta  = prerelease("BETA")
+    GA    = prerelease("")
+)
+
+// FeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+type FeatureGate interface {
+    AddFlag(fs *pflag.FlagSet)
+    Set(value string) error
+    Add(features map[Feature]FeatureSpec)
+    KnownFeatures() []string
+
+    // Every feature gate should add method here following this template:
+    //
+    // // owner: @username
+    // // alpha: v1.4
+    // MyFeature() bool
+
+    // owner: @timstclair
+    // beta: v1.4
+    AppArmor() bool
+
+    // owner: @girishkalele
+    // alpha: v1.4
+    ExternalTrafficLocalOnly() bool
+
+    // owner: @saad-ali
+    // alpha: v1.3
+    DynamicVolumeProvisioning() bool
+
+    // owner: @mtaufen
+    // alpha: v1.4
+    DynamicKubeletConfig() bool
+
+    // owner: timstclair
+    // alpha: v1.5
+    StreamingProxyRedirects() bool
+
+    // owner: @pweil-
+    // alpha: v1.5
+    ExperimentalHostUserNamespaceDefaulting() bool
+
+    // owner: @davidopp
+    // alpha: v1.6
+    // TODO: remove when alpha support for affinity is removed
+    AffinityInAnnotations() bool
+}
+
+// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
+type featureGate struct {
+    known   map[Feature]FeatureSpec
+    special map[Feature]func(*featureGate, bool)
+    enabled map[Feature]bool
+
+    // closed is set to true when AddFlag is called. Note: initialization is not goroutine-safe, but lookup is.
+    closed bool
+}
+
+func setUnsetAlphaGates(f *featureGate, val bool) {
+    for k, v := range f.known {
+        if v.PreRelease == Alpha {
+            if _, found := f.enabled[k]; !found {
+                f.enabled[k] = val
+            }
+        }
+    }
+}
+
+// Set, String, and Type implement pflag.Value
+var _ pflag.Value = &featureGate{}
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
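+//
+// For example: Set("AllAlpha=true,StreamingProxyRedirects=false").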
+func (f *featureGate) Set(value string) error { + f.enabled = make(map[Feature]bool) + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := Feature(strings.TrimSpace(arr[0])) + _, ok := f.known[Feature(k)] + if !ok { + return fmt.Errorf("unrecognized key: %s", k) + } + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err) + } + f.enabled[k] = boolValue + + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(f, boolValue) + } + } + + glog.Infof("feature gates: %v", f.enabled) + return nil +} + +func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + for name, spec := range features { + if existingSpec, found := f.known[name]; found { + if existingSpec == spec { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + + f.known[name] = spec + } + return nil +} + +func (f *featureGate) Enabled(key Feature) bool { + defaultValue := f.known[key].Default + if f.enabled != nil { + if v, ok := f.enabled[key]; ok { + return v + } + } + return defaultValue +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *pflag.FlagSet) { + f.closed = true + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(known, "\n")) +} + +// Returns a string describing the FeatureGate's known features. +func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known { + pre := "" + if v.PreRelease != GA { + pre = fmt.Sprintf("%s - ", v.PreRelease) + } + known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.Default)) + } + sort.Strings(known) + return known +} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE index 00b240110..d64569567 100644 --- a/vendor/k8s.io/kubernetes/LICENSE +++ b/vendor/k8s.io/kubernetes/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 The Kubernetes Authors. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
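The feature gate above is normally wired to a command line via AddFlag, but it can also be driven directly. A minimal sketch follows (not part of this patch; the `main` wiring is illustrative, while the calls themselves are the ones defined in `feature_gate.go` and registered by `kube_features.go` above):

```go
package main

import (
	"fmt"

	// The blank import runs init() in kube_features.go, which registers
	// StreamingProxyRedirects with the shared DefaultFeatureGate.
	_ "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Equivalent to passing --feature-gates=StreamingProxyRedirects=false.
	if err := utilfeature.DefaultFeatureGate.Set("StreamingProxyRedirects=false"); err != nil {
		panic(err)
	}
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled("StreamingProxyRedirects")) // false
}
```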
diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD b/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD new file mode 100644 index 000000000..f0f5a5c6a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD @@ -0,0 +1,34 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/types:go_default_library", + "//pkg/util/hash:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/types:go_default_library", + "//vendor:github.com/davecgh/go-spew/spew", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go new file mode 100644 index 000000000..792a2536f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go @@ -0,0 +1,238 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "hash" + "sort" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +const ( + // TODO: to be deleted after v1.3 is released + // Its value is the json representation of map[string(IP)][HostRecord] + // example: '{"10.245.1.6":{"HostName":"my-webserver"}}' + PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map" +) + +// TODO: to be deleted after v1.3 is released +type HostRecord struct { + HostName string +} + +// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full +// representation, and then repacks that into the canonical layout. This +// ensures that code which operates on these objects can rely on the common +// form for things like comparison. The result is a newly allocated slice. +func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { + // First map each unique port definition to the sets of hosts that + // offer it. + allAddrs := map[addressKey]*api.EndpointAddress{} + portToAddrReadyMap := map[api.EndpointPort]addressSet{} + for i := range subsets { + for _, port := range subsets[i].Ports { + for k := range subsets[i].Addresses { + mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap) + } + for k := range subsets[i].NotReadyAddresses { + mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap) + } + } + } + + // Next, map the sets of hosts to the sets of ports they offer. + // Go does not allow maps or slices as keys to maps, so we have + // to synthesize an artificial key and do a sort of 2-part + // associative entity. 
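+ // The synthetic key is an MD5 hash of the sorted address set; see hashAddresses below.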
+ type keyString string + keyToAddrReadyMap := map[keyString]addressSet{} + addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{} + for port, addrs := range portToAddrReadyMap { + key := keyString(hashAddresses(addrs)) + keyToAddrReadyMap[key] = addrs + addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port) + } + + // Next, build the N-to-M association the API wants. + final := []api.EndpointSubset{} + for key, ports := range addrReadyMapKeyToPorts { + var readyAddrs, notReadyAddrs []api.EndpointAddress + for addr, ready := range keyToAddrReadyMap[key] { + if ready { + readyAddrs = append(readyAddrs, *addr) + } else { + notReadyAddrs = append(notReadyAddrs, *addr) + } + } + final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports}) + } + + // Finally, sort it. + return SortSubsets(final) +} + +// The sets of hosts must be de-duped, using IP+UID as the key. +type addressKey struct { + ip string + uid types.UID +} + +// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving +// any existing ready state. +func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress { + // use addressKey to distinguish between two endpoints that are identical addresses + // but may have come from different hosts, for attribution. For instance, Mesos + // assigns pods the node IP, but the pods are distinct. + key := addressKey{ip: addr.IP} + if addr.TargetRef != nil { + key.uid = addr.TargetRef.UID + } + + // Accumulate the address. The full EndpointAddress structure is preserved for use when + // we rebuild the subsets so that the final TargetRef has all of the necessary data. + existingAddress := allAddrs[key] + if existingAddress == nil { + // Make a copy so we don't write to the + // input args of this function. + existingAddress = &api.EndpointAddress{} + *existingAddress = *addr + allAddrs[key] = existingAddress + } + + // Remember that this port maps to this address. + if _, found := portToAddrReadyMap[port]; !found { + portToAddrReadyMap[port] = addressSet{} + } + // if we have not yet recorded this port for this address, or if the previous + // state was ready, write the current ready state. not ready always trumps + // ready. + if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady { + portToAddrReadyMap[port][existingAddress] = ready + } + return existingAddress +} + +type addressSet map[*api.EndpointAddress]bool + +type addrReady struct { + addr *api.EndpointAddress + ready bool +} + +func hashAddresses(addrs addressSet) string { + // Flatten the list of addresses into a string so it can be used as a + // map key. Unfortunately, DeepHashObject is implemented in terms of + // spew, and spew does not handle non-primitive map keys well. So + // first we collapse it into a slice, sort the slice, then hash that. 
+ slice := make([]addrReady, 0, len(addrs)) + for k, ready := range addrs { + slice = append(slice, addrReady{k, ready}) + } + sort.Sort(addrsReady(slice)) + hasher := md5.New() + hashutil.DeepHashObject(hasher, slice) + return hex.EncodeToString(hasher.Sum(nil)[0:]) +} + +func lessAddrReady(a, b addrReady) bool { + // ready is not significant to hashing since we can't have duplicate addresses + return LessEndpointAddress(a.addr, b.addr) +} + +type addrsReady []addrReady + +func (sl addrsReady) Len() int { return len(sl) } +func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsReady) Less(i, j int) bool { + return lessAddrReady(sl[i], sl[j]) +} + +func LessEndpointAddress(a, b *api.EndpointAddress) bool { + ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) + if ipComparison != 0 { + return ipComparison < 0 + } + if b.TargetRef == nil { + return false + } + if a.TargetRef == nil { + return true + } + return a.TargetRef.UID < b.TargetRef.UID +} + +type addrPtrsByIpAndUID []*api.EndpointAddress + +func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } +func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrPtrsByIpAndUID) Less(i, j int) bool { + return LessEndpointAddress(sl[i], sl[j]) +} + +// SortSubsets sorts an array of EndpointSubset objects in place. For ease of +// use it returns the input slice. +func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { + for i := range subsets { + ss := &subsets[i] + sort.Sort(addrsByIpAndUID(ss.Addresses)) + sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) + sort.Sort(portsByHash(ss.Ports)) + } + sort.Sort(subsetsByHash(subsets)) + return subsets +} + +func hashObject(hasher hash.Hash, obj interface{}) []byte { + hashutil.DeepHashObject(hasher, obj) + return hasher.Sum(nil) +} + +type subsetsByHash []api.EndpointSubset + +func (sl subsetsByHash) Len() int { return len(sl) } +func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl subsetsByHash) Less(i, j int) bool { + hasher := md5.New() + h1 := hashObject(hasher, sl[i]) + h2 := hashObject(hasher, sl[j]) + return bytes.Compare(h1, h2) < 0 +} + +type addrsByIpAndUID []api.EndpointAddress + +func (sl addrsByIpAndUID) Len() int { return len(sl) } +func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsByIpAndUID) Less(i, j int) bool { + return LessEndpointAddress(&sl[i], &sl[j]) +} + +type portsByHash []api.EndpointPort + +func (sl portsByHash) Len() int { return len(sl) } +func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl portsByHash) Less(i, j int) bool { + hasher := md5.New() + h1 := hashObject(hasher, sl[i]) + h2 := hashObject(hasher, sl[j]) + return bytes.Compare(h1, h2) < 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD new file mode 100644 index 000000000..fa6546406 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = [ + 
"//pkg/api:go_default_library", + "//pkg/util/intstr:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go new file mode 100644 index 000000000..dfc12db60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/intstr" +) + +const ( + // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Hostname field. + // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1' + PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname" + + // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Subdomain field. + // The annotation value is a string specifying the subdomain e.g. "my-web-service" + // If specified, on the pod itself, ".my-web-service..svc." would resolve to + // the pod's IP. + // If there is a headless service named "my-web-service" in the same namespace as the pod, then, + // .my-web-service..svc." would be resolved by the cluster DNS Server. + PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. 
+func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/BUILD b/vendor/k8s.io/kubernetes/pkg/api/service/BUILD new file mode 100644 index 000000000..e97b7e1aa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/service/BUILD @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "annotations.go", + "util.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/util/net/sets:go_default_library", + "//vendor:github.com/golang/glog", + ], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/util/net/sets:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go new file mode 100644 index 000000000..4b4554998 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" +) + +const ( + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour + AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The alpha annotations have been deprecated, remove them when we move this feature to GA. 
+ + // AlphaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service + // If not specified, annotation is created by the service api backend with the allocated nodePort + // Will use user-specified nodePort value if specified by the client + AlphaAnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport" + + // AlphaAnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local + // endpoints only. This preserves Source IP and avoids a second hop. + AlphaAnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic" + + // BetaAnnotationHealthCheckNodePort is the beta version of AlphaAnnotationHealthCheckNodePort. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic is the beta version of AlphaAnnotationExternalTraffic. + BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) + +// NeedsHealthCheck Check service for health check annotations +func NeedsHealthCheck(service *api.Service) bool { + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. + for _, annotation := range []string{AlphaAnnotationExternalTraffic, BetaAnnotationExternalTraffic} { + if l, ok := service.Annotations[annotation]; ok { + if l == AnnotationValueExternalTrafficLocal { + return true + } else if l == AnnotationValueExternalTrafficGlobal { + return false + } else { + glog.Errorf("Invalid value for annotation %v: %v", annotation, l) + } + } + } + return false +} + +// GetServiceHealthCheckNodePort Return health check node port annotation for service, if one exists +func GetServiceHealthCheckNodePort(service *api.Service) int32 { + if !NeedsHealthCheck(service) { + return 0 + } + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. + for _, annotation := range []string{AlphaAnnotationHealthCheckNodePort, BetaAnnotationHealthCheckNodePort} { + if l, ok := service.Annotations[annotation]; ok { + p, err := strconv.Atoi(l) + if err != nil { + glog.Errorf("Failed to parse annotation %v: %v", annotation, err) + continue + } + return int32(p) + } + } + return 0 +} + +// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check +func GetServiceHealthCheckPathPort(service *api.Service) (string, int32) { + if !NeedsHealthCheck(service) { + return "", 0 + } + port := GetServiceHealthCheckNodePort(service) + if port == 0 { + return "", 0 + } + return "/healthz", port +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go new file mode 100644 index 000000000..6f0e14e2b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api" + netsets "k8s.io/kubernetes/pkg/util/net/sets" +) + +const ( + defaultLoadBalancerSourceRanges = "0.0.0.0/0" +) + +// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 +func IsAllowAll(ipnets netsets.IPNet) bool { + for _, s := range ipnets.StringSlice() { + if s == "0.0.0.0/0" { + return true + } + } + return false +} + +// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service. +// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service, +// extracting the source ranges to allow; if that is absent as well, it returns a default (allow-all) value. +func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) { + var ipnets netsets.IPNet + var err error + // if SourceRange field is specified, ignore sourceRange annotation + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + specs := service.Spec.LoadBalancerSourceRanges + ipnets, err = netsets.ParseIPNets(specs...) + + if err != nil { + return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) + } + } else { + val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] + val = strings.TrimSpace(val) + if val == "" { + val = defaultLoadBalancerSourceRanges + } + specs := strings.Split(val, ",") + ipnets, err = netsets.ParseIPNets(specs...) + if err != nil { + return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) + } + } + return ipnets, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD new file mode 100644 index 000000000..d0d0cbd14 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/BUILD @@ -0,0 +1,30 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api/unversioned:go_default_library", + "//pkg/util/validation:go_default_library", + "//pkg/util/validation/field:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = ["//pkg/util/validation/field:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go new file mode 100644 index 000000000..ecb968bdc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go @@ -0,0 +1,74 @@ +/* +Copyright 2015 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) + } + return allErrs +} + +func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch sr.Operator { + case unversioned.LabelSelectorOpIn, unversioned.LabelSelectorOpNotIn: + if len(sr.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case unversioned.LabelSelectorOpExists, unversioned.LabelSelectorOpDoesNotExist: + if len(sr.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. +func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) 
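+		// Label values are validated next. Unlike keys, a label value may be
+		// empty; a non-empty value must satisfy the character and length rules
+		// enforced by IsValidLabelValue.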
+ for _, msg := range validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) + } + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/BUILD b/vendor/k8s.io/kubernetes/pkg/api/util/BUILD new file mode 100644 index 000000000..4637acb3d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/util/BUILD @@ -0,0 +1,25 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = ["group_version.go"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["group_version_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = [], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go new file mode 100644 index 000000000..fea2f17f8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced +// with a GroupAndVersion type. +package util + +import "strings" + +func GetVersion(groupVersion string) string { + s := strings.Split(groupVersion, "/") + if len(s) != 2 { + // e.g. return "v1" for groupVersion="v1" + return s[len(s)-1] + } + return s[1] +} + +func GetGroup(groupVersion string) string { + s := strings.Split(groupVersion, "/") + if len(s) == 1 { + // e.g. return "" for groupVersion="v1" + return "" + } + return s[0] +} + +// GetGroupVersion returns the "group/version". It returns "version" if group +// is empty. It returns "group/" if version is empty.
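+// For example:
+//
+//	GetGroupVersion("", "v1")                == "v1"
+//	GetGroupVersion("extensions", "v1beta1") == "extensions/v1beta1"
+//	GetGroupVersion("extensions", "")        == "extensions/"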
+func GetGroupVersion(group, version string) string { + if len(group) == 0 { + return version + } + return group + "/" + version +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD new file mode 100644 index 000000000..63169c20d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD @@ -0,0 +1,87 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "events.go", + "schema.go", + "validation.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/endpoints:go_default_library", + "//pkg/api/meta:go_default_library", + "//pkg/api/pod:go_default_library", + "//pkg/api/resource:go_default_library", + "//pkg/api/service:go_default_library", + "//pkg/api/unversioned:go_default_library", + "//pkg/api/unversioned/validation:go_default_library", + "//pkg/api/util:go_default_library", + "//pkg/api/v1:go_default_library", + "//pkg/apimachinery/registered:go_default_library", + "//pkg/capabilities:go_default_library", + "//pkg/labels:go_default_library", + "//pkg/runtime:go_default_library", + "//pkg/security/apparmor:go_default_library", + "//pkg/util/config:go_default_library", + "//pkg/util/errors:go_default_library", + "//pkg/util/intstr:go_default_library", + "//pkg/util/sets:go_default_library", + "//pkg/util/validation:go_default_library", + "//pkg/util/validation/field:go_default_library", + "//pkg/util/yaml:go_default_library", + "//vendor:github.com/emicklei/go-restful/swagger", + "//vendor:github.com/exponent-io/jsonpath", + "//vendor:github.com/golang/glog", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "events_test.go", + "schema_test.go", + "validation_test.go", + ], + data = [ + "testdata/v1/invalidPod.yaml", + "testdata/v1/invalidPod1.json", + "testdata/v1/invalidPod2.json", + "testdata/v1/invalidPod3.json", + "testdata/v1/invalidPod4.yaml", + "testdata/v1/validPod.yaml", + "//api/swagger-spec", + ], + library = "go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/resource:go_default_library", + "//pkg/api/service:go_default_library", + "//pkg/api/testapi:go_default_library", + "//pkg/api/testing:go_default_library", + "//pkg/api/unversioned:go_default_library", + "//pkg/apimachinery/registered:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//pkg/capabilities:go_default_library", + "//pkg/runtime:go_default_library", + "//pkg/security/apparmor:go_default_library", + "//pkg/util/intstr:go_default_library", + "//pkg/util/sets:go_default_library", + "//pkg/util/validation/field:go_default_library", + "//pkg/util/yaml:go_default_library", + "//vendor:github.com/ghodss/yaml", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go new file mode 100644 index 000000000..30f541de3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation has functions for validating the correctness of api +// objects and explaining what is wrong with them when they aren't valid. +package validation // import "k8s.io/kubernetes/pkg/api/validation" diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go new file mode 100644 index 000000000..589fe919f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go @@ -0,0 +1,80 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// ValidateEvent makes sure that the event makes sense. 
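+// For example, an Event whose involvedObject is a namespaced object living in
+// namespace "ns-a" (an illustrative name) must itself have event.Namespace set
+// to "ns-a"; any mismatch is reported as field.Invalid on
+// involvedObject.namespace.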
+func ValidateEvent(event *api.Event) field.ErrorList { + allErrs := field.ErrorList{} + + // Make sure event.Namespace and the involvedObject.Namespace agree + if len(event.InvolvedObject.Namespace) == 0 { + // event.Namespace must also be empty (or "default", for compatibility with old clients) + if event.Namespace != api.NamespaceNone && event.Namespace != api.NamespaceDefault { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } else { + // event namespace must match + if event.Namespace != event.InvolvedObject.Namespace { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } + + // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds + if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { + if namespaced && len(event.InvolvedObject.Namespace) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) + } + if !namespaced && len(event.InvolvedObject.Namespace) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) + } + } + + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) + } + return allErrs +} + +// Check whether the kind in groupVersion is scoped at the root of the api hierarchy +func isNamespacedKind(kind, groupVersion string) (bool, error) { + group := apiutil.GetGroup(groupVersion) + g, err := registered.Group(group) + if err != nil { + return false, err + } + restMapping, err := g.RESTMapper.RESTMapping(unversioned.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion)) + if err != nil { + return false, err + } + scopeName := restMapping.Scope.Name() + if scopeName == meta.RESTScopeNameNamespace { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go new file mode 100644 index 000000000..f694baeae --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go @@ -0,0 +1,435 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/emicklei/go-restful/swagger" + ejson "github.com/exponent-io/jsonpath" + "github.com/golang/glog" + apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/yaml" +) + +type InvalidTypeError struct { + ExpectedKind reflect.Kind + ObservedKind reflect.Kind + FieldName string +} + +func (i *InvalidTypeError) Error() string { + return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String()) +} + +func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error { + return &InvalidTypeError{expected, observed, fieldName} +} + +// TypeNotFoundError is returned when the specified type +// cannot be found in the schema +type TypeNotFoundError string + +func (tnfe TypeNotFoundError) Error() string { + return fmt.Sprintf("couldn't find type: %s", string(tnfe)) +} + +// Schema is an interface that knows how to validate an API object serialized to a byte array. +type Schema interface { + ValidateBytes(data []byte) error +} + +type NullSchema struct{} + +func (NullSchema) ValidateBytes(data []byte) error { return nil } + +type NoDoubleKeySchema struct{} + +func (NoDoubleKeySchema) ValidateBytes(data []byte) error { + var list []error = nil + if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil { + list = append(list, err) + } + if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil { + list = append(list, err) + } + return utilerrors.NewAggregate(list) +} + +func validateNoDuplicateKeys(data []byte, path ...string) error { + r := ejson.NewDecoder(bytes.NewReader(data)) + // This is Go being unfriendly. The 'path ...string' comes in as a + // []string, and SeekTo takes ...interface{}, so we can't just pass + // the path straight in, we have to copy it. *sigh* + ifacePath := []interface{}{} + for ix := range path { + ifacePath = append(ifacePath, path[ix]) + } + found, err := r.SeekTo(ifacePath...) + if err != nil { + return err + } + if !found { + return nil + } + seen := map[string]bool{} + for { + tok, err := r.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case json.Delim: + if t.String() == "}" { + return nil + } + case ejson.KeyString: + if seen[string(t)] { + return fmt.Errorf("duplicate key: %s", string(t)) + } else { + seen[string(t)] = true + } + } + } +} + +type ConjunctiveSchema []Schema + +func (c ConjunctiveSchema) ValidateBytes(data []byte) error { + var list []error = nil + schemas := []Schema(c) + for ix := range schemas { + if err := schemas[ix].ValidateBytes(data); err != nil { + list = append(list, err) + } + } + return utilerrors.NewAggregate(list) +} + +type SwaggerSchema struct { + api swagger.ApiDeclaration + delegate Schema // For delegating to other api groups +} + +func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) { + schema := &SwaggerSchema{} + err := json.Unmarshal(data, &schema.api) + if err != nil { + return nil, err + } + schema.delegate = factory + return schema, nil +} + +// validateList unpacks a list and validates every item in the list. +// It returns nil if every item is valid. +// Otherwise it returns an error list containing the errors of every invalid item.
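+// For example, a List document shaped like the following illustrative sketch
+//
+//	{"kind": "List", "apiVersion": "v1", "items": [
+//	  {"apiVersion": "v1", "kind": "Pod", ...},
+//	  {"apiVersion": "v1", "kind": "Service", ...}]}
+//
+// is unpacked here, and each item is dispatched to ValidateObject under a type
+// name derived from its own apiVersion and kind (e.g. "v1.Pod").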
+func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { + items, exists := obj["items"] + if !exists { + return []error{fmt.Errorf("no items field in %#v", obj)} + } + return s.validateItems(items) +} + +func (s *SwaggerSchema) validateItems(items interface{}) []error { + allErrs := []error{} + itemList, ok := items.([]interface{}) + if !ok { + return append(allErrs, fmt.Errorf("items isn't a slice")) + } + for i, item := range itemList { + fields, ok := item.(map[string]interface{}) + if !ok { + allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i)) + continue + } + groupVersion := fields["apiVersion"] + if groupVersion == nil { + allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i)) + continue + } + itemVersion, ok := groupVersion.(string) + if !ok { + allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i)) + continue + } + if len(itemVersion) == 0 { + allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i)) + } + kind := fields["kind"] + if kind == nil { + allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i)) + continue + } + itemKind, ok := kind.(string) + if !ok { + allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i)) + continue + } + if len(itemKind) == 0 { + allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i)) + } + version := apiutil.GetVersion(itemVersion) + errs := s.ValidateObject(item, "", version+"."+itemKind) + if len(errs) >= 1 { + allErrs = append(allErrs, errs...) + } + } + + return allErrs +} + +func (s *SwaggerSchema) ValidateBytes(data []byte) error { + var obj interface{} + out, err := yaml.ToJSON(data) + if err != nil { + return err + } + data = out + if err := json.Unmarshal(data, &obj); err != nil { + return err + } + fields, ok := obj.(map[string]interface{}) + if !ok { + return fmt.Errorf("error in unmarshaling data %s", string(data)) + } + groupVersion := fields["apiVersion"] + if groupVersion == nil { + return fmt.Errorf("apiVersion not set") + } + if _, ok := groupVersion.(string); !ok { + return fmt.Errorf("apiVersion isn't string type") + } + kind := fields["kind"] + if kind == nil { + return fmt.Errorf("kind not set") + } + if _, ok := kind.(string); !ok { + return fmt.Errorf("kind isn't string type") + } + if strings.HasSuffix(kind.(string), "List") { + return utilerrors.NewAggregate(s.validateList(fields)) + } + version := apiutil.GetVersion(groupVersion.(string)) + allErrs := s.ValidateObject(obj, "", version+"."+kind.(string)) + if len(allErrs) == 1 { + return allErrs[0] + } + return utilerrors.NewAggregate(allErrs) +} + +func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error { + allErrs := []error{} + models := s.api.Models + model, ok := models.At(typeName) + + // Verify the api version matches. This is required for nested types with differing api versions because + // s.api only has schema for 1 api version (the parent object type's version). + // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1 + // api to delegate to the schema for the /v1 api. + // Only do this for !ok objects so that cross ApiVersion vendored types take precedence. 
+ if !ok && s.delegate != nil { + fields, mapOk := obj.(map[string]interface{}) + if !mapOk { + return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) + } + if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated { + if err != nil { + allErrs = append(allErrs, err) + } + return allErrs + } + } + + if !ok { + return append(allErrs, TypeNotFoundError(typeName)) + } + properties := model.Properties + if len(properties.List) == 0 { + // The object does not have any sub-fields. + return nil + } + fields, ok := obj.(map[string]interface{}) + if !ok { + return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) + } + if len(fieldName) > 0 { + fieldName = fieldName + "." + } + // handle required fields + for _, requiredKey := range model.Required { + if _, ok := fields[requiredKey]; !ok { + allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey)) + } + } + for key, value := range fields { + details, ok := properties.At(key) + + // Special case for runtime.RawExtension and runtime.Objects because they always fail to validate + // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected + // super-type (RawExtension) + if s.isGenericArray(details) { + errs := s.validateItems(value) + if len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + continue + } + if !ok { + allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) + continue + } + if details.Type == nil && details.Ref == nil { + allErrs = append(allErrs, fmt.Errorf("could not find the type of %s from object: %v", key, details)) + } + var fieldType string + if details.Type != nil { + fieldType = *details.Type + } else { + fieldType = *details.Ref + } + if value == nil { + glog.V(2).Infof("Skipping nil field: %s", key) + continue + } + errs := s.validateField(value, fieldName+key, fieldType, &details) + if len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + } + return allErrs +} + +// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the +// current SwaggerSchema. +// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema) +// Second return value is the result of the delegated validation if performed. +func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) { + // Never delegate objects in the same ApiVersion or we will get infinite recursion + if !s.isDifferentApiVersion(obj) { + return false, nil + } + + // Convert the object back into bytes so that we can pass it to the ValidateBytes function + m, err := json.Marshal(obj.Object) + if err != nil { + return true, err + } + + // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup + return true, s.delegate.ValidateBytes(m) +} + +// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does. +// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored. +func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool { + groupVersion := obj.GetAPIVersion() + return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion +} + +// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object. 
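+// For example (illustrative), a property declared in the swagger spec as
+//
+//	{"type": "array", "items": {"$ref": "runtime.RawExtension"}}
+//
+// matches this predicate, so its elements are validated one by one via
+// validateItems instead of against the RawExtension super-type.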
+func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool { + return p.DataTypeFields.Type != nil && + *p.DataTypeFields.Type == "array" && + p.Items != nil && + p.Items.Ref != nil && + (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object") +} + +// This matches a type name in the swagger spec, such as "v1.Binding". +var versionRegexp = regexp.MustCompile(`^(v.+|unversioned)\..*`) + +func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error { + allErrs := []error{} + if reflect.TypeOf(value) == nil { + return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName)) + } + // TODO: caesarxuchao: because we have multiple group/versions and objects + // may reference objects in other groups, the commented out way of checking + // if a fieldType is a type defined by us is outdated. We use a hacky way + // for now. + // TODO: the type name in the swagger spec is something like "v1.Binding", + // and the "v1" is generated from the package name, not the groupVersion of + // the type. We need to fix go-restful to embed the group name in the type + // name, otherwise we couldn't handle identically named types in different + // groups correctly. + if versionRegexp.MatchString(fieldType) { + // if strings.HasPrefix(fieldType, apiVersion) { + return s.ValidateObject(value, fieldName, fieldType) + } + switch fieldType { + case "string": + // Be loose about what we accept for 'string' since we use IntOrString in a couple of places + _, isString := value.(string) + _, isNumber := value.(float64) + _, isInteger := value.(int) + if !isString && !isNumber && !isInteger { + return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName)) + } + case "array": + arr, ok := value.([]interface{}) + if !ok { + return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) + } + var arrType string + if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil { + return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) + } + if fieldDetails.Items.Ref != nil { + arrType = *fieldDetails.Items.Ref + } else { + arrType = *fieldDetails.Items.Type + } + for ix := range arr { + errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil) + if len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + } + case "uint64": + case "int64": + case "integer": + _, isNumber := value.(float64) + _, isInteger := value.(int) + if !isNumber && !isInteger { + return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName)) + } + case "float64": + if _, ok := value.(float64); !ok { + return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName)) + } + case "boolean": + if _, ok := value.(bool); !ok { + return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName)) + } + // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`. + // We have both here so that kubectl can work with both old and new api servers.
+ case "object": + case "any": + default: + return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType)) + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go new file mode 100644 index 000000000..1c24a0d8c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go @@ -0,0 +1,3737 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path" + "reflect" + "regexp" + "strings" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/endpoints" + utilpod "k8s.io/kubernetes/pkg/api/pod" + "k8s.io/kubernetes/pkg/api/resource" + apiservice "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/api/unversioned" + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/security/apparmor" + utilconfig "k8s.io/kubernetes/pkg/util/config" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// TODO: delete this global variable when we enable the validation of common +// fields by default. +var RepairMalformedUpdates bool = true + +const isNegativeErrorMsg string = `must be greater than or equal to 0` +const isInvalidQuotaResource string = `must be a standard resource for quota` +const fieldImmutableErrorMsg string = `field is immutable` +const isNotIntegerErrorMsg string = `must be an integer` + +var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) +var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" + +const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = map[unversioned.GroupVersionKind]struct{}{ + v1.SchemeGroupVersion.WithKind("Event"): {}, +} + +// ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue +func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { + allErrs := field.ErrorList{} + actualValue, found := meta.Labels[key] + if !found { + allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key), + fmt.Sprintf("must be '%s'", expectedValue))) + return allErrs + } + if actualValue != expectedValue { + allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels, + fmt.Sprintf("must be '%s'", expectedValue))) + } + return allErrs +} + +// ValidateAnnotations validates that a set of annotations are correctly defined. 
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + var totalSize int64 + for k, v := range annotations { + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(totalAnnotationSizeLimitB) { + allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB)) + } + return allErrs +} + +func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Label(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. +func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Subdomain(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if annotations[api.AffinityAnnotationKey] != "" { + allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...) + } + + if annotations[api.TolerationsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) + } + + // TODO: remove these after we EOL the annotations. + if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists { + allErrs = append(allErrs, ValidateDNS1123Label(hostname, fldPath.Key(utilpod.PodHostnameAnnotation))...) + } + if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists { + allErrs = append(allErrs, ValidateDNS1123Label(subdomain, fldPath.Key(utilpod.PodSubdomainAnnotation))...) + } + + allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) + allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) + + sysctls, err := api.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey]) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error())) + } else { + allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...) + } + unsafeSysctls, err := api.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey]) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error())) + } else { + allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...) + } + inBoth := sysctlIntersection(sysctls, unsafeSysctls) + if len(inBoth) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) + } + + return allErrs +} + +func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + newAnnotations := newPod.Annotations + oldAnnotations := oldPod.Annotations + for k, oldVal := range oldAnnotations { + if newAnnotations[k] == oldVal { + continue // No change. 
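+		// The value changed; AppArmor annotations may not be updated in
+		// place, which the check below enforces.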
+ } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not update AppArmor annotations")) + } + } + // Check for removals. + for k := range oldAnnotations { + if _, ok := newAnnotations[k]; ok { + continue // No change. + } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove AppArmor annotations")) + } + } + allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...) + return allErrs +} + +func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: remove this after we EOL the annotation. + hostnamesMap, exists := annotations[endpoints.PodHostnamesAnnotation] + if exists && !isValidHostnamesMap(hostnamesMap) { + allErrs = append(allErrs, field.Invalid(fldPath, endpoints.PodHostnamesAnnotation, + `must be a valid json representation of map[string(IP)][HostRecord] e.g. "{"10.245.1.6":{"HostName":"my-webserver"}}"`)) + } + + return allErrs +} + +func validateOwnerReference(ownerReference api.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := unversioned.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + controllerName := "" + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) + if ref.Controller != nil && *ref.Controller { + if controllerName != "" { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, + fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name))) + } else { + controllerName = ref.Name + } + } + } + return allErrs +} + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if it is a dash.
+func maskTrailingDash(name string) string { + if strings.HasSuffix(name, "-") { + return name[:len(name)-1] + "a" + } + return name +} + +// ValidatePodName can be used to check whether the given pod name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidatePodName = NameIsDNSSubdomain + +// ValidateReplicationControllerName can be used to check whether the given replication +// controller name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateReplicationControllerName = NameIsDNSSubdomain + +// ValidateServiceName can be used to check whether the given service name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceName = NameIsDNS1035Label + +// ValidateNodeName can be used to check whether the given node name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNodeName = NameIsDNSSubdomain + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = NameIsDNSLabel + +// ValidateLimitRangeName can be used to check whether the given limit range name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateLimitRangeName = NameIsDNSSubdomain + +// ValidateResourceQuotaName can be used to check whether the given +// resource quota name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateResourceQuotaName = NameIsDNSSubdomain + +// ValidateSecretName can be used to check whether the given secret name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecretName = NameIsDNSSubdomain + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = NameIsDNSSubdomain + +// ValidateEndpointsName can be used to check whether the given endpoints name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateEndpointsName = NameIsDNSSubdomain + +// ValidateClusterName can be used to check whether the given cluster name is valid. +var ValidateClusterName = NameIsDNS1035Label + +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Subdomain(name) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. +func NameIsDNSLabel(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Label(name) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 1035 label.
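+// For example (illustrative): "my-svc" is a valid DNS 1035 label, while
+// "123-svc" is not, because such a label must begin with a letter.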
+func NameIsDNS1035Label(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1035Label(name) +} + +// Validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNegativeErrorMsg)) + } + return allErrs +} + +// Validates that a Quantity is not negative +func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) + } + return allErrs +} + +func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !api.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, fieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. +func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(meta.GenerateName) != 0 { + for _, msg := range nameFn(meta.GenerateName, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg)) + } + } + // If the generated name validates, but the calculated value does not, it's a problem with generation, and we + // report it here. This may confuse users, but indicates a programming bug and still must be validated. + // If there are multiple fields out of which one is required then add an or as a separator + if len(meta.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) + } else { + for _, msg := range nameFn(meta.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg)) + } + } + if requiresNamespace { + if len(meta.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } else { + for _, msg := range ValidateNamespaceName(meta.Namespace, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg)) + } + } + } else { + if len(meta.Namespace) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) + } + } + if len(meta.ClusterName) != 0 { + for _, msg := range ValidateClusterName(meta.ClusterName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.ClusterName, msg)) + } + } + allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...) 
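+	// Each finalizer is checked individually below; validateFinalizerName
+	// (defined elsewhere in this file) is assumed to require a qualified name.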
+ for _, finalizer := range meta.Finalizers { + allErrs = append(allErrs, validateFinalizerName(finalizer, fldPath.Child("finalizers"))...) + } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated +func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if !RepairMalformedUpdates && newMeta.UID != oldMeta.UID { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), newMeta.UID, "field is immutable")) + } + // in the event it is left empty, set it, to allow clients more flexibility + // TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields. + // Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed! + if RepairMalformedUpdates { + if len(newMeta.UID) == 0 { + newMeta.UID = oldMeta.UID + } + // ignore changes to timestamp + if oldMeta.CreationTimestamp.IsZero() { + oldMeta.CreationTimestamp = newMeta.CreationTimestamp + } else { + newMeta.CreationTimestamp = oldMeta.CreationTimestamp + } + // an object can never remove a deletion timestamp or clear/change grace period seconds + if !oldMeta.DeletionTimestamp.IsZero() { + newMeta.DeletionTimestamp = oldMeta.DeletionTimestamp + } + if oldMeta.DeletionGracePeriodSeconds != nil && newMeta.DeletionGracePeriodSeconds == nil { + newMeta.DeletionGracePeriodSeconds = oldMeta.DeletionGracePeriodSeconds + } + } + + // TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed. + if newMeta.DeletionGracePeriodSeconds != nil && (oldMeta.DeletionGracePeriodSeconds == nil || *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionGracePeriodSeconds"), newMeta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion")) + } + if newMeta.DeletionTimestamp != nil && (oldMeta.DeletionTimestamp == nil || !newMeta.DeletionTimestamp.Equal(*oldMeta.DeletionTimestamp)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion")) + } + + // Finalizers cannot be added if the object is already being deleted. + if oldMeta.DeletionTimestamp != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.Finalizers, oldMeta.Finalizers, fldPath.Child("finalizers"))...) + } + + // Reject updates that don't specify a resource version + if len(newMeta.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.ResourceVersion, "must be specified for an update")) + } + + // Generation shouldn't be decremented + if newMeta.Generation < oldMeta.Generation { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.Generation, "must not be decremented")) + } + + allErrs = append(allErrs, ValidateImmutableField(newMeta.Name, oldMeta.Name, fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.Namespace, oldMeta.Namespace, fldPath.Child("namespace"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.UID, oldMeta.UID, fldPath.Child("uid"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...) 
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.ClusterName, oldMeta.ClusterName, fldPath.Child("clusterName"))...) + + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...) + + return allErrs +} + +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + const newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted` + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + +func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { + allErrs := field.ErrorList{} + + allNames := sets.String{} + for i, vol := range volumes { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + el := validateVolumeSource(&vol.VolumeSource, idxPath) + if len(vol.Name) == 0 { + el = append(el, field.Required(namePath, "")) + } else { + el = append(el, ValidateDNS1123Label(vol.Name, namePath)...) + } + if allNames.Has(vol.Name) { + el = append(el, field.Duplicate(namePath, vol.Name)) + } + if len(el) == 0 { + allNames.Insert(vol.Name) + } else { + allErrs = append(allErrs, el...) + } + + } + return allNames, allErrs +} + +func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.ErrorList { + numVolumes := 0 + allErrs := field.ErrorList{} + if source.EmptyDir != nil { + numVolumes++ + // EmptyDirs have nothing to validate + } + if source.HostPath != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...) + } + } + if source.GitRepo != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...) + } + } + if source.GCEPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...) + } + } + if source.AWSElasticBlockStore != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...) 
+ } + } + if source.Secret != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...) + } + } + if source.NFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...) + } + } + if source.ISCSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...) + } + } + if source.Glusterfs != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGlusterfs(source.Glusterfs, fldPath.Child("glusterfs"))...) + } + } + if source.Flocker != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...) + } + } + if source.PersistentVolumeClaim != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...) + } + } + if source.RBD != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...) + } + } + if source.Cinder != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...) + } + } + if source.CephFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...) + } + } + if source.Quobyte != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...) + } + } + if source.DownwardAPI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...)
+		}
+	}
+	if source.FC != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
+		}
+	}
+	if source.FlexVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
+		}
+	}
+	if source.ConfigMap != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
+		}
+	}
+	if source.AzureFile != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
+		}
+	}
+	if source.VsphereVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
+		}
+	}
+	if source.PhotonPersistentDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
+		}
+	}
+	if source.AzureDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
+		}
+	}
+
+	if numVolumes == 0 {
+		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
+	}
+
+	return allErrs
+}
+
+func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(hostPath.Path) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+	}
+	return allErrs
+}
+
+func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(gitRepo.Repository) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("repository"), ""))
+	}
+
+	pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory"))
+	allErrs = append(allErrs, pathErrs...)
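+	// gitRepo.Directory must stay within the volume: validateLocalDescendingPath
+	// below rejects absolute paths and any ".." path element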
+ return allErrs +} + +func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + return allErrs +} + +func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fc.TargetWWNs) < 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "")) + } + + if fc.Lun == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "")) + } else { + if *fc.Lun < 0 || *fc.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255))) + } + } + return allErrs +} + +func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(pd.PDName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) + } + if pd.Partition < 0 || pd.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(PD.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + if PD.Partition < 0 || PD.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(secretSource.SecretName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + + secretMode := secretSource.DefaultMode + if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range secretSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + } + return allErrs +} + +func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(configMapSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + + configMapMode := configMapSource.DefaultMode + if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range configMapSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) 
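+		// each item must name a key and a relative, non-reserved target path;
+		// validateKeyToPath also range-checks the optional per-item mode (0..0777)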
+ } + return allErrs +} + +func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(kp.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } + if len(kp.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) + if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg)) + } + + return allErrs +} + +func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(claim.ClaimName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) + } + return allErrs +} + +func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(nfs.Server) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) + } + if len(nfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if !path.IsAbs(nfs.Path) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) + } + return allErrs +} + +func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(quobyte.Registry) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) + } else { + for _, hostPortPair := range strings.Split(quobyte.Registry, ",") { + if _, _, err := net.SplitHostPort(hostPortPair); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas")) + } + } + } + + if len(quobyte.Volume) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volume"), "")) + } + return allErrs +} + +func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + return allErrs +} + +func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required")) + } + if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously")) + } + if strings.Contains(flocker.DatasetName, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) + } + return allErrs +} + +var validDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.labels", + "metadata.annotations") + +func validateDownwardAPIVolumeSource(downwardAPIVolume 
*api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	downwardAPIMode := downwardAPIVolume.DefaultMode
+	if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg))
+	}
+
+	for _, file := range downwardAPIVolume.Items {
+		if len(file.Path) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+		}
+		allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...)
+		if file.FieldRef != nil {
+			allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
+			if file.ResourceFieldRef != nil {
+				allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
+			}
+		} else if file.ResourceFieldRef != nil {
+			allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...)
+		} else {
+			allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
+		}
+		if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg))
+		}
+	}
+	return allErrs
+}
+
+// This validation ensures that targetPath:
+// 1. is not an absolute path
+// 2. does not have any element which is ".."
+func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if path.IsAbs(targetPath) {
+		allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
+	}
+
+	// TODO: this assumes the OS of apiserver & nodes are the same
+	parts := strings.Split(targetPath, string(os.PathSeparator))
+	for _, item := range parts {
+		if item == ".." {
+			allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'"))
+			break // even for `../../..`, one error is sufficient to make the point
+		}
+	}
+	return allErrs
+}
+
+// This validation ensures that targetPath:
+// 1. is not an absolute path
+// 2. does not contain any '..' elements
+// 3. does not start with '..'
+func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...)
+	// Don't report this error if the check for .. elements already caught it.
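+	// validateLocalDescendingPath above already rejects any ".." path element,
+	// so the check below only adds an error for names that merely begin with
+	// ".." (for example "..data"), which this function treats as reserved.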
+	if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") {
+		allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'"))
+	}
+	return allErrs
+}
+
+func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(rbd.CephMonitors) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+	}
+	if len(rbd.RBDImage) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
+	}
+	return allErrs
+}
+
+func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(cd.VolumeID) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+	}
+	return allErrs
+}
+
+func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(cephfs.Monitors) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+	}
+	return allErrs
+}
+
+func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(fv.Driver) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
+	}
+	return allErrs
+}
+
+func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if azure.SecretName == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
+	}
+	if azure.ShareName == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
+	}
+	return allErrs
+}
+
+var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite))
+
+func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if azure.DiskName == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), ""))
+	}
+	if azure.DataDiskURI == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), ""))
+	}
+	if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) {
+		allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List()))
+	}
+	return allErrs
+}
+
+func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(cd.VolumePath) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), ""))
+	}
+	return allErrs
+}
+
+func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(cd.PdID) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), ""))
+	}
+	return allErrs
+}
+
+// ValidatePersistentVolumeName checks that a name is appropriate for a
+// PersistentVolume object.
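+// A valid name is a DNS-1123 subdomain (see NameIsDNSSubdomain), e.g. "pv-0001".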
+var ValidatePersistentVolumeName = NameIsDNSSubdomain + +var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) + +var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain)) + +func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { + allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, field.NewPath("metadata")) + + specPath := field.NewPath("spec") + if len(pv.Spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) + } + for _, mode := range pv.Spec.AccessModes { + if !supportedAccessModes.Has(string(mode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) + } + } + + if len(pv.Spec.Capacity) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) + } + + if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { + allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)})) + } + capPath := specPath.Child("capacity") + for r, qty := range pv.Spec.Capacity { + allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) + } + if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { + if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) + } + } + + numVolumes := 0 + if pv.Spec.HostPath != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) + } + } + if pv.Spec.GCEPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) + } + } + if pv.Spec.AWSElasticBlockStore != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...) + } + } + if pv.Spec.Glusterfs != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGlusterfs(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) + } + } + if pv.Spec.Flocker != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...) 
+		}
+	}
+	if pv.Spec.NFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...)
+		}
+	}
+	if pv.Spec.RBD != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
+		}
+	}
+	if pv.Spec.Quobyte != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...)
+		}
+	}
+	if pv.Spec.CephFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCephFSVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...)
+		}
+	}
+	if pv.Spec.ISCSI != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
+		}
+	}
+	if pv.Spec.Cinder != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
+		}
+	}
+	if pv.Spec.FC != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...)
+		}
+	}
+	if pv.Spec.FlexVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
+		}
+	}
+	if pv.Spec.AzureFile != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...)
+		}
+	}
+	if pv.Spec.VsphereVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...)
+		}
+	}
+	if pv.Spec.PhotonPersistentDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...)
+		}
+	}
+	if pv.Spec.AzureDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...)
+		}
+	}
+
+	if numVolumes == 0 {
+		allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
+	}
+
+	// do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy
+	if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle {
+		allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'"))
+	}
+
+	return allErrs
+}
+
+// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+	allErrs := ValidatePersistentVolume(newPv)
+	newPv.Status = oldPv.Status
+	return allErrs
+}
+
+// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata"))
+	if len(newPv.ResourceVersion) == 0 {
+		allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+	}
+	newPv.Spec = oldPv.Spec
+	return allErrs
+}
+
+// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim
+func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList {
+	allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...)
+	return allErrs
+}
+
+// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec
+func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(spec.AccessModes) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
+	}
+	if spec.Selector != nil {
+		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
+	}
+	for _, mode := range spec.AccessModes {
+		if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany {
+			allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List()))
+		}
+	}
+	storageValue, ok := spec.Resources.Requests[api.ResourceStorage]
+	if !ok {
+		allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), ""))
+	} else {
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...)
+	}
+	return allErrs
+}
+
+// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
+func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...)
+	// PVController needs to update PVC.Spec w/ VolumeName.
+	// Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
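+	// Exception: the PV controller may set spec.volumeName exactly once while
+	// binding the claim, so that one transition is excluded from the equality
+	// check below.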
+	if len(oldPvc.Spec.VolumeName) == 0 {
+		// volumeName changes are allowed once.
+		// Reset back to empty string after equality check
+		oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName
+		defer func() { oldPvc.Spec.VolumeName = "" }()
+	}
+	// changes to Spec are not allowed, but updates to label/annotations are OK.
+	// no-op updates pass validation.
+	if !api.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) {
+		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
+	}
+	newPvc.Status = oldPvc.Status
+	return allErrs
+}
+
+// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
+func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+	if len(newPvc.ResourceVersion) == 0 {
+		allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+	}
+	if len(newPvc.Spec.AccessModes) == 0 {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec", "accessModes"), ""))
+	}
+	capPath := field.NewPath("status", "capacity")
+	for r, qty := range newPvc.Status.Capacity {
+		allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+	}
+	newPvc.Spec = oldPvc.Spec
+	return allErrs
+}
+
+var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP))
+
+func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	allNames := sets.String{}
+	for i, port := range ports {
+		idxPath := fldPath.Index(i)
+		if len(port.Name) > 0 {
+			if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
+				for _, msg := range msgs {
+					allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msg))
+				}
+			} else if allNames.Has(port.Name) {
+				allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
+			} else {
+				allNames.Insert(port.Name)
+			}
+		}
+		if port.ContainerPort == 0 {
+			allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
+		} else {
+			for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
+			}
+		}
+		if port.HostPort != 0 {
+			for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
+			}
+		}
+		if len(port.Protocol) == 0 {
+			allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
+		} else if !supportedPortProtocols.Has(string(port.Protocol)) {
+			allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+		}
+	}
+	return allErrs
+}
+
+func validateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	for i, ev := range vars {
+		idxPath := fldPath.Index(i)
+		if len(ev.Name) == 0 {
+			allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+		} else {
+			for _, msg := range validation.IsCIdentifier(ev.Name) {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
+			}
+		}
+		allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...)
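+		// value and valueFrom are mutually exclusive: validateEnvVarValueFrom
+		// below rejects an env var that sets both, or more than one source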
+ } + return allErrs +} + +var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "spec.nodeName", "spec.serviceAccountName", "status.podIP") +var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory") + +func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if ev.ValueFrom == nil { + return allErrs + } + + numSources := 0 + + if ev.ValueFrom.FieldRef != nil { + numSources++ + allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...) + } + if ev.ValueFrom.ResourceFieldRef != nil { + numSources++ + allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) + } + if ev.ValueFrom.ConfigMapKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) + } + if ev.ValueFrom.SecretKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) + } + + if len(ev.Value) != 0 { + if numSources != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) + } + } else if numSources != 1 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) + } + + return allErrs +} + +func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(fs.APIVersion) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) + } else if len(fs.FieldPath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) + } else { + internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) + } else if !expressions.Has(internalFieldPath) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) + } + } + + return allErrs +} + +func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { + allErrs := field.ErrorList{} + + if volume && len(fs.ContainerName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) + } else if len(fs.Resource) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) + } else if !expressions.Has(fs.Resource) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) + } + allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
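+	// when set, the divisor must be one of the fixed quantities accepted for
+	// the resource in question (see validateContainerResourceDivisor below)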
+	return allErrs
+}
+
+var validContainerResourceDivisorForCPU = sets.NewString("1m", "1")
+var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
+
+func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	unsetDivisor := resource.Quantity{}
+	if unsetDivisor.Cmp(divisor) == 0 {
+		return allErrs
+	}
+	switch rName {
+	case "limits.cpu", "requests.cpu":
+		if !validContainerResourceDivisorForCPU.Has(divisor.String()) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor values 1m and 1 are supported with the cpu resource"))
+		}
+	case "limits.memory", "requests.memory":
+		if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))
+		}
+	}
+	return allErrs
+}
+
+func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(s.Name) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+	}
+	if len(s.Key) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+	} else {
+		for _, msg := range validation.IsConfigMapKey(s.Key) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+		}
+	}
+
+	return allErrs
+}
+
+func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(s.Name) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+	}
+	if len(s.Key) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+	} else {
+		for _, msg := range validation.IsConfigMapKey(s.Key) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+		}
+	}
+
+	return allErrs
+}
+
+func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	mountpoints := sets.NewString()
+
+	for i, mnt := range mounts {
+		idxPath := fldPath.Index(i)
+		if len(mnt.Name) == 0 {
+			allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+		} else if !volumes.Has(mnt.Name) {
+			allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name))
+		}
+		if len(mnt.MountPath) == 0 {
+			allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), ""))
+		}
+		if mountpoints.Has(mnt.MountPath) {
+			allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
+		}
+		mountpoints.Insert(mnt.MountPath)
+		if len(mnt.SubPath) > 0 {
+			allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, idxPath.Child("subPath"))...)
+		}
+	}
+	return allErrs
+}
+
+func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if probe == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...)
+
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
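+	// (validateHandler above has already enforced that exactly one of exec,
+	// httpGet and tcpSocket is set; every timing/threshold field on the probe
+	// must be non-negative)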
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) + return allErrs +} + +// AccumulateUniqueHostPorts extracts each HostPort of each Container, +// accumulating the results and returning an error if any ports conflict. +func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for ci, ctr := range containers { + idxPath := fldPath.Index(ci) + portsPath := idxPath.Child("ports") + for pi := range ctr.Ports { + idxPath := portsPath.Index(pi) + port := ctr.Ports[pi].HostPort + if port == 0 { + continue + } + str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) + if accumulator.Has(str) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) + } else { + accumulator.Insert(str) + } + } + } + return allErrs +} + +// checkHostPortConflicts checks for colliding Port.HostPort values across +// a slice of containers. +func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { + allPorts := sets.String{} + return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) +} + +func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(exec.Command) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) + } + return allErrors +} + +var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) + +func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(http.Path) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) + } + allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
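+	// the port may be given numerically (1-65535) or as an IANA service name;
+	// ValidatePortNumOrName below accepts either form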
+ if !supportedHTTPSchemes.Has(string(http.Scheme)) { + allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) + } + for _, header := range http.HTTPHeaders { + for _, msg := range validation.IsHTTPHeaderName(header.Name) { + allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg)) + } + } + return allErrors +} + +func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if port.Type == intstr.Int { + for _, msg := range validation.IsValidPortNum(port.IntValue()) { + allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg)) + } + } else if port.Type == intstr.String { + for _, msg := range validation.IsValidPortName(port.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg)) + } + } else { + allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type))) + } + return allErrs +} + +func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { + return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) +} + +func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { + numHandlers := 0 + allErrors := field.ErrorList{} + if handler.Exec != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) + } + } + if handler.HTTPGet != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) + } + } + if handler.TCPSocket != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) + } + } + if numHandlers == 0 { + allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) + } + return allErrors +} + +func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if lifecycle.PostStart != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) + } + if lifecycle.PreStop != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) 
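+		// postStart and preStop are ordinary handlers, so the one-of rule in
+		// validateHandler applies to each of them as well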
+ } + return allErrs +} + +var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) + +func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + + switch policy { + case api.PullAlways, api.PullIfNotPresent, api.PullNever: + break + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) + } + + return allErrors +} + +func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if len(containers) > 0 { + allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) + } + + allNames := sets.String{} + for _, ctr := range otherContainers { + allNames.Insert(ctr.Name) + } + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) + } + if len(ctr.Name) > 0 { + allNames.Insert(ctr.Name) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) + } + if ctr.LivenessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) + } + if ctr.ReadinessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) + } + } + return allErrs +} + +func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(containers) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + allNames := sets.String{} + for i, ctr := range containers { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + if len(ctr.Name) == 0 { + allErrs = append(allErrs, field.Required(namePath, "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...) + } + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name)) + } else { + allNames.Insert(ctr.Name) + } + if len(ctr.Image) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) + } + allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) + // Liveness-specific validation + if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) + } + + allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) + allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) + allErrs = append(allErrs, validateEnv(ctr.Env, idxPath.Child("env"))...) + allErrs = append(allErrs, validateVolumeMounts(ctr.VolumeMounts, volumes, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) + allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) 
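+		// each container is validated on its own; host-port collisions across
+		// containers are checked once after this loop via checkHostPortConflicts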
+		allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...)
+	}
+	// Check for colliding ports across all containers.
+	allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...)
+
+	return allErrs
+}
+
+func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+	switch *restartPolicy {
+	case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever:
+		break
+	case "":
+		allErrors = append(allErrors, field.Required(fldPath, ""))
+	default:
+		validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)}
+		allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
+	}
+
+	return allErrors
+}
+
+func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+	switch *dnsPolicy {
+	case api.DNSClusterFirst, api.DNSDefault:
+		break
+	case "":
+		allErrors = append(allErrors, field.Required(fldPath, ""))
+	default:
+		validValues := []string{string(api.DNSClusterFirst), string(api.DNSDefault)}
+		allErrors = append(allErrors, field.NotSupported(fldPath, *dnsPolicy, validValues))
+	}
+	return allErrors
+}
+
+func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+	if hostNetwork {
+		for i, container := range containers {
+			portsPath := fldPath.Index(i).Child("ports")
+			for pi, port := range container.Ports {
+				idxPath := portsPath.Index(pi)
+				if port.HostPort != port.ContainerPort {
+					allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
+				}
+			}
+		}
+	}
+	return allErrors
+}
+
+// validateImagePullSecrets checks to make sure the pull secrets are well
+// formed. Right now, we only expect name to be set (it's the only field). If
+// this ever changes and someone decides to set those fields, we'd like to
+// know.
+func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+	for i, currPullSecret := range imagePullSecrets {
+		idxPath := fldPath.Index(i)
+		strippedRef := api.LocalObjectReference{Name: currPullSecret.Name}
+		if !reflect.DeepEqual(strippedRef, currPullSecret) {
+			allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set"))
+		}
+	}
+	return allErrors
+}
+
+func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
+	if !allowEmpty && len(*effect) == 0 {
+		return field.ErrorList{field.Required(fldPath, "")}
+	}
+
+	allErrors := field.ErrorList{}
+	switch *effect {
+	// TODO: Replace the next line with the subsequent commented-out line once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
+	case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule:
+	// case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute:
+	default:
+		validValues := []string{
+			string(api.TaintEffectNoSchedule),
+			string(api.TaintEffectPreferNoSchedule),
+			// TODO: Uncomment this block once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
+			// string(api.TaintEffectNoScheduleNoAdmit),
+			// string(api.TaintEffectNoScheduleNoAdmitNoExecute),
+		}
+		allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
+	}
+	return allErrors
+}
+
+// validateTolerations tests if given tolerations have valid data.
+func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+	for i, toleration := range tolerations {
+		idxPath := fldPath.Index(i)
+		// validate the toleration key
+		allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
+
+		// validate toleration operator and value
+		switch toleration.Operator {
+		case api.TolerationOpEqual, "":
+			if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
+				allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
+			}
+		case api.TolerationOpExists:
+			if len(toleration.Value) > 0 {
+				allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'"))
+			}
+		default:
+			validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)}
+			allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
+		}
+
+		// validate toleration effect
+		if len(toleration.Effect) > 0 {
+			allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
+		}
+	}
+	return allErrors
+}
+
+// ValidatePod tests if required fields in the pod are set.
+func ValidatePod(pod *api.Pod) field.ErrorList {
+	fldPath := field.NewPath("metadata")
+	allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
+	allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...)
+	return allErrs
+}
+
+// ValidatePodSpec tests that the specified PodSpec has valid data.
+// This includes checking formatting and uniqueness. It also canonicalizes the
+// structure by setting default values and implementing any backwards-compatibility
+// tricks.
+func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes"))
+	allErrs = append(allErrs, vErrs...)
+	allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
+	allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...)
+	allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
+	allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
+	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
+	allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
+	allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
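+	// the optional identity fields below (serviceAccountName, nodeName,
+	// hostname, subdomain) are validated only when they are set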
+	if len(spec.ServiceAccountName) > 0 {
+		for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
+		}
+	}
+
+	if len(spec.NodeName) > 0 {
+		for _, msg := range ValidateNodeName(spec.NodeName, false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
+		}
+	}
+
+	if spec.ActiveDeadlineSeconds != nil {
+		if *spec.ActiveDeadlineSeconds <= 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), spec.ActiveDeadlineSeconds, "must be greater than 0"))
+		}
+	}
+
+	if len(spec.Hostname) > 0 {
+		allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...)
+	}
+
+	if len(spec.Subdomain) > 0 {
+		allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...)
+	}
+
+	return allErrs
+}
+
+// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
+func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	switch rq.Operator {
+	case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn:
+		if len(rq.Values) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+		}
+	case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist:
+		if len(rq.Values) > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+		}
+	case api.NodeSelectorOpGt, api.NodeSelectorOpLt:
+		if len(rq.Values) != 1 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must have a single value when `operator` is 'Gt' or 'Lt'"))
+		}
+	default:
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
+	}
+	allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
+	return allErrs
+}
+
+// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
+func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(term.MatchExpressions) == 0 {
+		return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement"))
+	}
+	for j, req := range term.MatchExpressions {
+		allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
+	}
+	return allErrs
+}
+
+// ValidateNodeSelector tests that the specified nodeSelector fields have valid data
+func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	termFldPath := fldPath.Child("nodeSelectorTerms")
+	if len(nodeSelector.NodeSelectorTerms) == 0 {
+		return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
+	}
+
+	for i, term := range nodeSelector.NodeSelectorTerms {
+		allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
+ } + + return allErrs +} + +// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data +func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + avoids, err := api.GetAvoidPodsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) + return allErrs + } + + if len(avoids.PreferAvoidPods) != 0 { + for i, pa := range avoids.PreferAvoidPods { + idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i) + allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) + } + } + + return allErrs +} + +// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. +func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if avoidPodEntry.PodSignature.PodController == nil { + allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) + } else { + if *(avoidPodEntry.PodSignature.PodController.Controller) != true { + allErrors = append(allErrors, + field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), + *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) + } + } + return allErrors +} + +// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data +func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, term := range terms { + if term.Weight <= 0 || term.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) + } + + allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) + } + return allErrs +} + +// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data +func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) + for _, name := range podAffinityTerm.Namespaces { + for _, msg := range ValidateNamespaceName(name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) + } + } + if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity")) + } + if len(podAffinityTerm.TopologyKey) != 0 { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) + } + return allErrs +} + +// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data +func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, podAffinityTerm := range podAffinityTerms { + allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...) 
+	}
+	return allErrs
+}
+
+// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
+func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for j, weightedTerm := range weightedPodAffinityTerms {
+		if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
+		}
+		allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...)
+	}
+	return allErrs
+}
+
+// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
+func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// TODO: Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+	// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+	//	allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+	//		fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+	// }
+	if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		// empty topologyKey is not allowed for hard pod anti-affinity
+		allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+			fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+	}
+	if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		// empty topologyKey is allowed for soft pod anti-affinity
+		allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true,
+			fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+	}
+	return allErrs
+}
+
+// validatePodAffinity tests that the specified podAffinity fields have valid data
+func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// TODO: Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+	// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+	//	allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+	//		fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+	// }
+	if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		// empty topologyKey is not allowed for hard pod affinity
+		allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+			fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+	}
+	if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		// empty topologyKey is not allowed for soft pod affinity
+		allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false,
+			fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
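+		// Editorial note (an observation drawn from the flags above): unlike
+		// soft pod anti-affinity, soft pod affinity is validated with
+		// allowEmptyTopologyKey=false, so preferred affinity terms always need
+		// a topologyKey.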
+	}
+	return allErrs
+}
+
+// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
+func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	affinity, err := api.GetAffinityFromPodAnnotations(annotations)
+	if err != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error()))
+		return allErrs
+	}
+	if affinity == nil {
+		return allErrs
+	}
+
+	affinityFldPath := fldPath.Child(api.AffinityAnnotationKey)
+	if affinity.NodeAffinity != nil {
+		na := affinity.NodeAffinity
+		naFldPath := affinityFldPath.Child("nodeAffinity")
+		// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+		// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
+		//	allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+		// }
+
+		if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+			allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+		}
+
+		if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+			allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+		}
+	}
+	if affinity.PodAffinity != nil {
+		allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...)
+	}
+	if affinity.PodAntiAffinity != nil {
+		allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...)
+	}
+
+	return allErrs
+}
+
+// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations have valid data
+func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	tolerations, err := api.GetTolerationsFromPodAnnotations(annotations)
+	if err != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error()))
+		return allErrs
+	}
+	if len(tolerations) > 0 {
+		allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...)
+	}
+
+	return allErrs
+}
+
+func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
+	if p == "docker/default" {
+		return nil
+	}
+	if p == "unconfined" {
+		return nil
+	}
+	if strings.HasPrefix(p, "localhost/") {
+		return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath)
+	}
+	return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
+}
+
+func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if p, exists := annotations[api.SeccompPodAnnotationKey]; exists {
+		allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...)
+	}
+	for k, p := range annotations {
+		if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) {
+			allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...)
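+			// Illustrative (editorial): container-scoped keys are
+			// api.SeccompContainerAnnotationKeyPrefix plus the container name,
+			// e.g. "container.seccomp.security.alpha.kubernetes.io/db", while
+			// the pod-wide default checked above uses api.SeccompPodAnnotationKey.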
+		}
+	}
+
+	return allErrs
+}
+
+func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for k, p := range annotations {
+		if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) {
+			continue
+		}
+		if !utilconfig.DefaultFeatureGate.AppArmor() {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate"))
+			continue
+		}
+		containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix)
+		if !podSpecHasContainer(spec, containerName) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found"))
+		}
+
+		if err := apparmor.ValidateProfileFormat(p); err != nil {
+			allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error()))
+		}
+	}
+
+	return allErrs
+}
+
+func podSpecHasContainer(spec *api.PodSpec, containerName string) bool {
+	for _, c := range spec.InitContainers {
+		if c.Name == containerName {
+			return true
+		}
+	}
+	for _, c := range spec.Containers {
+		if c.Name == containerName {
+			return true
+		}
+	}
+	return false
+}
+
+const (
+	// a sysctl segment regex, concatenated with dots to form a sysctl name
+	SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
+	// a sysctl name regex
+	SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt
+
+	// the maximal length of a sysctl name
+	SysctlMaxLength int = 253
+)
+
+var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$")
+
+// IsValidSysctlName checks that the given string is a valid sysctl name,
+// i.e. matches SysctlFmt.
+func IsValidSysctlName(name string) bool {
+	if len(name) > SysctlMaxLength {
+		return false
+	}
+	return sysctlRegexp.MatchString(name)
+}
+
+func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, s := range sysctls {
+		if len(s.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), ""))
+		} else if !IsValidSysctlName(s.Name) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt)))
+		}
+	}
+	return allErrs
+}
+
+// ValidatePodSecurityContext tests that the specified PodSecurityContext has valid data.
+func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if securityContext != nil {
+		allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
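+		// Editorial sketch of the check above (an assumption for readers;
+		// validateHostNetwork is defined elsewhere in this file): with
+		// HostNetwork set, each declared container port is expected to have
+		// HostPort equal to ContainerPort, since the pod shares the node's
+		// network namespace.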
+		if securityContext.FSGroup != nil {
+			for _, msg := range validation.IsValidGroupId(*securityContext.FSGroup) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
+			}
+		}
+		if securityContext.RunAsUser != nil {
+			for _, msg := range validation.IsValidUserId(*securityContext.RunAsUser) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg))
+			}
+		}
+		for g, gid := range securityContext.SupplementalGroups {
+			for _, msg := range validation.IsValidGroupId(gid) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
+			}
+		}
+	}
+
+	return allErrs
+}
+
+func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
+	allErrs = field.ErrorList{}
+	if len(newContainers) != len(oldContainers) {
+		// TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
+		allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
+		return allErrs, true
+	}
+
+	// validate updated container images
+	for i, ctr := range newContainers {
+		if len(ctr.Image) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), ""))
+		}
+	}
+	return allErrs, false
+}
+
+// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+	fldPath := field.NewPath("metadata")
+	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+	allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...)
+	specPath := field.NewPath("spec")
+
+	// validate updateable fields:
+	// 1. containers[*].image
+	// 2. initContainers[*].image
+	// 3. spec.activeDeadlineSeconds
+
+	containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
+	allErrs = append(allErrs, containerErrs...)
+	if stop {
+		return allErrs
+	}
+	containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers"))
+	allErrs = append(allErrs, containerErrs...)
+	if stop {
+		return allErrs
+	}
+
+	// validate updated spec.activeDeadlineSeconds. two types of updates are allowed:
+	// 1. from nil to a positive value
+	// 2. from a positive value to a lesser, non-negative value
+	if newPod.Spec.ActiveDeadlineSeconds != nil {
+		newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds
+		if newActiveDeadlineSeconds < 0 {
+			allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, isNegativeErrorMsg))
+			return allErrs
+		}
+		if oldPod.Spec.ActiveDeadlineSeconds != nil {
+			oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+			if oldActiveDeadlineSeconds < newActiveDeadlineSeconds {
+				allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value"))
+				return allErrs
+			}
+		}
+	} else if oldPod.Spec.ActiveDeadlineSeconds != nil {
+		allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to a nil value"))
+	}
+
+	// handle updateable fields by munging those fields prior to deep equal comparison.
+	mungedPod := *newPod
+	// munge containers[*].image
+	var newContainers []api.Container
+	for ix, container := range mungedPod.Spec.Containers {
+		container.Image = oldPod.Spec.Containers[ix].Image
+		newContainers = append(newContainers, container)
+	}
+	mungedPod.Spec.Containers = newContainers
+	// munge initContainers[*].image
+	var newInitContainers []api.Container
+	for ix, container := range mungedPod.Spec.InitContainers {
+		container.Image = oldPod.Spec.InitContainers[ix].Image
+		newInitContainers = append(newInitContainers, container)
+	}
+	mungedPod.Spec.InitContainers = newInitContainers
+	// munge spec.activeDeadlineSeconds
+	mungedPod.Spec.ActiveDeadlineSeconds = nil
+	if oldPod.Spec.ActiveDeadlineSeconds != nil {
+		activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+		mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
+	}
+	if !api.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) {
+		// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
+		allErrs = append(allErrs, field.Forbidden(specPath, "pod updates may not change fields other than `containers[*].image` or `spec.activeDeadlineSeconds`"))
+	}
+
+	return allErrs
+}
+
+// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
+
+	// TODO: allow change when bindings are properly decoupled from pods
+	if newPod.Spec.NodeName != oldPod.Spec.NodeName {
+		allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly"))
+	}
+
+	// For status update we ignore changes to pod spec.
+	newPod.Spec = oldPod.Spec
+
+	return allErrs
+}
+
+// ValidatePodBinding tests if required fields in the pod binding are legal.
+func ValidatePodBinding(binding *api.Binding) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" {
+		// TODO: When validation becomes versioned, this gets more complicated.
+		allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", ""}))
+	}
+	if len(binding.Target.Name) == 0 {
+		// TODO: When validation becomes versioned, this gets more complicated.
+		allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), ""))
+	}
+
+	return allErrs
+}
+
+// ValidatePodTemplate tests if required fields in the pod template are set.
+func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList {
+	allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...)
+	return allErrs
+}
+
+// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...)
+	return allErrs
+}
+
+var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone))
+var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort),
+	string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName))
+
+// ValidateService tests if required fields/annotations of a Service are valid.
+func ValidateService(service *api.Service) field.ErrorList {
+	allErrs := validateServiceFields(service)
+	allErrs = append(allErrs, validateServiceAnnotations(service, nil)...)
+	return allErrs
+}
+
+// validateServiceFields tests if required fields in the service are set.
+func validateServiceFields(service *api.Service) field.ErrorList {
+	allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))
+
+	specPath := field.NewPath("spec")
+	isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone
+	if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName {
+		allErrs = append(allErrs, field.Required(specPath.Child("ports"), ""))
+	}
+	switch service.Spec.Type {
+	case api.ServiceTypeLoadBalancer:
+		for ix := range service.Spec.Ports {
+			port := &service.Spec.Ports[ix]
+			// This is a workaround for broken cloud environments that
+			// over-open firewalls. Hopefully it can go away when more clouds
+			// understand containers better.
+			if port.Port == 10250 {
+				portPath := specPath.Child("ports").Index(ix)
+				allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet"))
+			}
+		}
+		if service.Spec.ClusterIP == "None" {
+			allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services"))
+		}
+	case api.ServiceTypeExternalName:
+		if service.Spec.ClusterIP != "" {
+			allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services"))
+		}
+		if len(service.Spec.ExternalName) > 0 {
+			allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...)
+		} else {
+			allErrs = append(allErrs, field.Required(specPath.Child("externalName"), ""))
+		}
+	}
+
+	allPortNames := sets.String{}
+	portsPath := specPath.Child("ports")
+	for i := range service.Spec.Ports {
+		portPath := portsPath.Index(i)
+		allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...)
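+		// Illustrative (editorial): with more than one port, requireName is
+		// passed as true, so each port needs a unique DNS-1123 label name;
+		// validateServicePort below also rejects out-of-range port numbers.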
+	}
+
+	if service.Spec.Selector != nil {
+		allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
+	}
+
+	if len(service.Spec.SessionAffinity) == 0 {
+		allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), ""))
+	} else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) {
+		allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List()))
+	}
+
+	if api.IsServiceIPSet(service) {
+		if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil {
+			allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address"))
+		}
+	}
+
+	ipPath := specPath.Child("externalIPs")
+	for i, ip := range service.Spec.ExternalIPs {
+		idxPath := ipPath.Index(i)
+		if msgs := validation.IsValidIP(ip); len(msgs) != 0 {
+			for i := range msgs {
+				allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
+			}
+		} else {
+			allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...)
+		}
+	}
+
+	if len(service.Spec.Type) == 0 {
+		allErrs = append(allErrs, field.Required(specPath.Child("type"), ""))
+	} else if !supportedServiceType.Has(string(service.Spec.Type)) {
+		allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List()))
+	}
+
+	if service.Spec.Type == api.ServiceTypeLoadBalancer {
+		portsPath := specPath.Child("ports")
+		includeProtocols := sets.NewString()
+		for i := range service.Spec.Ports {
+			portPath := portsPath.Index(i)
+			if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) {
+				allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports"))
+			} else {
+				includeProtocols.Insert(string(service.Spec.Ports[i].Protocol))
+			}
+		}
+		if includeProtocols.Len() > 1 {
+			allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mixed protocols"))
+		}
+	}
+
+	if service.Spec.Type == api.ServiceTypeClusterIP {
+		portsPath := specPath.Child("ports")
+		for i := range service.Spec.Ports {
+			portPath := portsPath.Index(i)
+			if service.Spec.Ports[i].NodePort != 0 {
+				allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'"))
+			}
+		}
+	}
+
+	// Check for duplicate NodePorts, considering (protocol,port) pairs
+	portsPath = specPath.Child("ports")
+	nodePorts := make(map[api.ServicePort]bool)
+	for i := range service.Spec.Ports {
+		port := &service.Spec.Ports[i]
+		if port.NodePort == 0 {
+			continue
+		}
+		portPath := portsPath.Index(i)
+		var key api.ServicePort
+		key.Protocol = port.Protocol
+		key.NodePort = port.NodePort
+		_, found := nodePorts[key]
+		if found {
+			allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort))
+		}
+		nodePorts[key] = true
+	}
+
+	// Validate SourceRange field and annotation
+	_, ok := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey]
+	if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok {
+		var fieldPath *field.Path
+		var val string
+		if len(service.Spec.LoadBalancerSourceRanges) > 0 {
+			fieldPath = specPath.Child("loadBalancerSourceRanges")
+			val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges)
+		} else {
+			fieldPath = field.NewPath("metadata",
"annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] + } + if service.Spec.Type != api.ServiceTypeLoadBalancer { + allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) + } + _, err := apiservice.GetLoadBalancerSourceRanges(service) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 ")) + } + } + return allErrs +} + +func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if requireName && len(sp.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(sp.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...) + if allNames.Has(sp.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) + } else { + allNames.Insert(sp.Name) + } + } + + for _, msg := range validation.IsValidPortNum(int(sp.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg)) + } + + if len(sp.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !supportedPortProtocols.Has(string(sp.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) + } + + allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) + + // in the v1 API, targetPorts on headless services were tolerated. + // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. + // + // if isHeadlessService { + // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { + // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) + // } + // } + + return allErrs +} + +func validateServiceAnnotations(service *api.Service, oldService *api.Service) (allErrs field.ErrorList) { + // 2 annotations went from alpha to beta in 1.5: healthcheck-nodeport and + // external-traffic. The user cannot mix these. All updates to the alpha + // annotation are disallowed. The user must change both alpha annotations + // to beta before making any modifications, even though the system continues + // to respect the alpha version. 
+	hcAlpha, healthCheckAlphaOk := service.Annotations[apiservice.AlphaAnnotationHealthCheckNodePort]
+	onlyLocalAlpha, onlyLocalAlphaOk := service.Annotations[apiservice.AlphaAnnotationExternalTraffic]
+
+	_, healthCheckBetaOk := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort]
+	_, onlyLocalBetaOk := service.Annotations[apiservice.BetaAnnotationExternalTraffic]
+
+	var oldHealthCheckAlpha, oldOnlyLocalAlpha string
+	var oldHealthCheckAlphaOk, oldOnlyLocalAlphaOk bool
+	if oldService != nil {
+		oldHealthCheckAlpha, oldHealthCheckAlphaOk = oldService.Annotations[apiservice.AlphaAnnotationHealthCheckNodePort]
+		oldOnlyLocalAlpha, oldOnlyLocalAlphaOk = oldService.Annotations[apiservice.AlphaAnnotationExternalTraffic]
+	}
+	hcValueChanged := oldHealthCheckAlphaOk && healthCheckAlphaOk && oldHealthCheckAlpha != hcAlpha
+	hcValueNew := !oldHealthCheckAlphaOk && healthCheckAlphaOk
+	hcValueGone := !healthCheckAlphaOk && !healthCheckBetaOk && oldHealthCheckAlphaOk
+	onlyLocalHCMismatch := onlyLocalBetaOk && healthCheckAlphaOk
+
+	// On upgrading to a 1.5 cluster, the user is locked in at the current
+	// alpha setting, until they modify the Service such that the pair of
+	// annotations are both beta. Basically this means we need to:
+	// - Disallow updates to the alpha annotation.
+	// - Disallow creating a Service with the alpha annotation.
+	// - Disallow removing both alpha annotations. Removing the health-check
+	//   annotation is rejected at a later stage anyway, so if we allow removing
+	//   just onlyLocal we might leak the port.
+	// - Disallow a single field from transitioning to beta. Mismatched
+	//   annotations cause confusion.
+	// - Ignore changes to the fields if they're both transitioning to beta.
+	// - Allow modifications to Services in fields other than the alpha annotation.
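+	//
+	// Illustrative example of these rules (editorial): keeping the alpha
+	// external-traffic annotation while adding only the beta
+	// healthcheck-nodeport annotation trips hcOnlyLocalMismatch below, since
+	// the pair must move to beta together.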
+
+	if hcValueNew || hcValueChanged || hcValueGone || onlyLocalHCMismatch {
+		fieldPath := field.NewPath("metadata", "annotations").Key(apiservice.AlphaAnnotationHealthCheckNodePort)
+		msg := fmt.Sprintf("please replace the alpha annotation with the beta version %v",
+			apiservice.BetaAnnotationHealthCheckNodePort)
+		allErrs = append(allErrs, field.Invalid(fieldPath, apiservice.AlphaAnnotationHealthCheckNodePort, msg))
+	}
+
+	onlyLocalValueChanged := oldOnlyLocalAlphaOk && onlyLocalAlphaOk && oldOnlyLocalAlpha != onlyLocalAlpha
+	onlyLocalValueNew := !oldOnlyLocalAlphaOk && onlyLocalAlphaOk
+	onlyLocalValueGone := !onlyLocalAlphaOk && !onlyLocalBetaOk && oldOnlyLocalAlphaOk
+	hcOnlyLocalMismatch := onlyLocalAlphaOk && healthCheckBetaOk
+
+	if onlyLocalValueNew || onlyLocalValueChanged || onlyLocalValueGone || hcOnlyLocalMismatch {
+		fieldPath := field.NewPath("metadata", "annotations").Key(apiservice.AlphaAnnotationExternalTraffic)
+		msg := fmt.Sprintf("please replace the alpha annotation with the beta version %v",
+			apiservice.BetaAnnotationExternalTraffic)
+		allErrs = append(allErrs, field.Invalid(fieldPath, apiservice.AlphaAnnotationExternalTraffic, msg))
+	}
+	return
+}
+
+// ValidateServiceUpdate tests if required fields in the service are set during an update
+func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+
+	// ClusterIP is immutable for every service type other than ExternalName,
+	// once it has actually been assigned (i.e. the old value is a non-empty string).
+	if service.Spec.Type != api.ServiceTypeExternalName {
+		if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" {
+			allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...)
+		}
+	}
+
+	allErrs = append(allErrs, validateServiceFields(service)...)
+	allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)
+	return allErrs
+}
+
+// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status.
+func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
+	return allErrs
+}
+
+// ValidateReplicationController tests if required fields in the replication controller are set.
+func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList {
+	allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+	return allErrs
+}
+
+// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+	return allErrs
+}
+
+// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+	statusPath := field.NewPath("status")
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.Replicas), statusPath.Child("replicas"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ReadyReplicas), statusPath.Child("readyReplicas"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.AvailableReplicas), statusPath.Child("availableReplicas"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
+	return allErrs
+}
+
+// ValidateNonEmptySelector validates that the given selector is non-empty.
+func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	selector := labels.Set(selectorMap).AsSelector()
+	if selector.Empty() {
+		allErrs = append(allErrs, field.Required(fldPath, ""))
+	}
+	return allErrs
+}
+
+// ValidatePodTemplateSpecForRC validates the given template and ensures that it is in accordance with the desired selector and replicas.
+func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if template == nil {
+		allErrs = append(allErrs, field.Required(fldPath, ""))
+	} else {
+		selector := labels.Set(selectorMap).AsSelector()
+		if !selector.Empty() {
+			// Verify that the RC selector matches the labels in template.
+			labels := labels.Set(template.Labels)
+			if !selector.Matches(labels) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
+			}
+		}
+		allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...)
+		if replicas > 1 {
+			allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
+		}
+		// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
+		if template.Spec.RestartPolicy != api.RestartPolicyAlways {
+			allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
+		}
+	}
+	return allErrs
+}
+
+// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
+func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
+	allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
+	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
+	allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...)
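+	// Worked example (editorial): an RC whose selector is {"app": "web"} but
+	// whose template is labeled {"app": "api"} fails above in
+	// ValidatePodTemplateSpecForRC with "`selector` does not match template
+	// `labels`" at spec.template.metadata.labels.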
+	return allErrs
+}
+
+// ValidatePodTemplateSpec validates the spec of a pod template
+func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
+	allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
+	return allErrs
+}
+
+func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i := range volumes {
+		vol := &volumes[i]
+		idxPath := fldPath.Index(i)
+		if vol.GCEPersistentDisk != nil {
+			if !vol.GCEPersistentDisk.ReadOnly {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
+			}
+		}
+		// TODO: What to do for AWS? It doesn't support replicas
+	}
+	return allErrs
+}
+
+// validateTaints tests if given taints have valid data.
+func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList {
+	allErrors := field.ErrorList{}
+
+	uniqueTaints := map[api.TaintEffect]sets.String{}
+
+	for i, currTaint := range taints {
+		idxPath := fldPath.Index(i)
+		// validate the taint key
+		allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
+		// validate the taint value
+		if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
+			allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
+		}
+		// validate the taint effect
+		allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
+
+		// validate that the taint is unique by its key and effect pair
+		if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) {
+			duplicatedError := field.Duplicate(idxPath, currTaint)
+			duplicatedError.Detail = "taints must be unique by key and effect pair"
+			allErrors = append(allErrors, duplicatedError)
+			continue
+		}
+
+		// add taint to existingTaints for uniqueness check
+		if len(uniqueTaints[currTaint.Effect]) == 0 {
+			uniqueTaints[currTaint.Effect] = sets.String{}
+		}
+		uniqueTaints[currTaint.Effect].Insert(currTaint.Key)
+	}
+	return allErrors
+}
+
+// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations have valid data
+func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	taints, err := api.GetTaintsFromNodeAnnotations(annotations)
+	if err != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error()))
+		return allErrs
+	}
+	if len(taints) > 0 {
+		allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...)
+	}
+
+	return allErrs
+}
+
+func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if annotations[api.PreferAvoidPodsAnnotationKey] != "" {
+		allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...)
+	}
+	if annotations[api.TaintsAnnotationKey] != "" {
+		allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...)
+	}
+	return allErrs
+}
+
+// ValidateNode tests if required fields in the node are set.
+func ValidateNode(node *api.Node) field.ErrorList {
+	fldPath := field.NewPath("metadata")
+	allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
+	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+
+	// Only validate spec. All status fields are optional and can be updated later.
+
+	// external ID is required.
+	if len(node.Spec.ExternalID) == 0 {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), ""))
+	}
+
+	// TODO(rjnagal): Ignore PodCIDR until it is completely implemented.
+	return allErrs
+}
+
+// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
+func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
+	fldPath := field.NewPath("metadata")
+	allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
+	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+
+	// TODO: Enable the code once we have better api object.status update model. Currently,
+	// anyone can update node status.
+	// if !api.Semantic.DeepEqual(node.Status, api.NodeStatus{}) {
+	//	allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
+	// }
+
+	// Validate resource quantities in capacity.
+	for k, v := range node.Status.Capacity {
+		resPath := field.NewPath("status", "capacity", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+	// Validate resource quantities in allocatable.
+	for k, v := range node.Status.Allocatable {
+		resPath := field.NewPath("status", "allocatable", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+
+	// Validate no duplicate addresses in node status.
+	addresses := make(map[api.NodeAddress]bool)
+	for i, address := range node.Status.Addresses {
+		if _, ok := addresses[address]; ok {
+			allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address))
+		}
+		addresses[address] = true
+	}
+
+	if len(oldNode.Spec.PodCIDR) == 0 {
+		// Allow the controller manager to assign a CIDR to a node if it doesn't have one.
+		oldNode.Spec.PodCIDR = node.Spec.PodCIDR
+	} else {
+		if oldNode.Spec.PodCIDR != node.Spec.PodCIDR {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid"))
+		}
+	}
+	// TODO: move reset function to its own location
+	// Ignore metadata changes now that they have been tested
+	oldNode.ObjectMeta = node.ObjectMeta
+	// Allow users to update capacity
+	oldNode.Status.Capacity = node.Status.Capacity
+	// Allow users to unschedule node
+	oldNode.Spec.Unschedulable = node.Spec.Unschedulable
+	// Clear status
+	oldNode.Status = node.Status
+
+	// TODO: Add a 'real' error type for this error and print the actual diffs.
+	if !api.Semantic.DeepEqual(oldNode, node) {
+		glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node)
+		allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels or capacity"))
+	}
+
+	return allErrs
+}
+
+// Validate compute resource type name.
+// Refer to docs/design/resources.md for more details.
+func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(value) {
+		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+	}
+	if len(allErrs) != 0 {
+		return allErrs
+	}
+
+	if len(strings.Split(value, "/")) == 1 {
+		if !api.IsStandardResourceName(value) {
+			return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
+		}
+	}
+
+	return allErrs
+}
+
+// Validate container resource name
+// Refer to docs/design/resources.md for more details.
+func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
+	allErrs := validateResourceName(value, fldPath)
+	if len(strings.Split(value, "/")) == 1 {
+		if !api.IsStandardContainerResourceName(value) {
+			return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
+		}
+	}
+	return allErrs
+}
+
+// ValidateResourceQuotaResourceName validates resource names that can go in a resource quota.
+// Refer to docs/design/resources.md for more details.
+func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList {
+	allErrs := validateResourceName(value, fldPath)
+	if len(strings.Split(value, "/")) == 1 {
+		if !api.IsStandardQuotaResourceName(value) {
+			return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
+		}
+	}
+	return allErrs
+}
+
+// Validate limit range types
+func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(value) {
+		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+	}
+	if len(allErrs) != 0 {
+		return allErrs
+	}
+
+	if len(strings.Split(value, "/")) == 1 {
+		if !api.IsStandardLimitRangeType(value) {
+			return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified"))
+		}
+	}
+
+	return allErrs
+}
+
+// validateLimitRangeResourceName validates limit range resource names; limit
+// types other than Pod/Container may contain storage, not just cpu or memory.
+func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList {
+	switch limitType {
+	case api.LimitTypePod, api.LimitTypeContainer:
+		return validateContainerResourceName(value, fldPath)
+	default:
+		return validateResourceName(value, fldPath)
+	}
+}
+
+// ValidateLimitRange tests if required fields in the LimitRange are set.
+func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
+	allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))
+
+	// ensure resource names are properly qualified per docs/design/resources.md
+	limitTypeSet := map[api.LimitType]bool{}
+	fldPath := field.NewPath("spec", "limits")
+	for i := range limitRange.Spec.Limits {
+		idxPath := fldPath.Index(i)
+		limit := &limitRange.Spec.Limits[i]
+		allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...)
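+		// Illustrative (editorial): limit.Type is normally one of
+		// api.LimitTypePod, api.LimitTypeContainer or
+		// api.LimitTypePersistentVolumeClaim; an unqualified custom type such
+		// as "mytype" is rejected above as neither standard nor fully qualified.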
+
+		_, found := limitTypeSet[limit.Type]
+		if found {
+			allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type))
+		}
+		limitTypeSet[limit.Type] = true
+
+		keys := sets.String{}
+		min := map[string]resource.Quantity{}
+		max := map[string]resource.Quantity{}
+		defaults := map[string]resource.Quantity{}
+		defaultRequests := map[string]resource.Quantity{}
+		maxLimitRequestRatios := map[string]resource.Quantity{}
+
+		for k, q := range limit.Max {
+			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...)
+			keys.Insert(string(k))
+			max[string(k)] = q
+		}
+		for k, q := range limit.Min {
+			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...)
+			keys.Insert(string(k))
+			min[string(k)] = q
+		}
+
+		if limit.Type == api.LimitTypePod {
+			if len(limit.Default) > 0 {
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'"))
+			}
+			if len(limit.DefaultRequest) > 0 {
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'"))
+			}
+		} else {
+			for k, q := range limit.Default {
+				allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...)
+				keys.Insert(string(k))
+				defaults[string(k)] = q
+			}
+			for k, q := range limit.DefaultRequest {
+				allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
+				keys.Insert(string(k))
+				defaultRequests[string(k)] = q
+			}
+		}
+
+		if limit.Type == api.LimitTypePersistentVolumeClaim {
+			_, minQuantityFound := limit.Min[api.ResourceStorage]
+			_, maxQuantityFound := limit.Max[api.ResourceStorage]
+			if !minQuantityFound && !maxQuantityFound {
+				allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided"))
+			}
+		}
+
+		for k, q := range limit.MaxLimitRequestRatio {
+			allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
+			keys.Insert(string(k))
+			maxLimitRequestRatios[string(k)] = q
+		}
+
+		for k := range keys {
+			minQuantity, minQuantityFound := min[k]
+			maxQuantity, maxQuantityFound := max[k]
+			defaultQuantity, defaultQuantityFound := defaults[k]
+			defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
+			maxRatio, maxRatioFound := maxLimitRequestRatios[k]
+
+			if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
+			}
+
+			if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
+			}
+
+			if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
+			}
+
+			if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
+			}
+
+			if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
+			}
+
+			if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
+			}
+			if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String())))
+			}
+			if maxRatioFound && minQuantityFound && maxQuantityFound {
+				maxRatioValue := float64(maxRatio.Value())
+				minQuantityValue := minQuantity.Value()
+				maxQuantityValue := maxQuantity.Value()
+				if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue {
+					maxRatioValue = float64(maxRatio.MilliValue()) / 1000
+					minQuantityValue = minQuantity.MilliValue()
+					maxQuantityValue = maxQuantity.MilliValue()
+				}
+				maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue)
+				if maxRatioValue > maxRatioLimit {
+					allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit)))
+				}
+			}
+		}
+	}
+
+	return allErrs
+}
+
+// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList {
+	allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
+	return allErrs
+}
+
+// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
+	return allErrs
+}
+
+// ValidateSecret tests if required fields in the Secret are set.
+func ValidateSecret(secret *api.Secret) field.ErrorList {
+	allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
+
+	dataPath := field.NewPath("data")
+	totalSize := 0
+	for key, value := range secret.Data {
+		for _, msg := range validation.IsConfigMapKey(key) {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
+		}
+		totalSize += len(value)
+	}
+	if totalSize > api.MaxSecretSize {
+		allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize))
+	}
+
+	switch secret.Type {
+	case api.SecretTypeServiceAccountToken:
+		// Only require Annotations[kubernetes.io/service-account.name]
+		// Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
+		if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 {
+			allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), ""))
+		}
+	case api.SecretTypeOpaque, "":
+		// no-op
+	case api.SecretTypeDockercfg:
+		dockercfgBytes, exists := secret.Data[api.DockerConfigKey]
+		if !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), ""))
+			break
+		}
+
+		// make sure that the content is well-formed json.
+		if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "", err.Error()))
+		}
+	case api.SecretTypeDockerConfigJson:
+		dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey]
+		if !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), ""))
+			break
+		}
+
+		// make sure that the content is well-formed json.
+		if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "", err.Error()))
+		}
+	case api.SecretTypeBasicAuth:
+		_, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey]
+		_, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey]
+
+		// username or password might be empty, but the field must be present
+		if !usernameFieldExists && !passwordFieldExists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthUsernameKey), ""))
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthPasswordKey), ""))
+			break
+		}
+	case api.SecretTypeSSHAuth:
+		if len(secret.Data[api.SSHAuthPrivateKey]) == 0 {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.SSHAuthPrivateKey), ""))
+			break
+		}
+
+	case api.SecretTypeTLS:
+		if _, exists := secret.Data[api.TLSCertKey]; !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), ""))
+		}
+		if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), ""))
+		}
+		// TODO: Verify that the key matches the cert.
+	default:
+		// no-op
+	}
+
+	return allErrs
+}
+
+// ValidateSecretUpdate tests if required fields in the Secret are set.
+func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
+
+	if len(newSecret.Type) == 0 {
+		newSecret.Type = oldSecret.Type
+	}
+
+	allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
+
+	allErrs = append(allErrs, ValidateSecret(newSecret)...)
+	return allErrs
+}
+
+// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateConfigMapName = NameIsDNSSubdomain
+
+// ValidateConfigMap tests whether required fields in the ConfigMap are set.
+func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
+
+	totalSize := 0
+
+	for key, value := range cfg.Data {
+		for _, msg := range validation.IsConfigMapKey(key) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
+		}
+		totalSize += len(value)
+	}
+	if totalSize > api.MaxSecretSize {
+		allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize))
+	}
+
+	return allErrs
+}
+
+// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set.
+func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...)
+	allErrs = append(allErrs, ValidateConfigMap(newCfg)...)
+
+	return allErrs
+}
+
+func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList {
+	if quantity.Value() < 0 {
+		return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")}
+	}
+	return field.ErrorList{}
+}
+
+// ValidateResourceRequirements validates a resource requirements spec.
+func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	limPath := fldPath.Child("limits")
+	reqPath := fldPath.Child("requests")
+	for resourceName, quantity := range requirements.Limits {
+		fldPath := limPath.Key(string(resourceName))
+		// Validate resource name.
+		allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+
+		// Validate resource quantity.
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
+
+		// Check that request <= limit.
+		requestQuantity, exists := requirements.Requests[resourceName]
+		if exists {
+			// For GPUs, requests may neither exceed nor fall below limits; they must be equal.
+			if resourceName == api.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 {
+				allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU)))
+			} else if quantity.Cmp(requestQuantity) < 0 {
+				allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
+			}
+		}
+	}
+	for resourceName, quantity := range requirements.Requests {
+		fldPath := reqPath.Key(string(resourceName))
+		// Validate resource name.
+		allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+		// Validate resource quantity.
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
+	}
+
+	return allErrs
+}
+
+// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for the set of scopes
+func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(resourceQuotaSpec.Scopes) == 0 {
+		return allErrs
+	}
+	hardLimits := sets.NewString()
+	for k := range resourceQuotaSpec.Hard {
+		hardLimits.Insert(string(k))
+	}
+	fldPath := fld.Child("scopes")
+	scopeSet := sets.NewString()
+	for _, scope := range resourceQuotaSpec.Scopes {
+		if !api.IsStandardResourceQuotaScope(string(scope)) {
+			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
+		}
+		for _, k := range hardLimits.List() {
+			if api.IsStandardQuotaResourceName(k) && !api.IsResourceQuotaScopeValidForResource(scope, k) {
+				allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
+			}
+		}
+		scopeSet.Insert(string(scope))
+	}
+	invalidScopePairs := []sets.String{
+		sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)),
+		sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)),
+	}
+	for _, invalidScopePair := range invalidScopePairs {
+		if scopeSet.HasAll(invalidScopePair.List()...) {
+			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
+		}
+	}
+	return allErrs
+}
+
+// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
+func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList {
+	allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
+
+	allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...)
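+	// Example (editorial): a spec with Hard of {"cpu": "2", "pods": "10"}
+	// passes; "pods" is an integer resource, so a fractional value such as
+	// "1500m" would be flagged by ValidateResourceQuantityValue below.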
+ allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...) + + return allErrs +} + +func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + fldPath = fld.Child("used") + for k, v := range status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + + return allErrs +} + +func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range resourceQuotaSpec.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) + + return allErrs +} + +// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource +func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) + if api.IsIntegerResourceName(resource) { + if value.MilliValue()%int64(1000) != int64(0) { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) + } + } + return allErrs +} + +// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. +func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) + + // ensure scopes cannot change, and that resources are still valid for scope + fldPath := field.NewPath("spec", "scopes") + oldScopes := sets.NewString() + newScopes := sets.NewString() + for _, scope := range newResourceQuota.Spec.Scopes { + newScopes.Insert(string(scope)) + } + for _, scope := range oldResourceQuota.Spec.Scopes { + oldScopes.Insert(string(scope)) + } + if !oldScopes.Equal(newScopes) { + allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, "field is immutable")) + } + + newResourceQuota.Status = oldResourceQuota.Status + return allErrs +} + +// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. 
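The integer-resource rule in ValidateResourceQuantityValue above reduces to divisibility of the milli-value: a quantity like "1500m" cannot count pods. A minimal sketch of that check, with milliValue standing in for resource.Quantity.MilliValue():

```go
package main

import "fmt"

// isIntegral mirrors the check in ValidateResourceQuantityValue: for integer
// resources (pods, configmaps, ...) the quantity's milli-value must be a
// whole multiple of 1000.
func isIntegral(milliValue int64) bool {
	return milliValue%1000 == 0
}

func main() {
	fmt.Println(isIntegral(2000)) // true:  a quota of "2" pods is accepted
	fmt.Println(isIntegral(1500)) // false: "1500m" pods would be rejected
}
```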
+func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + if len(newResourceQuota.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + fldPath := field.NewPath("status", "hard") + for k, v := range newResourceQuota.Status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + fldPath = field.NewPath("status", "used") + for k, v := range newResourceQuota.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + newResourceQuota.Spec = oldResourceQuota.Spec + return allErrs +} + +// ValidateNamespace tests if required fields are set. +func ValidateNamespace(namespace *api.Namespace) field.ErrorList { + allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) + for i := range namespace.Spec.Finalizers { + allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) + } + return allErrs +} + +// Validate finalizer names +func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(stringValue, "/")) == 1 { + if !api.IsStandardFinalizerName(stringValue) { + return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) + } + } + + return field.ErrorList{} +} + +// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. +// newNamespace is updated with fields that cannot be changed +func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields +// that cannot be changed. 
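validateFinalizerName above accepts any fully qualified finalizer (one containing a "/") but restricts bare names to the standard set. A standalone sketch of that rule; the one-entry standard set is a stand-in for api.IsStandardFinalizerName:

```go
package main

import (
	"fmt"
	"strings"
)

// standardFinalizers stands in for api.IsStandardFinalizerName.
var standardFinalizers = map[string]bool{"kubernetes": true}

// checkFinalizer mirrors validateFinalizerName: an unqualified name (no "/")
// must be a standard finalizer; qualified names pass this particular check.
func checkFinalizer(name string) error {
	if len(strings.Split(name, "/")) == 1 && !standardFinalizers[name] {
		return fmt.Errorf("%q is neither a standard finalizer name nor fully qualified", name)
	}
	return nil
}

func main() {
	fmt.Println(checkFinalizer("kubernetes"))          // <nil>
	fmt.Println(checkFinalizer("example.com/cleanup")) // <nil>
	fmt.Println(checkFinalizer("cleanup"))             // error
}
```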
+func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec = oldNamespace.Spec + if newNamespace.DeletionTimestamp.IsZero() { + if newNamespace.Status.Phase != api.NamespaceActive { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) + } + } else { + if newNamespace.Status.Phase != api.NamespaceTerminating { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) + } + } + return allErrs +} + +// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. +// newNamespace is updated with fields that cannot be changed. +func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + + fldPath := field.NewPath("spec", "finalizers") + for i := range newNamespace.Spec.Finalizers { + idxPath := fldPath.Index(i) + allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) + } + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// Construct lookup map of old subset IPs to NodeNames. +func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) { + for n := range addresses { + if addresses[n].NodeName == nil { + continue + } + ipToNodeName[addresses[n].IP] = *addresses[n].NodeName + } +} + +// Build a map across all subsets of IP -> NodeName +func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string { + ipToNodeName := make(map[string]string) + for i := range subsets { + updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) + updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) + } + return ipToNodeName +} + +func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { + errList := field.ErrorList{} + existingNodeName, found := ipToNodeName[addr.IP] + if !found { + return errList + } + if addr.NodeName == nil || *addr.NodeName == existingNodeName { + return errList + } + // NodeName entry found for this endpoint IP, but user is attempting to change NodeName + return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) +} + +// ValidateEndpoints tests if required fields are set. +func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { + allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...) 
+ return allErrs +} + +func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) + for i := range subsets { + ss := &subsets[i] + idxPath := fldPath.Index(i) + + if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) + } + if len(ss.Ports) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("ports"), "")) + } + for addr := range ss.Addresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) + } + for addr := range ss.NotReadyAddresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) + } + for port := range ss.Ports { + allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) + } + } + + return allErrs +} + +func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsValidIP(address.IP) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) + } + if len(address.Hostname) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...) + } + // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update + if address.NodeName != nil { + for _, msg := range ValidateNodeName(*address.NodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) + } + } + allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) + if len(allErrs) > 0 { + return allErrs + } + allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) + return allErrs +} + +func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { + // We disallow some IPs as endpoints or external-ips. Specifically, + // unspecified and loopback addresses are nonsensical and link-local + // addresses tend to be used for node-centric purposes (e.g. metadata + // service). 
+ allErrs := field.ErrorList{} + ip := net.ParseIP(ipAddress) + if ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) + return allErrs + } + if ip.IsUnspecified() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)")) + } + if ip.IsLoopback() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) + } + if ip.IsLinkLocalUnicast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) + } + if ip.IsLinkLocalMulticast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) + } + return allErrs +} + +func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requireName && len(port.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(port.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...) + } + for _, msg := range validation.IsValidPortNum(int(port.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg)) + } + if len(port.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !supportedPortProtocols.Has(string(port.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } + return allErrs +} + +// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. +func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
+ return allErrs +} + +// ValidateSecurityContext ensure the security context contains valid settings +func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + //this should only be true for testing since SecurityContext is defaulted by the api + if sc == nil { + return allErrs + } + + if sc.Privileged != nil { + if *sc.Privileged && !capabilities.Get().AllowPrivileged { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by policy")) + } + } + + if sc.RunAsUser != nil { + if *sc.RunAsUser < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg)) + } + } + return allErrs +} + +func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { + allErrs := field.ErrorList{} + if opts.TailLines != nil && *opts.TailLines < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) + } + if opts.LimitBytes != nil && *opts.LimitBytes < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) + } + switch { + case opts.SinceSeconds != nil && opts.SinceTime != nil: + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) + case opts.SinceSeconds != nil: + if *opts.SinceSeconds < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) + } + } + return allErrs +} + +// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus +func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ingress := range status.Ingress { + idxPath := fldPath.Child("ingress").Index(i) + if len(ingress.IP) > 0 { + if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) + } + } + if len(ingress.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) + } + if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) + } + } + } + return allErrs +} + +// TODO: remove this after we EOL the annotation that carries it. 
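The validateNonSpecialIP helper above rejects unspecified, loopback, and link-local addresses for endpoints and external IPs. Go's net package exposes each of those classes directly, as this standalone sketch shows:

```go
package main

import (
	"fmt"
	"net"
)

// checkEndpointIP reproduces validateNonSpecialIP with the standard library:
// the address must parse, and must not be unspecified, loopback, or link-local.
func checkEndpointIP(s string) error {
	ip := net.ParseIP(s)
	switch {
	case ip == nil:
		return fmt.Errorf("%q must be a valid IP address", s)
	case ip.IsUnspecified():
		return fmt.Errorf("%q may not be unspecified", s)
	case ip.IsLoopback():
		return fmt.Errorf("%q may not be in the loopback range", s)
	case ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast():
		return fmt.Errorf("%q may not be link-local", s)
	}
	return nil
}

func main() {
	for _, s := range []string{"10.0.0.1", "127.0.0.1", "169.254.1.1", "0.0.0.0"} {
		fmt.Printf("%-12s -> %v\n", s, checkEndpointIP(s))
	}
}
```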
+func isValidHostnamesMap(serializedPodHostNames string) bool { + if len(serializedPodHostNames) == 0 { + return false + } + podHostNames := map[string]endpoints.HostRecord{} + err := json.Unmarshal([]byte(serializedPodHostNames), &podHostNames) + if err != nil { + return false + } + + for ip, hostRecord := range podHostNames { + if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 { + return false + } + if net.ParseIP(ip) == nil { + return false + } + } + return true +} + +func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string { + lookup := make(map[string]struct{}, len(a)) + result := []string{} + for i := range a { + lookup[a[i].Name] = struct{}{} + } + for i := range b { + if _, found := lookup[b[i].Name]; found { + result = append(result, b[i].Name) + } + } + return result +} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD b/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD new file mode 100644 index 000000000..b94c14277 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD @@ -0,0 +1,30 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "capabilities.go", + "doc.go", + ], + tags = ["automanaged"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go new file mode 100644 index 000000000..be721a785 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capabilities + +import ( + "sync" +) + +// Capabilities defines the set of capabilities available within the system. +// For now these are global. Eventually they may be per-user +type Capabilities struct { + AllowPrivileged bool + + // Pod sources from which to allow privileged capabilities like host networking, sharing the host + // IPC namespace, and sharing the host PID namespace. + PrivilegedSources PrivilegedSources + + // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach) + PerConnectionBandwidthLimitBytesPerSec int64 +} + +// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types +// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace. +type PrivilegedSources struct { + // List of pod sources for which using host network is allowed. + HostNetworkSources []string + + // List of pod sources for which using host pid namespace is allowed. + HostPIDSources []string + + // List of pod sources for which using host ipc is allowed. 
+ HostIPCSources []string +} + +// TODO: Clean these up into a singleton +var once sync.Once +var lock sync.Mutex +var capabilities *Capabilities + +// Initialize the capability set. This can only be done once per binary, subsequent calls are ignored. +func Initialize(c Capabilities) { + // Only do this once + once.Do(func() { + capabilities = &c + }) +} + +// Setup the capability set. It wraps Initialize for improving usability. +func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) { + Initialize(Capabilities{ + AllowPrivileged: allowPrivileged, + PrivilegedSources: privilegedSources, + PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec, + }) +} + +// SetForTests sets capabilities for tests. Convenience method for testing. This should only be called from tests. +func SetForTests(c Capabilities) { + lock.Lock() + defer lock.Unlock() + capabilities = &c +} + +// Returns a read-only copy of the system capabilities. +func Get() Capabilities { + lock.Lock() + defer lock.Unlock() + // This check prevents clobbering of capabilities that might've been set via SetForTests + if capabilities == nil { + Initialize(Capabilities{ + AllowPrivileged: false, + PrivilegedSources: PrivilegedSources{ + HostNetworkSources: []string{}, + HostPIDSources: []string{}, + HostIPCSources: []string{}, + }, + }) + } + return *capabilities +} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go new file mode 100644 index 000000000..bbdc89d50 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package capabilities manages system level capabilities +package capabilities // import "k8s.io/kubernetes/pkg/capabilities" diff --git a/vendor/k8s.io/kubernetes/pkg/features/BUILD b/vendor/k8s.io/kubernetes/pkg/features/BUILD new file mode 100644 index 000000000..afc417db8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/features/BUILD @@ -0,0 +1,31 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["kube_features.go"], + tags = ["automanaged"], + deps = [ + "//vendor:k8s.io/apiserver/pkg/features", + "//vendor:k8s.io/apiserver/pkg/util/feature", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go new file mode 100644 index 000000000..4bf9fedea --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -0,0 +1,97 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @timstclair + // beta: v1.4 + AppArmor utilfeature.Feature = "AppArmor" + + // owner: @girishkalele + // alpha: v1.4 + ExternalTrafficLocalOnly utilfeature.Feature = "AllowExtTrafficLocalEndpoints" + + // owner: @saad-ali + // alpha: v1.3 + DynamicVolumeProvisioning utilfeature.Feature = "DynamicVolumeProvisioning" + + // owner: @mtaufen + // alpha: v1.4 + DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig" + + // owner: timstclair + // alpha: v1.5 + // + // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) + // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). + StreamingProxyRedirects utilfeature.Feature = genericfeatures.StreamingProxyRedirects + + // owner: @pweil- + // alpha: v1.5 + // + // Default userns=host for containers that are using other host namespaces, host mounts, the pod + // contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE, + // SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon. + ExperimentalHostUserNamespaceDefaultingGate utilfeature.Feature = "ExperimentalHostUserNamespaceDefaulting" + + // owner: @vishh + // alpha: v1.5 + // + // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod` + // and also prevents them from being evicted from a node. + // Note: This feature is not supported for `BestEffort` pods. + ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation" + + // owner: @davidopp + // alpha: v1.6 + // + // Determines if affinity defined in annotations should be processed + // TODO: remove when alpha support for affinity is removed + AffinityInAnnotations utilfeature.Feature = "AffinityInAnnotations" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. 
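The gate map that follows supplies compiled-in defaults; an explicit --feature-gates setting overrides them at lookup time. A standalone sketch of that precedence (the names here are illustrative, not the vendored API):

```go
package main

import "fmt"

// spec pairs a gate with its compiled-in default, like the FeatureSpec
// entries in the map below.
type spec struct{ def bool }

// enabled returns the effective value of a gate: an explicit --feature-gates
// setting wins, otherwise the default from the known-gates map applies.
func enabled(known map[string]spec, overrides map[string]bool, key string) bool {
	if v, ok := overrides[key]; ok {
		return v
	}
	return known[key].def
}

func main() {
	known := map[string]spec{"AppArmor": {def: true}, "DynamicKubeletConfig": {def: false}}
	overrides := map[string]bool{"AppArmor": false} // e.g. --feature-gates=AppArmor=false
	fmt.Println(enabled(known, overrides, "AppArmor"))             // false (overridden)
	fmt.Println(enabled(known, overrides, "DynamicKubeletConfig")) // false (default)
}
```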
+var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + ExternalTrafficLocalOnly: {Default: true, PreRelease: utilfeature.Beta}, + AppArmor: {Default: true, PreRelease: utilfeature.Beta}, + DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha}, + DynamicVolumeProvisioning: {Default: true, PreRelease: utilfeature.Alpha}, + ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, + ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, + AffinityInAnnotations: {Default: false, PreRelease: utilfeature.Alpha}, + + // inherited features from generic apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, +} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD b/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD new file mode 100644 index 000000000..d07aa91a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD @@ -0,0 +1,52 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "helpers.go", + "validate.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/features:go_default_library", + "//pkg/util:go_default_library", + "//vendor:k8s.io/apiserver/pkg/util/feature", + ], +) + +go_test( + name = "go_default_test", + srcs = ["validate_test.go"], + data = [ + "testdata/profiles", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//vendor:github.com/stretchr/testify/assert", + "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go new file mode 100644 index 000000000..4412d2a9a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go @@ -0,0 +1,77 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apparmor + +import ( + "strings" + + "k8s.io/kubernetes/pkg/api/v1" +) + +// TODO: Move these values into the API package. +const ( + // The prefix to an annotation key specifying a container profile. + ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" + // The annotation key specifying the default AppArmor profile. + DefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" + // The annotation key specifying the allowed AppArmor profiles. 
+	AllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames"
+
+	// The profile specifying the runtime default.
+	ProfileRuntimeDefault = "runtime/default"
+	// The prefix for specifying profiles loaded on the node.
+	ProfileNamePrefix = "localhost/"
+)
+
+// Checks whether AppArmor is required for the pod to be run.
+func isRequired(pod *v1.Pod) bool {
+	for key := range pod.Annotations {
+		if strings.HasPrefix(key, ContainerAnnotationKeyPrefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Returns the name of the profile to use with the container.
+func GetProfileName(pod *v1.Pod, containerName string) string {
+	return GetProfileNameFromPodAnnotations(pod.Annotations, containerName)
+}
+
+// GetProfileNameFromPodAnnotations gets the name of the profile to use with the
+// container from the pod annotations.
+func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
+	return annotations[ContainerAnnotationKeyPrefix+containerName]
+}
+
+// Sets the name of the profile to use with the container.
+func SetProfileName(pod *v1.Pod, containerName, profileName string) error {
+	if pod.Annotations == nil {
+		pod.Annotations = map[string]string{}
+	}
+	pod.Annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
+	return nil
+}
+
+// Sets the name of the profile to use with the container, via the given annotation map.
+func SetProfileNameFromPodAnnotations(annotations map[string]string, containerName, profileName string) error {
+	if annotations == nil {
+		return nil
+	}
+	annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go
new file mode 100644
index 000000000..cf12df3c1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apparmor
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/util"
+)
+
+// Whether AppArmor should be disabled by default.
+// Set to true if the wrong build tags are set (see validate_disabled.go).
+var isDisabledBuild bool
+
+// Interface for validating that a pod with an AppArmor profile can be run by a Node.
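The helpers above reduce to annotation-map arithmetic: a container's profile is stored under a key built from a fixed prefix plus the container name. A standalone sketch:

```go
package main

import "fmt"

// Stand-in for ContainerAnnotationKeyPrefix above.
const prefix = "container.apparmor.security.beta.kubernetes.io/"

// profileFor mirrors GetProfileNameFromPodAnnotations: the per-container
// profile is stored under the prefixed annotation key.
func profileFor(annotations map[string]string, container string) string {
	return annotations[prefix+container]
}

func main() {
	ann := map[string]string{prefix + "web": "localhost/nginx-profile"}
	fmt.Printf("%q\n", profileFor(ann, "web")) // "localhost/nginx-profile"
	fmt.Printf("%q\n", profileFor(ann, "db"))  // "" (no profile requested)
}
```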
+type Validator interface { + Validate(pod *v1.Pod) error + ValidateHost() error +} + +func NewValidator(runtime string) Validator { + if err := validateHost(runtime); err != nil { + return &validator{validateHostErr: err} + } + appArmorFS, err := getAppArmorFS() + if err != nil { + return &validator{ + validateHostErr: fmt.Errorf("error finding AppArmor FS: %v", err), + } + } + return &validator{ + appArmorFS: appArmorFS, + } +} + +type validator struct { + validateHostErr error + appArmorFS string +} + +func (v *validator) Validate(pod *v1.Pod) error { + if !isRequired(pod) { + return nil + } + + if v.ValidateHost() != nil { + return v.validateHostErr + } + + loadedProfiles, err := v.getLoadedProfiles() + if err != nil { + return fmt.Errorf("could not read loaded profiles: %v", err) + } + + for _, container := range pod.Spec.InitContainers { + if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { + return err + } + } + for _, container := range pod.Spec.Containers { + if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { + return err + } + } + + return nil +} + +func (v *validator) ValidateHost() error { + return v.validateHostErr +} + +// Verify that the host and runtime is capable of enforcing AppArmor profiles. +func validateHost(runtime string) error { + // Check feature-gates + if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) { + return errors.New("AppArmor disabled by feature-gate") + } + + // Check build support. + if isDisabledBuild { + return errors.New("Binary not compiled for linux") + } + + // Check kernel support. + if !IsAppArmorEnabled() { + return errors.New("AppArmor is not enabled on the host") + } + + // Check runtime support. Currently only Docker is supported. + if runtime != "docker" { + return fmt.Errorf("AppArmor is only enabled for 'docker' runtime. Found: %q.", runtime) + } + + return nil +} + +// Verify that the profile is valid and loaded. +func validateProfile(profile string, loadedProfiles map[string]bool) error { + if err := ValidateProfileFormat(profile); err != nil { + return err + } + + if strings.HasPrefix(profile, ProfileNamePrefix) { + profileName := strings.TrimPrefix(profile, ProfileNamePrefix) + if !loadedProfiles[profileName] { + return fmt.Errorf("profile %q is not loaded", profileName) + } + } + + return nil +} + +func ValidateProfileFormat(profile string) error { + if profile == "" || profile == ProfileRuntimeDefault { + return nil + } + if !strings.HasPrefix(profile, ProfileNamePrefix) { + return fmt.Errorf("invalid AppArmor profile name: %q", profile) + } + return nil +} + +func (v *validator) getLoadedProfiles() (map[string]bool, error) { + profilesPath := path.Join(v.appArmorFS, "profiles") + profilesFile, err := os.Open(profilesPath) + if err != nil { + return nil, fmt.Errorf("failed to open %s: %v", profilesPath, err) + } + defer profilesFile.Close() + + profiles := map[string]bool{} + scanner := bufio.NewScanner(profilesFile) + for scanner.Scan() { + profileName := parseProfileName(scanner.Text()) + if profileName == "" { + // Unknown line format; skip it. + continue + } + profiles[profileName] = true + } + return profiles, nil +} + +// The profiles file is formatted with one profile per line, matching a form: +// namespace://profile-name (mode) +// profile-name (mode) +// Where mode is {enforce, complain, kill}. The "namespace://" is only included for namespaced +// profiles. 
For the purposes of Kubernetes, we consider the namespace part of the profile name. +func parseProfileName(profileLine string) string { + modeIndex := strings.IndexRune(profileLine, '(') + if modeIndex < 0 { + return "" + } + return strings.TrimSpace(profileLine[:modeIndex]) +} + +func getAppArmorFS() (string, error) { + mountsFile, err := os.Open("/proc/mounts") + if err != nil { + return "", fmt.Errorf("could not open /proc/mounts: %v", err) + } + defer mountsFile.Close() + + scanner := bufio.NewScanner(mountsFile) + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) < 3 { + // Unknown line format; skip it. + continue + } + if fields[2] == "securityfs" { + appArmorFS := path.Join(fields[1], "apparmor") + if ok, err := util.FileExists(appArmorFS); !ok { + msg := fmt.Sprintf("path %s does not exist", appArmorFS) + if err != nil { + return "", fmt.Errorf("%s: %v", msg, err) + } else { + return "", errors.New(msg) + } + } else { + return appArmorFS, nil + } + } + } + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error scanning mounts: %v", err) + } + + return "", errors.New("securityfs not found") +} + +// IsAppArmorEnabled returns true if apparmor is enabled for the host. +// This function is forked from +// https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go +// to avoid the libapparmor dependency. +func IsAppArmorEnabled() bool { + if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { + if _, err = os.Stat("/sbin/apparmor_parser"); err == nil { + buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + return err == nil && len(buf) > 1 && buf[0] == 'Y' + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go new file mode 100644 index 000000000..875054a94 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go @@ -0,0 +1,24 @@ +// +build !linux + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apparmor + +func init() { + // If Kubernetes was not built for linux, apparmor is always disabled. 
+ isDisabledBuild = true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/BUILD b/vendor/k8s.io/kubernetes/pkg/util/config/BUILD new file mode 100644 index 000000000..30d4214b4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/config/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "configuration_map.go", + "doc.go", + "feature_gate.go", + "namedcertkey_flag.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/util/wait:go_default_library", + "//vendor:github.com/golang/glog", + "//vendor:github.com/spf13/pflag", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "config_test.go", + "feature_gate_test.go", + "namedcertkey_flag_test.go", + ], + library = "go_default_library", + tags = ["automanaged"], + deps = ["//vendor:github.com/spf13/pflag"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/config.go b/vendor/k8s.io/kubernetes/pkg/util/config/config.go new file mode 100644 index 000000000..30defee87 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/config/config.go @@ -0,0 +1,140 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "sync" + + "k8s.io/kubernetes/pkg/util/wait" +) + +type Merger interface { + // Invoked when a change from a source is received. May also function as an incremental + // merger if you wish to consume changes incrementally. Must be reentrant when more than + // one source is defined. + Merge(source string, update interface{}) error +} + +// MergeFunc implements the Merger interface +type MergeFunc func(source string, update interface{}) error + +func (f MergeFunc) Merge(source string, update interface{}) error { + return f(source, update) +} + +// Mux is a class for merging configuration from multiple sources. Changes are +// pushed via channels and sent to the merge function. +type Mux struct { + // Invoked when an update is sent to a source. + merger Merger + + // Sources and their lock. + sourceLock sync.RWMutex + // Maps source names to channels + sources map[string]chan interface{} +} + +// NewMux creates a new mux that can merge changes from multiple sources. +func NewMux(merger Merger) *Mux { + mux := &Mux{ + sources: make(map[string]chan interface{}), + merger: merger, + } + return mux +} + +// Channel returns a channel where a configuration source +// can send updates of new configurations. Multiple calls with the same +// source will return the same channel. This allows change and state based sources +// to use the same channel. Different source names however will be treated as a +// union. 
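The Channel method defined next hands each configuration source its own channel and funnels every update, tagged with its source name, into the merger. A pared-down, runnable sketch of that pattern (a plain goroutine stands in for wait.Until, and a WaitGroup is added so the demo can drain before exiting):

```go
package main

import (
	"fmt"
	"sync"
)

// miniMux is a pared-down Mux: each named source gets a channel, and every
// update is handed to the merge function tagged with its source.
type miniMux struct {
	mu      sync.Mutex
	sources map[string]chan string
	merge   func(source, update string)
	wg      sync.WaitGroup
}

func (m *miniMux) channel(source string) chan string {
	m.mu.Lock()
	defer m.mu.Unlock()
	if ch, ok := m.sources[source]; ok {
		return ch // same source name, same channel
	}
	ch := make(chan string)
	m.sources[source] = ch
	m.wg.Add(1)
	go func() {
		defer m.wg.Done()
		for update := range ch {
			m.merge(source, update)
		}
	}()
	return ch
}

func main() {
	mux := &miniMux{
		sources: map[string]chan string{},
		merge:   func(src, u string) { fmt.Printf("merge from %s: %s\n", src, u) },
	}
	mux.channel("file") <- "pod-manifest-a"
	mux.channel("http") <- "pod-manifest-b"
	close(mux.sources["file"])
	close(mux.sources["http"])
	mux.wg.Wait()
}
```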
+func (m *Mux) Channel(source string) chan interface{} { + if len(source) == 0 { + panic("Channel given an empty name") + } + m.sourceLock.Lock() + defer m.sourceLock.Unlock() + channel, exists := m.sources[source] + if exists { + return channel + } + newChannel := make(chan interface{}) + m.sources[source] = newChannel + go wait.Until(func() { m.listen(source, newChannel) }, 0, wait.NeverStop) + return newChannel +} + +func (m *Mux) listen(source string, listenChannel <-chan interface{}) { + for update := range listenChannel { + m.merger.Merge(source, update) + } +} + +// Accessor is an interface for retrieving the current merge state. +type Accessor interface { + // MergedState returns a representation of the current merge state. + // Must be reentrant when more than one source is defined. + MergedState() interface{} +} + +// AccessorFunc implements the Accessor interface. +type AccessorFunc func() interface{} + +func (f AccessorFunc) MergedState() interface{} { + return f() +} + +type Listener interface { + // OnUpdate is invoked when a change is made to an object. + OnUpdate(instance interface{}) +} + +// ListenerFunc receives a representation of the change or object. +type ListenerFunc func(instance interface{}) + +func (f ListenerFunc) OnUpdate(instance interface{}) { + f(instance) +} + +type Broadcaster struct { + // Listeners for changes and their lock. + listenerLock sync.RWMutex + listeners []Listener +} + +// NewBroadcaster registers a set of listeners that support the Listener interface +// and notifies them all on changes. +func NewBroadcaster() *Broadcaster { + return &Broadcaster{} +} + +// Add registers listener to receive updates of changes. +func (b *Broadcaster) Add(listener Listener) { + b.listenerLock.Lock() + defer b.listenerLock.Unlock() + b.listeners = append(b.listeners, listener) +} + +// Notify notifies all listeners. +func (b *Broadcaster) Notify(instance interface{}) { + b.listenerLock.RLock() + listeners := b.listeners + b.listenerLock.RUnlock() + for _, listener := range listeners { + listener.OnUpdate(instance) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go b/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go new file mode 100644 index 000000000..0acbde56f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/config/configuration_map.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "fmt" + "sort" + "strings" +) + +type ConfigurationMap map[string]string + +func (m *ConfigurationMap) String() string { + pairs := []string{} + for k, v := range *m { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (m *ConfigurationMap) Set(value string) error { + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) + } else { + (*m)[strings.TrimSpace(arr[0])] = "" + } + } + return nil +} + +func (*ConfigurationMap) Type() string { + return "mapStringString" +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/doc.go b/vendor/k8s.io/kubernetes/pkg/util/config/doc.go new file mode 100644 index 000000000..5e9a469df --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config provides utility objects for decoupling sources of configuration and the +// actual configuration state. Consumers must implement the Merger interface to unify +// the sources of change into an object. +package config // import "k8s.io/kubernetes/pkg/util/config" diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go b/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go new file mode 100644 index 000000000..bf261c83c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go @@ -0,0 +1,273 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/glog" + "github.com/spf13/pflag" +) + +const ( + flagName = "feature-gates" + + // All known feature keys + // To add a new feature, define a key for it below and add + // a featureSpec entry to knownFeatures. + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. 
Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate = "AllAlpha" + externalTrafficLocalOnly = "AllowExtTrafficLocalEndpoints" + appArmor = "AppArmor" + dynamicKubeletConfig = "DynamicKubeletConfig" + dynamicVolumeProvisioning = "DynamicVolumeProvisioning" + streamingProxyRedirects = "StreamingProxyRedirects" + + // experimentalHostUserNamespaceDefaulting Default userns=host for containers + // that are using other host namespaces, host mounts, the pod contains a privileged container, + // or specific non-namespaced capabilities + // (MKNOD, SYS_MODULE, SYS_TIME). This should only be enabled if user namespace remapping is enabled + // in the docker daemon. + experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting" + // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod` + // and also prevents them from being evicted from a node. + experimentalCriticalPodAnnotation = "ExperimentalCriticalPodAnnotation" +) + +var ( + // Default values for recorded features. Every new feature gate should be + // represented here. + knownFeatures = map[string]featureSpec{ + allAlphaGate: {false, alpha}, + externalTrafficLocalOnly: {true, beta}, + appArmor: {true, beta}, + dynamicKubeletConfig: {false, alpha}, + dynamicVolumeProvisioning: {true, alpha}, + streamingProxyRedirects: {false, alpha}, + experimentalHostUserNamespaceDefaultingGate: {false, alpha}, + experimentalCriticalPodAnnotation: {false, alpha}, + } + + // Special handling for a few gates. + specialFeatures = map[string]func(f *featureGate, val bool){ + allAlphaGate: setUnsetAlphaGates, + } + + // DefaultFeatureGate is a shared global FeatureGate. + DefaultFeatureGate = &featureGate{ + known: knownFeatures, + special: specialFeatures, + } +) + +type featureSpec struct { + enabled bool + prerelease prerelease +} + +type prerelease string + +const ( + // Values for prerelease. + alpha = prerelease("ALPHA") + beta = prerelease("BETA") + ga = prerelease("") +) + +// FeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type FeatureGate interface { + AddFlag(fs *pflag.FlagSet) + Set(value string) error + KnownFeatures() []string + + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @timstclair + // beta: v1.4 + AppArmor() bool + + // owner: @girishkalele + // alpha: v1.4 + ExternalTrafficLocalOnly() bool + + // owner: @saad-ali + // alpha: v1.3 + DynamicVolumeProvisioning() bool + + // owner: @mtaufen + // alpha: v1.4 + DynamicKubeletConfig() bool + + // owner: timstclair + // alpha: v1.5 + StreamingProxyRedirects() bool + + // owner: @pweil- + // alpha: v1.5 + ExperimentalHostUserNamespaceDefaulting() bool + + // owner: @vishh + // alpha: v1.4 + ExperimentalCriticalPodAnnotation() bool +} + +// featureGate implements FeatureGate as well as pflag.Value for flag parsing. 
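The Set method of the featureGate type defined below parses the --feature-gates flag value. Its grammar is simple enough to sketch standalone: comma-separated key=bool pairs, with unknown keys, missing values, and unparseable booleans rejected:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseGates follows the same grammar as featureGate.Set below.
func parseGates(value string, known map[string]bool) (map[string]bool, error) {
	enabled := map[string]bool{}
	for _, s := range strings.Split(value, ",") {
		if len(s) == 0 {
			continue
		}
		kv := strings.SplitN(s, "=", 2)
		k := strings.TrimSpace(kv[0])
		if !known[k] {
			return nil, fmt.Errorf("unrecognized key: %s", k)
		}
		if len(kv) != 2 {
			return nil, fmt.Errorf("missing bool value for %s", k)
		}
		b, err := strconv.ParseBool(strings.TrimSpace(kv[1]))
		if err != nil {
			return nil, fmt.Errorf("invalid value of %s: %v", k, err)
		}
		enabled[k] = b
	}
	return enabled, nil
}

func main() {
	known := map[string]bool{"AppArmor": true, "AllAlpha": true}
	fmt.Println(parseGates("AppArmor=false,AllAlpha=true", known))
	fmt.Println(parseGates("NoSuchGate=true", known))
}
```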
+type featureGate struct {
+	known   map[string]featureSpec
+	special map[string]func(*featureGate, bool)
+	enabled map[string]bool
+}
+
+func setUnsetAlphaGates(f *featureGate, val bool) {
+	for k, v := range f.known {
+		if v.prerelease == alpha {
+			if _, found := f.enabled[k]; !found {
+				f.enabled[k] = val
+			}
+		}
+	}
+}
+
+// Set, String, and Type implement pflag.Value
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
+func (f *featureGate) Set(value string) error {
+	f.enabled = make(map[string]bool)
+	for _, s := range strings.Split(value, ",") {
+		if len(s) == 0 {
+			continue
+		}
+		arr := strings.SplitN(s, "=", 2)
+		k := strings.TrimSpace(arr[0])
+		_, ok := f.known[k]
+		if !ok {
+			return fmt.Errorf("unrecognized key: %s", k)
+		}
+		if len(arr) != 2 {
+			return fmt.Errorf("missing bool value for %s", k)
+		}
+		v := strings.TrimSpace(arr[1])
+		boolValue, err := strconv.ParseBool(v)
+		if err != nil {
+			return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err)
+		}
+		f.enabled[k] = boolValue
+
+		// Handle "special" features like "all alpha gates"
+		if fn, found := f.special[k]; found {
+			fn(f, boolValue)
+		}
+	}
+
+	glog.Infof("feature gates: %v", f.enabled)
+	return nil
+}
+
+func (f *featureGate) String() string {
+	pairs := []string{}
+	for k, v := range f.enabled {
+		pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+func (f *featureGate) Type() string {
+	return "mapStringBool"
+}
+
+// ExternalTrafficLocalOnly returns value for AllowExtTrafficLocalEndpoints
+func (f *featureGate) ExternalTrafficLocalOnly() bool {
+	return f.lookup(externalTrafficLocalOnly)
+}
+
+// AppArmor returns the value for the AppArmor feature gate.
+func (f *featureGate) AppArmor() bool {
+	return f.lookup(appArmor)
+}
+
+// DynamicKubeletConfig returns value for dynamicKubeletConfig
+func (f *featureGate) DynamicKubeletConfig() bool {
+	return f.lookup(dynamicKubeletConfig)
+}
+
+// DynamicVolumeProvisioning returns value for dynamicVolumeProvisioning
+func (f *featureGate) DynamicVolumeProvisioning() bool {
+	return f.lookup(dynamicVolumeProvisioning)
+}
+
+// StreamingProxyRedirects controls whether the apiserver should intercept (and follow)
+// redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward).
+func (f *featureGate) StreamingProxyRedirects() bool {
+	return f.lookup(streamingProxyRedirects)
+}
+
+// ExperimentalHostUserNamespaceDefaulting returns value for experimentalHostUserNamespaceDefaulting
+func (f *featureGate) ExperimentalHostUserNamespaceDefaulting() bool {
+	return f.lookup(experimentalHostUserNamespaceDefaultingGate)
+}
+
+// ExperimentalCriticalPodAnnotation returns true if experimentalCriticalPodAnnotation feature is enabled.
+func (f *featureGate) ExperimentalCriticalPodAnnotation() bool {
+	return f.lookup(experimentalCriticalPodAnnotation)
+}
+
+func (f *featureGate) lookup(key string) bool {
+	defaultValue := f.known[key].enabled
+	if f.enabled != nil {
+		if v, ok := f.enabled[key]; ok {
+			return v
+		}
+	}
+	return defaultValue
+}
+
+// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
+	known := f.KnownFeatures()
+	fs.Var(f, flagName, ""+
+		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
+		"Options are:\n"+strings.Join(known, "\n"))
+}
+
+// Returns a list of strings describing the FeatureGate's known features.
+func (f *featureGate) KnownFeatures() []string {
+	var known []string
+	for k, v := range f.known {
+		pre := ""
+		if v.prerelease != ga {
+			pre = fmt.Sprintf("%s - ", v.prerelease)
+		}
+		known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.enabled))
+	}
+	sort.Strings(known)
+	return known
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go b/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go
new file mode 100644
index 000000000..39f20f681
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/config/namedcertkey_flag.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"errors"
+	"flag"
+	"strings"
+)
+
+// NamedCertKey is a flag value parsing "certfile,keyfile" and "certfile,keyfile:name,name,name".
+type NamedCertKey struct {
+	Names             []string
+	CertFile, KeyFile string
+}
+
+var _ flag.Value = &NamedCertKey{}
+
+func (nkc *NamedCertKey) String() string {
+	s := nkc.CertFile + "," + nkc.KeyFile
+	if len(nkc.Names) > 0 {
+		s = s + ":" + strings.Join(nkc.Names, ",")
+	}
+	return s
+}
+
+func (nkc *NamedCertKey) Set(value string) error {
+	cs := strings.SplitN(value, ":", 2)
+	var keycert string
+	if len(cs) == 2 {
+		var names string
+		keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1])
+		if names == "" {
+			return errors.New("empty names list is not allowed")
+		}
+		nkc.Names = nil
+		for _, name := range strings.Split(names, ",") {
+			nkc.Names = append(nkc.Names, strings.TrimSpace(name))
+		}
+	} else {
+		nkc.Names = nil
+		keycert = strings.TrimSpace(cs[0])
+	}
+	cs = strings.Split(keycert, ",")
+	if len(cs) != 2 {
+		return errors.New("expected comma separated certificate and key file paths")
+	}
+	nkc.CertFile = strings.TrimSpace(cs[0])
+	nkc.KeyFile = strings.TrimSpace(cs[1])
+	return nil
+}
+
+func (*NamedCertKey) Type() string {
+	return "namedCertKey"
+}
+
+// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own
+// flag instance (in contrast to comma separated slices).
+type NamedCertKeyArray struct {
+	value   *[]NamedCertKey
+	changed bool
+}
+
+var _ flag.Value = &NamedCertKeyArray{}
+
+// NewNamedCertKeyArray creates a new NamedCertKeyArray with the internal value
+// pointing to p.
diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD b/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD
new file mode 100644
index 000000000..3102e4f7e
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD
@@ -0,0 +1,26 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_binary",
+    "go_library",
+    "go_test",
+    "cgo_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["hash.go"],
+    tags = ["automanaged"],
+    deps = ["//vendor:github.com/davecgh/go-spew/spew"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["hash_test.go"],
+    library = "go_default_library",
+    tags = ["automanaged"],
+    deps = ["//vendor:github.com/davecgh/go-spew/spew"],
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go
new file mode 100644
index 000000000..803f066a4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+	"hash"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// DeepHashObject writes the specified object to the given hash using the spew
+// library, which follows pointers and prints the actual values of nested
+// objects, so the hash does not change when only a pointer changes.
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
+	hasher.Reset()
+	printer := spew.ConfigState{
+		Indent:         " ",
+		SortKeys:       true,
+		DisableMethods: true,
+		SpewKeys:       true,
+	}
+	printer.Fprintf(hasher, "%#v", objectToWrite)
+}
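
To make the pointer-following behavior concrete, a hedged sketch follows. The widget type and its fields are invented for illustration, and fnv is just one convenient hash.Hash implementation from the standard library:

package main

import (
	"fmt"
	"hash/fnv"

	hashutil "k8s.io/kubernetes/pkg/util/hash"
)

// widget is a stand-in for the nested API objects this helper is used on.
type widget struct {
	Name     string
	Replicas *int32
}

func main() {
	n := int32(3)
	a := widget{Name: "w", Replicas: &n}

	m := int32(3)
	b := widget{Name: "w", Replicas: &m} // different pointer, equal value

	ha, hb := fnv.New32a(), fnv.New32a()
	hashutil.DeepHashObject(ha, a)
	hashutil.DeepHashObject(hb, b)
	fmt.Println(ha.Sum32() == hb.Sum32()) // true: spew followed the pointers
}
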
diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD
new file mode 100644
index 000000000..05c9bb745
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD
@@ -0,0 +1,28 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_binary",
+    "go_library",
+    "go_test",
+    "cgo_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "ipnet.go",
+    ],
+    tags = ["automanaged"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["ipnet_test.go"],
+    library = "go_default_library",
+    tags = ["automanaged"],
+    deps = [],
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go b/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go
new file mode 100644
index 000000000..8414f74ac
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This package contains hand-coded set implementations that should be similar
+// to the autogenerated ones in pkg/util/sets.
+// We can't simply use net.IPNet as a map-key in Go (because it contains a
+// []byte).
+// We could use the same workaround we use here (a string representation as the
+// key) to autogenerate sets. If we do that, or decide on an alternate
+// approach, we should replace the implementations in this package with the
+// autogenerated versions.
+// It is expected that callers will alias this import as "netsets" i.e. import
+// netsets "k8s.io/kubernetes/pkg/util/net/sets"
+
+package sets
diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go b/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go
new file mode 100644
index 000000000..5b6fe933f
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+	"net"
+	"strings"
+)
+
+type IPNet map[string]*net.IPNet
+
+func ParseIPNets(specs ...string) (IPNet, error) {
+	ipnetset := make(IPNet)
+	for _, spec := range specs {
+		spec = strings.TrimSpace(spec)
+		_, ipnet, err := net.ParseCIDR(spec)
+		if err != nil {
+			return nil, err
+		}
+		k := ipnet.String() // In case of normalization
+		ipnetset[k] = ipnet
+	}
+	return ipnetset, nil
+}
+
+// Insert adds items to the set.
+func (s IPNet) Insert(items ...*net.IPNet) {
+	for _, item := range items {
+		s[item.String()] = item
+	}
+}
+
+// Delete removes the given items from the set.
+func (s IPNet) Delete(items ...*net.IPNet) {
+	for _, item := range items {
+		delete(s, item.String())
+	}
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s IPNet) Has(item *net.IPNet) bool {
+	_, contained := s[item.String()]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s IPNet) HasAll(items ...*net.IPNet) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s IPNet) Difference(s2 IPNet) IPNet {
+	result := make(IPNet)
+	for k, i := range s {
+		_, found := s2[k]
+		if found {
+			continue
+		}
+		result[k] = i
+	}
+	return result
+}
+
+// StringSlice returns a []string with the String representation of each element in the set.
+// Order is undefined.
+func (s IPNet) StringSlice() []string {
+	a := make([]string, 0, len(s))
+	for k := range s {
+		a = append(a, k)
+	}
+	return a
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 IPNet) IsSuperset(s2 IPNet) bool {
+	for k := range s2 {
+		_, found := s1[k]
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 IPNet) Equal(s2 IPNet) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+// Len returns the size of the set.
+func (s IPNet) Len() int {
+	return len(s)
+}
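
A short usage sketch of the set type above (editorial; the CIDR strings are arbitrary examples). Note how ParseIPNets leans on net.ParseCIDR's normalization for its map keys:

package main

import (
	"fmt"
	"net"

	netsets "k8s.io/kubernetes/pkg/util/net/sets"
)

func main() {
	// "10.0.0.1/8" is normalized to its network address, so the key
	// stored in the set is "10.0.0.0/8".
	allowed, err := netsets.ParseIPNets("10.0.0.1/8", "192.168.0.0/16")
	if err != nil {
		panic(err)
	}

	_, probe, _ := net.ParseCIDR("192.168.0.0/16")
	fmt.Println(allowed.Has(probe))    // true
	fmt.Println(allowed.StringSlice()) // e.g. [10.0.0.0/8 192.168.0.0/16], order undefined
}
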
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 8de8042de..04a1366be 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1354,6 +1354,12 @@
 			"version": "1.0.0",
 			"versionExact": "1.0.0"
 		},
+		{
+			"checksumSHA1": "xPPXsop1MasrinvYYmMBeuwDu9c=",
+			"path": "github.com/exponent-io/jsonpath",
+			"revision": "d6023ce2651d8eafb5c75bb0c7167536102ec9f5",
+			"revisionTime": "2015-10-13T19:33:12Z"
+		},
 		{
 			"checksumSHA1": "40Ns85VYa4smQPcewZ7SOdfLnKU=",
 			"path": "github.com/fatih/structs",
@@ -3281,6 +3287,18 @@
 			"revision": "f005bb24262365e93726b94d89e6cd8a26a7455e",
 			"revisionTime": "2017-02-20T05:26:33Z"
 		},
+		{
+			"checksumSHA1": "7Moak3A23GAQIGxMBNwdHng8ZUk=",
+			"path": "k8s.io/apiserver/pkg/features",
+			"revision": "f5134760969bc1bb7457ac717f329538208174e6",
+			"revisionTime": "2017-02-23T16:14:39Z"
+		},
+		{
+			"checksumSHA1": "eD47gRCYqFRXXw4CdIqIbGnJid8=",
+			"path": "k8s.io/apiserver/pkg/util/feature",
+			"revision": "f5134760969bc1bb7457ac717f329538208174e6",
+			"revisionTime": "2017-02-23T16:14:39Z"
+		},
 		{
 			"checksumSHA1": "7AJnegJy/B9YU9VnaSjbmwo7bxA=",
 			"path": "k8s.io/kubernetes/pkg/api",
@@ -3289,6 +3307,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "yTgKpHz2CFFN/lMP9Evm87rvYkM=",
+			"path": "k8s.io/kubernetes/pkg/api/endpoints",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "4X0Ov/3JS85n222SqSb9qmp3OTs=",
 			"path": "k8s.io/kubernetes/pkg/api/errors",
@@ -3321,6 +3347,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "YU331KDG4VxRBuD4zFoE9LNj3t4=",
+			"path": "k8s.io/kubernetes/pkg/api/pod",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "glESePhq3u+sf13SpTzJ9ict/T4=",
 			"path": "k8s.io/kubernetes/pkg/api/resource",
@@ -3329,6 +3363,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "dYhk2yrh+k3G3APFuYx5vF8Rmw0=",
+			"path": "k8s.io/kubernetes/pkg/api/service",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "kgSrbsYhDwcpuGpFFiQClIq5I0w=",
 			"path": "k8s.io/kubernetes/pkg/api/unversioned",
@@ -3337,6 +3379,22 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "S3bCRWM0uMVXSas9JvKQdqfH76s=",
+			"path": "k8s.io/kubernetes/pkg/api/unversioned/validation",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
+		{
+			"checksumSHA1": "Oe4S5Bk0mR5lU5mh6WsJfS5tNMU=",
+			"path": "k8s.io/kubernetes/pkg/api/util",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "W5U8zCxHpibrVPlVv3YtNAG6mhI=",
 			"path": "k8s.io/kubernetes/pkg/api/v1",
@@ -3345,6 +3403,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "h9jLBKF7NtXnJ+GRyG6NZJMNyZA=",
+			"path": "k8s.io/kubernetes/pkg/api/validation",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "96gUSyImHFoeCmgTSNeupC3La5s=",
 			"path": "k8s.io/kubernetes/pkg/api/validation/path",
@@ -3633,6 +3699,12 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "DwUkQeGOyctoYMqg/QbdEuTzLWE=",
+			"path": "k8s.io/kubernetes/pkg/capabilities",
+			"revision": "cff3c99613fda9d4b1dc0d959cb528326ee4416c",
+			"revisionTime": "2017-02-26T16:10:02Z"
+		},
 		{
 			"checksumSHA1": "QNBGekG5aiFIkuaTHX0DTgKLyiE=",
 			"path": "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5",
@@ -3822,6 +3894,12 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "tJOdN7PyF09mHrpWplVYO609sI8=",
+			"path": "k8s.io/kubernetes/pkg/features",
+			"revision": "cff3c99613fda9d4b1dc0d959cb528326ee4416c",
+			"revisionTime": "2017-02-26T16:10:02Z"
+		},
 		{
 			"checksumSHA1": "n22dqZqoIzhpTiW7WsteC+Zd4ww=",
 			"path": "k8s.io/kubernetes/pkg/fields",
@@ -3902,6 +3980,12 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "JS1foKD2+ytR+8ioRzb894EGHBw=",
+			"path": "k8s.io/kubernetes/pkg/security/apparmor",
+			"revision": "cff3c99613fda9d4b1dc0d959cb528326ee4416c",
+			"revisionTime": "2017-02-26T16:10:02Z"
+		},
 		{
 			"checksumSHA1": "JoovLtllozmF5E6V55//mtxkS3U=",
 			"path": "k8s.io/kubernetes/pkg/selection",
@@ -3942,6 +4026,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "9lYagUSPgmV/iiHV/ONEXeqFblc=",
+			"path": "k8s.io/kubernetes/pkg/util/config",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "MfaCGqPJXEj29bpJ/cDAA1K9FE4=",
 			"path": "k8s.io/kubernetes/pkg/util/errors",
@@ -3966,6 +4058,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "rvkgO7Yk+ISdyFyabs3nu2e4tIs=",
+			"path": "k8s.io/kubernetes/pkg/util/hash",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "a0hqN01LyyiMrSTNTWC4QbMvkJQ=",
 			"path": "k8s.io/kubernetes/pkg/util/homedir",
@@ -4022,6 +4122,14 @@
 			"version": "v1.5.3",
 			"versionExact": "v1.5.3"
 		},
+		{
+			"checksumSHA1": "D/GHEAK824gP9AuEI3ct2dRQ1I0=",
+			"path": "k8s.io/kubernetes/pkg/util/net/sets",
+			"revision": "029c3a408176b55c30846f0faedf56aae5992e9b",
+			"revisionTime": "2017-02-15T06:33:32Z",
+			"version": "v1.5.3",
+			"versionExact": "v1.5.3"
+		},
 		{
 			"checksumSHA1": "7cnboByPrgJAdMHsdDYmMV6Hy70=",
 			"path": "k8s.io/kubernetes/pkg/util/parsers",