diff --git a/go.mod b/go.mod
index fc684b93c..a4fcb3de0 100644
--- a/go.mod
+++ b/go.mod
@@ -4,8 +4,6 @@ require (
 cloud.google.com/go v0.36.0
 github.com/Azure/azure-sdk-for-go v21.3.0+incompatible
 github.com/Azure/go-autorest v10.15.4+incompatible
- github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006 // indirect
- github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 // indirect
 github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 // indirect
 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
 github.com/agext/levenshtein v1.2.2
@@ -78,9 +76,7 @@ require (
 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba // indirect
 github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82
 github.com/marstr/guid v1.1.0 // indirect
- github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c // indirect
- github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 // indirect
- github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939
+ github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b
 github.com/mattn/go-colorable v0.1.1
 github.com/mattn/go-shellwords v1.0.4
 github.com/miekg/dns v1.0.8 // indirect
@@ -95,7 +91,6 @@ require (
 github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4
 github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51
 github.com/mitchellh/reflectwalk v1.0.0
- github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
 github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect
 github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 // indirect
@@ -118,7 +113,7 @@ require (
 go.uber.org/atomic v1.3.2 // indirect
 go.uber.org/multierr v1.1.0 // indirect
 go.uber.org/zap v1.9.1 // indirect
- golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2
+ golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd
 golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9
 google.golang.org/api v0.1.0
diff --git a/go.sum b/go.sum
index 0c1f6c333..b703f0f1e 100644
--- a/go.sum
+++ b/go.sum
@@ -13,11 +13,11 @@ github.com/Azure/azure-sdk-for-go v21.3.0+incompatible h1:YFvAka2WKAl2xnJkYV1e1b
 github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-autorest v10.15.4+incompatible h1:q+DRrRdbCnkY7f2WxQBx58TwCGkEdMAK/hkZ10g0Pzk=
 github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006 h1:dVyNL14dq1500JomYVzJTVi0XEcZFCYwwiNpDeCfoes=
-github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290 h1:K9I21XUHNbYD3GNMmJBN0UKJCpdP+glftwNZ7Bo8kqY=
-github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
+github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
+github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
 github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 h1:tuQ7w+my8a8mkwN7x2TSd7OzTjkZ7rAeSyH4xncuAMI=
 github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
@@ -247,12 +247,10 @@ github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N
 github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84=
 github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
 github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=
-github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE=
 github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM=
 github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
-github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939 h1:cRFHA33ER97Xy5jmjS519OXCS/yE3AT3zdbQAg0Z53g=
-github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939/go.mod h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E=
+github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=
+github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
 github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
@@ -415,6 +413,8 @@ golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2 h1:NwxKRvbkH5MsNkvOtPZi3/3kmI8CAzs3mtv+GLQMkNo=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f h1:qWFY9ZxP3tfI37wYIs/MnIAqK0vlXp1xnYEa5HxFSSY=
+golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
diff --git a/vendor/github.com/Azure/go-ntlmssp/.travis.yml b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
index 7223995e8..7951372c7 100644
--- a/vendor/github.com/Azure/go-ntlmssp/.travis.yml
+++ b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
@@ -5,7 +5,10 @@ language: go
 before_script:
 - go get -u github.com/golang/lint/golint
 
-go: 1.6
+go:
+ - 1.10.x
+ - master
+
 script:
 - test -z "$(gofmt -s -l . | tee /dev/stderr)"
 - test -z "$(golint ./...
| tee /dev/stderr)" diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go index 6c3ce7b01..5905c023d 100644 --- a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go @@ -7,30 +7,38 @@ const ( /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 - /*D*/ negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 + /*D*/ + negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 - /*H*/ negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 + /*H*/ + negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 - /*J*/ negotiateFlagANONYMOUS = 1 << 11 + /*J*/ + negotiateFlagANONYMOUS = 1 << 11 /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 - /*M*/ negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 + /*M*/ + negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 - /*P*/ negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 + /*P*/ + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 - /*R*/ negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 + /*R*/ + negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 - /*T*/ negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 + /*T*/ + negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 - /*U*/ negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 + /*U*/ + negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 ) diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go index 97aa07e8f..e466a9861 100644 --- a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go @@ -3,29 +3,62 @@ package ntlmssp import ( "bytes" "encoding/binary" + "errors" + "strings" ) +const expMsgBodyLen = 40 + type negotiateMessageFields struct { messageHeader NegotiateFlags negotiateFlags + + Domain varField + Workstation varField + + Version } +var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO | + negotiateFlagNTLMSSPNEGOTIATE56 | + negotiateFlagNTLMSSPNEGOTIATE128 | + negotiateFlagNTLMSSPNEGOTIATEUNICODE | + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY + //NewNegotiateMessage creates a new NEGOTIATE message with the //flags that this package supports. 
-func NewNegotiateMessage() []byte { - m := negotiateMessageFields{ - messageHeader: newMessageHeader(1), +func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) { + payloadOffset := expMsgBodyLen + flags := defaultFlags + + if domainName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED } - m.NegotiateFlags = negotiateFlagNTLMSSPREQUESTTARGET | - negotiateFlagNTLMSSPNEGOTIATENTLM | - negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN | - negotiateFlagNTLMSSPNEGOTIATEUNICODE + if workstationName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED + } + + msg := negotiateMessageFields{ + messageHeader: newMessageHeader(1), + NegotiateFlags: flags, + Domain: newVarField(&payloadOffset, len(domainName)), + Workstation: newVarField(&payloadOffset, len(workstationName)), + Version: DefaultVersion(), + } b := bytes.Buffer{} - err := binary.Write(&b, binary.LittleEndian, &m) - if err != nil { - panic(err) + if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil { + return nil, err } - return b.Bytes() + if b.Len() != expMsgBodyLen { + return nil, errors.New("incorrect body length") + } + + payload := strings.ToUpper(domainName + workstationName) + if _, err := b.WriteString(payload); err != nil { + return nil, err + } + + return b.Bytes(), nil } diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go index 9ae4586a2..6e304544b 100644 --- a/vendor/github.com/Azure/go-ntlmssp/negotiator.go +++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go @@ -6,8 +6,21 @@ import ( "io" "io/ioutil" "net/http" + "strings" ) +// GetDomain : parse domain name from based on slashes in the input +func GetDomain(user string) (string, string) { + domain := "" + + if strings.Contains(user, "\\") { + ucomponents := strings.SplitN(user, "\\", 2) + domain = ucomponents[0] + user = ucomponents[1] + } + return user, domain +} + //Negotiator is a http.Roundtripper decorator that automatically //converts basic authentication to NTLM/Negotiate authentication when appropriate. 
type Negotiator struct{ http.RoundTripper } @@ -76,8 +89,15 @@ func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) return nil, err } + // get domain from username + domain := "" + u, domain = GetDomain(u) + // send negotiate - negotiateMessage := NewNegotiateMessage() + negotiateMessage, err := NewNegotiateMessage(domain, "") + if err != nil { + return nil, err + } if resauth.IsNTLM() { req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage)) } else { diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go new file mode 100644 index 000000000..6d8489212 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/version.go @@ -0,0 +1,20 @@ +package ntlmssp + +// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx +type Version struct { + ProductMajorVersion uint8 + ProductMinorVersion uint8 + ProductBuild uint16 + _ [3]byte + NTLMRevisionCurrent uint8 +} + +// DefaultVersion returns a Version with "sensible" defaults (Windows 7) +func DefaultVersion() Version { + return Version{ + ProductMajorVersion: 6, + ProductMinorVersion: 1, + ProductBuild: 7601, + NTLMRevisionCurrent: 15, + } +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go index af2e2f141..189ebb9c2 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/ChrisTrenkamp/goxpath/internal/execxp" - "github.com/ChrisTrenkamp/goxpath/internal/parser" + "github.com/ChrisTrenkamp/goxpath/parser" "github.com/ChrisTrenkamp/goxpath/tree" ) diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go index 0746eacac..7b3251568 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go @@ -3,7 +3,7 @@ package execxp import ( "encoding/xml" - "github.com/ChrisTrenkamp/goxpath/internal/parser" + "github.com/ChrisTrenkamp/goxpath/parser" "github.com/ChrisTrenkamp/goxpath/tree" ) diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/findutil/findUtil.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go similarity index 98% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/findutil/findUtil.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go index 6a6d44abe..058f79a0b 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/findutil/findUtil.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go @@ -3,9 +3,9 @@ package findutil import ( "encoding/xml" - "github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr" - "github.com/ChrisTrenkamp/goxpath/internal/xconst" + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" ) const ( diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/boolfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/boolfns.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go diff --git 
a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/intfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/intfns.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/nodesetfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/nodesetfns.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/numfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/numfns.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/stringfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/stringfns.go rename to vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go index 60ada7ab1..5a78138ee 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go @@ -6,15 +6,14 @@ import ( "strconv" "strings" - "github.com/ChrisTrenkamp/goxpath/internal/parser" - "github.com/ChrisTrenkamp/goxpath/internal/parser/findutil" - "github.com/ChrisTrenkamp/goxpath/internal/parser/intfns" - "github.com/ChrisTrenkamp/goxpath/internal/xconst" + "github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil" + "github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns" "github.com/ChrisTrenkamp/goxpath/internal/xsort" - - "github.com/ChrisTrenkamp/goxpath/internal/lexer" - "github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr" + "github.com/ChrisTrenkamp/goxpath/lexer" + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" ) type xpFilt struct { diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/lexer.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/lexer.go rename to vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go similarity index 98% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/paths.go rename to vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go index 2393f6966..dac30d6b6 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/paths.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go @@ -3,7 +3,7 @@ package lexer import ( "fmt" - "github.com/ChrisTrenkamp/goxpath/internal/xconst" + "github.com/ChrisTrenkamp/goxpath/xconst" ) func absLocPathState(l *Lexer) stateFn { diff --git 
a/vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/xmlchars.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/lexer/xmlchars.go rename to vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/ast.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go similarity index 97% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/ast.go rename to vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go index ec62206ea..89678f6ba 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/ast.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go @@ -1,6 +1,6 @@ package parser -import "github.com/ChrisTrenkamp/goxpath/internal/lexer" +import "github.com/ChrisTrenkamp/goxpath/lexer" //NodeType enumerations const ( diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/parser.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go similarity index 98% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/parser.go rename to vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go index 9029c7a74..ff664ff78 100644 --- a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/parser.go +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go @@ -3,7 +3,7 @@ package parser import ( "fmt" - "github.com/ChrisTrenkamp/goxpath/internal/lexer" + "github.com/ChrisTrenkamp/goxpath/lexer" ) type stateType int diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr/pathexpr.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr/pathexpr.go rename to vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/xconst/xconst.go b/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go similarity index 100% rename from vendor/github.com/ChrisTrenkamp/goxpath/internal/xconst/xconst.go rename to vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/LICENSE b/vendor/github.com/masterzen/azure-sdk-for-go/LICENSE deleted file mode 100644 index af39a91e7..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go deleted file mode 100644 index 749f29d32..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/chunked.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The wire protocol for HTTP's "chunked" Transfer-Encoding. - -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. - -package http - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" -) - -const maxLineLength = 4096 // assumed <= bufio.defaultBufSize - -var ErrLineTooLong = errors.New("header line too long") - -// newChunkedReader returns a new chunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. -// -// newChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. -func newChunkedReader(r io.Reader) io.Reader { - br, ok := r.(*bufio.Reader) - if !ok { - br = bufio.NewReader(r) - } - return &chunkedReader{r: br} -} - -type chunkedReader struct { - r *bufio.Reader - n uint64 // unread bytes in chunk - err error - buf [2]byte -} - -func (cr *chunkedReader) beginChunk() { - // chunk-size CRLF - var line []byte - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - cr.n, cr.err = parseHexUint(line) - if cr.err != nil { - return - } - if cr.n == 0 { - cr.err = io.EOF - } -} - -func (cr *chunkedReader) chunkHeaderAvailable() bool { - n := cr.r.Buffered() - if n > 0 { - peek, _ := cr.r.Peek(n) - return bytes.IndexByte(peek, '\n') >= 0 - } - return false -} - -func (cr *chunkedReader) Read(b []uint8) (n int, err error) { - for cr.err == nil { - if cr.n == 0 { - if n > 0 && !cr.chunkHeaderAvailable() { - // We've read enough. Don't potentially block - // reading a new chunk header. - break - } - cr.beginChunk() - continue - } - if len(b) == 0 { - break - } - rbuf := b - if uint64(len(rbuf)) > cr.n { - rbuf = rbuf[:cr.n] - } - var n0 int - n0, cr.err = cr.r.Read(rbuf) - n += n0 - b = b[n0:] - cr.n -= uint64(n0) - // If we're at the end of a chunk, read the next two - // bytes to verify they are "\r\n". - if cr.n == 0 && cr.err == nil { - if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { - if cr.buf[0] != '\r' || cr.buf[1] != '\n' { - cr.err = errors.New("malformed chunked encoding") - } - } - } - } - return n, cr.err -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. -// The returned bytes are a pointer into storage in -// the bufio, so they are only valid until the next bufio read. -func readLine(b *bufio.Reader) (p []byte, err error) { - if p, err = b.ReadSlice('\n'); err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { - err = ErrLineTooLong - } - return nil, err - } - if len(p) >= maxLineLength { - return nil, ErrLineTooLong - } - return trimTrailingWhitespace(p), nil -} - -func trimTrailingWhitespace(b []byte) []byte { - for len(b) > 0 && isASCIISpace(b[len(b)-1]) { - b = b[:len(b)-1] - } - return b -} - -func isASCIISpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' -} - -// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP -// "chunked" format before writing them to w. Closing the returned chunkedWriter -// sends the final 0-length chunk that marks the end of the stream. -// -// newChunkedWriter is not needed by normal applications. 
The http -// package adds chunking automatically if handlers don't set a -// Content-Length header. Using newChunkedWriter inside a handler -// would result in double chunking or chunking with a Content-Length -// length, both of which are wrong. -func newChunkedWriter(w io.Writer) io.WriteCloser { - return &chunkedWriter{w} -} - -// Writing to chunkedWriter translates to writing in HTTP chunked Transfer -// Encoding wire format to the underlying Wire chunkedWriter. -type chunkedWriter struct { - Wire io.Writer -} - -// Write the contents of data as one chunk to Wire. -// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has -// a bug since it does not check for success of io.WriteString -func (cw *chunkedWriter) Write(data []byte) (n int, err error) { - - // Don't send 0-length data. It looks like EOF for chunked encoding. - if len(data) == 0 { - return 0, nil - } - - if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { - return 0, err - } - if n, err = cw.Wire.Write(data); err != nil { - return - } - if n != len(data) { - err = io.ErrShortWrite - return - } - _, err = io.WriteString(cw.Wire, "\r\n") - - return -} - -func (cw *chunkedWriter) Close() error { - _, err := io.WriteString(cw.Wire, "0\r\n") - return err -} - -func parseHexUint(v []byte) (n uint64, err error) { - for _, b := range v { - n <<= 4 - switch { - case '0' <= b && b <= '9': - b = b - '0' - case 'a' <= b && b <= 'f': - b = b - 'a' + 10 - case 'A' <= b && b <= 'F': - b = b - 'A' + 10 - default: - return 0, errors.New("invalid byte in chunk length") - } - n |= uint64(b) - } - return -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/client.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/client.go deleted file mode 100644 index a5a3abe61..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/client.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP client. See RFC 2616. -// -// This is the high-level Client interface. -// The low-level implementation is in transport.go. - -package http - -import ( - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/url" - "strings" - "sync" - "time" -) - -// A Client is an HTTP client. Its zero value (DefaultClient) is a -// usable client that uses DefaultTransport. -// -// The Client's Transport typically has internal state (cached TCP -// connections), so Clients should be reused instead of created as -// needed. Clients are safe for concurrent use by multiple goroutines. -// -// A Client is higher-level than a RoundTripper (such as Transport) -// and additionally handles HTTP details such as cookies and -// redirects. -type Client struct { - // Transport specifies the mechanism by which individual - // HTTP requests are made. - // If nil, DefaultTransport is used. - Transport RoundTripper - - // CheckRedirect specifies the policy for handling redirects. - // If CheckRedirect is not nil, the client calls it before - // following an HTTP redirect. The arguments req and via are - // the upcoming request and the requests made already, oldest - // first. If CheckRedirect returns an error, the Client's Get - // method returns both the previous Response and - // CheckRedirect's error (wrapped in a url.Error) instead of - // issuing the Request req. 
- // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect func(req *Request, via []*Request) error - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar CookieJar - - // Timeout specifies a time limit for requests made by this - // Client. The timeout includes connection time, any - // redirects, and reading the response body. The timer remains - // running after Get, Head, Post, or Do return and will - // interrupt reading of the Response.Body. - // - // A Timeout of zero means no timeout. - // - // The Client's Transport must support the CancelRequest - // method or Client will return errors when attempting to make - // a request with Get, Head, Post, or Do. Client's default - // Transport (DefaultTransport) supports CancelRequest. - Timeout time.Duration -} - -// DefaultClient is the default Client and is used by Get, Head, and Post. -var DefaultClient = &Client{} - -// RoundTripper is an interface representing the ability to execute a -// single HTTP transaction, obtaining the Response for a given Request. -// -// A RoundTripper must be safe for concurrent use by multiple -// goroutines. -type RoundTripper interface { - // RoundTrip executes a single HTTP transaction, returning - // the Response for the request req. RoundTrip should not - // attempt to interpret the response. In particular, - // RoundTrip must return err == nil if it obtained a response, - // regardless of the response's HTTP status code. A non-nil - // err should be reserved for failure to obtain a response. - // Similarly, RoundTrip should not attempt to handle - // higher-level protocol details such as redirects, - // authentication, or cookies. - // - // RoundTrip should not modify the request, except for - // consuming and closing the Body, including on errors. The - // request's URL and Header fields are guaranteed to be - // initialized. - RoundTrip(*Request) (*Response, error) -} - -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// Used in Send to implement io.ReadCloser by bundling together the -// bufio.Reader through which we read the response, and the underlying -// network connection. -type readClose struct { - io.Reader - io.Closer -} - -func (c *Client) send(req *Request) (*Response, error) { - if c.Jar != nil { - for _, cookie := range c.Jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - } - resp, err := send(req, c.transport()) - if err != nil { - return nil, err - } - if c.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - c.Jar.SetCookies(req.URL, rc) - } - } - return resp, err -} - -// Do sends an HTTP request and returns an HTTP response, following -// policy (e.g. redirects, cookies, auth) as configured on the client. -// -// An error is returned if caused by client policy (such as -// CheckRedirect), or if there was an HTTP protocol error. -// A non-2xx response doesn't cause an error. -// -// When err is nil, resp always contains a non-nil resp.Body. -// -// Callers should close resp.Body when done reading from it. If -// resp.Body is not closed, the Client's underlying RoundTripper -// (typically Transport) may not be able to re-use a persistent TCP -// connection to the server for a subsequent "keep-alive" request. 
-// -// The request Body, if non-nil, will be closed by the underlying -// Transport, even on errors. -// -// Generally Get, Post, or PostForm will be used instead of Do. -func (c *Client) Do(req *Request) (resp *Response, err error) { - if req.Method == "GET" || req.Method == "HEAD" { - return c.doFollowingRedirects(req, shouldRedirectGet) - } - if req.Method == "POST" || req.Method == "PUT" { - return c.doFollowingRedirects(req, shouldRedirectPost) - } - return c.send(req) -} - -func (c *Client) transport() RoundTripper { - if c.Transport != nil { - return c.Transport - } - return DefaultTransport -} - -// send issues an HTTP request. -// Caller should close resp.Body when done reading from it. -func send(req *Request, t RoundTripper) (resp *Response, err error) { - if t == nil { - req.closeBody() - return nil, errors.New("http: no Client.Transport or DefaultTransport") - } - - if req.URL == nil { - req.closeBody() - return nil, errors.New("http: nil Request.URL") - } - - if req.RequestURI != "" { - req.closeBody() - return nil, errors.New("http: Request.RequestURI can't be set in client requests.") - } - - // Most the callers of send (Get, Post, et al) don't need - // Headers, leaving it uninitialized. We guarantee to the - // Transport that this has been initialized, though. - if req.Header == nil { - req.Header = make(Header) - } - - if u := req.URL.User; u != nil { - username := u.Username() - password, _ := u.Password() - req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) - } - resp, err = t.RoundTrip(req) - if err != nil { - if resp != nil { - log.Printf("RoundTripper returned a response & error; ignoring response") - } - return nil, err - } - return resp, nil -} - -// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt -// "To receive authorization, the client sends the userid and password, -// separated by a single colon (":") character, within a base64 -// encoded string in the credentials." -// It is not meant to be urlencoded. -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -// True if the specified HTTP status code is one for which the Get utility should -// automatically redirect. -func shouldRedirectGet(statusCode int) bool { - switch statusCode { - case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: - return true - } - return false -} - -// True if the specified HTTP status code is one for which the Post utility should -// automatically redirect. -func shouldRedirectPost(statusCode int) bool { - switch statusCode { - case StatusFound, StatusSeeOther: - return true - } - return false -} - -// Get issues a GET to the specified URL. If the response is one of the following -// redirect codes, Get follows the redirect, up to a maximum of 10 redirects: -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// An error is returned if there were too many redirects or if there -// was an HTTP protocol error. A non-2xx response doesn't cause an -// error. -// -// When err is nil, resp always contains a non-nil resp.Body. -// Caller should close resp.Body when done reading from it. -// -// Get is a wrapper around DefaultClient.Get. -func Get(url string) (resp *Response, err error) { - return DefaultClient.Get(url) -} - -// Get issues a GET to the specified URL. 
If the response is one of the -// following redirect codes, Get follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// An error is returned if the Client's CheckRedirect function fails -// or if there was an HTTP protocol error. A non-2xx response doesn't -// cause an error. -// -// When err is nil, resp always contains a non-nil resp.Body. -// Caller should close resp.Body when done reading from it. -func (c *Client) Get(url string) (resp *Response, err error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req, shouldRedirectGet) -} - -func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) { - var base *url.URL - redirectChecker := c.CheckRedirect - if redirectChecker == nil { - redirectChecker = defaultCheckRedirect - } - var via []*Request - - if ireq.URL == nil { - ireq.closeBody() - return nil, errors.New("http: nil Request.URL") - } - - var reqmu sync.Mutex // guards req - req := ireq - - var timer *time.Timer - if c.Timeout > 0 { - type canceler interface { - CancelRequest(*Request) - } - tr, ok := c.transport().(canceler) - if !ok { - return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport()) - } - timer = time.AfterFunc(c.Timeout, func() { - reqmu.Lock() - defer reqmu.Unlock() - tr.CancelRequest(req) - }) - } - - urlStr := "" // next relative or absolute URL to fetch (after first request) - redirectFailed := false - for redirect := 0; ; redirect++ { - if redirect != 0 { - nreq := new(Request) - nreq.Method = ireq.Method - if ireq.Method == "POST" || ireq.Method == "PUT" { - nreq.Method = "GET" - } - nreq.Header = make(Header) - nreq.URL, err = base.Parse(urlStr) - if err != nil { - break - } - if len(via) > 0 { - // Add the Referer header. - lastReq := via[len(via)-1] - if lastReq.URL.Scheme != "https" { - nreq.Header.Set("Referer", lastReq.URL.String()) - } - - err = redirectChecker(nreq, via) - if err != nil { - redirectFailed = true - break - } - } - reqmu.Lock() - req = nreq - reqmu.Unlock() - } - - urlStr = req.URL.String() - if resp, err = c.send(req); err != nil { - break - } - - if shouldRedirect(resp.StatusCode) { - // Read the body if small so underlying TCP connection will be re-used. - // No need to check for errors: if it fails, Transport won't reuse it anyway. - const maxBodySlurpSize = 2 << 10 - if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { - io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize) - } - resp.Body.Close() - if urlStr = resp.Header.Get("Location"); urlStr == "" { - err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode)) - break - } - base = req.URL - via = append(via, req) - continue - } - if timer != nil { - resp.Body = &cancelTimerBody{timer, resp.Body} - } - return resp, nil - } - - method := ireq.Method - urlErr := &url.Error{ - Op: method[0:1] + strings.ToLower(method[1:]), - URL: urlStr, - Err: err, - } - - if redirectFailed { - // Special case for Go 1 compatibility: return both the response - // and an error if the CheckRedirect function failed. 
- // See http://golang.org/issue/3795 - return resp, urlErr - } - - if resp != nil { - resp.Body.Close() - } - return nil, urlErr -} - -func defaultCheckRedirect(req *Request, via []*Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - return nil -} - -// Post issues a POST to the specified URL. -// -// Caller should close resp.Body when done reading from it. -// -// Post is a wrapper around DefaultClient.Post -func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { - return DefaultClient.Post(url, bodyType, body) -} - -// Post issues a POST to the specified URL. -// -// Caller should close resp.Body when done reading from it. -// -// If the provided body is also an io.Closer, it is closed after the -// request. -func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.doFollowingRedirects(req, shouldRedirectPost) -} - -// PostForm issues a POST to the specified URL, with data's keys and -// values URL-encoded as the request body. -// -// When err is nil, resp always contains a non-nil resp.Body. -// Caller should close resp.Body when done reading from it. -// -// PostForm is a wrapper around DefaultClient.PostForm -func PostForm(url string, data url.Values) (resp *Response, err error) { - return DefaultClient.PostForm(url, data) -} - -// PostForm issues a POST to the specified URL, -// with data's keys and values urlencoded as the request body. -// -// When err is nil, resp always contains a non-nil resp.Body. -// Caller should close resp.Body when done reading from it. -func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Head is a wrapper around DefaultClient.Head -func Head(url string) (resp *Response, err error) { - return DefaultClient.Head(url) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -func (c *Client) Head(url string) (resp *Response, err error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req, shouldRedirectGet) -} - -type cancelTimerBody struct { - t *time.Timer - rc io.ReadCloser -} - -func (b *cancelTimerBody) Read(p []byte) (n int, err error) { - n, err = b.rc.Read(p) - if err == io.EOF { - b.t.Stop() - } - return -} - -func (b *cancelTimerBody) Close() error { - err := b.rc.Close() - b.t.Stop() - return err -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/cookie.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/cookie.go deleted file mode 100644 index dc60ba87f..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/cookie.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// This implementation is done according to RFC 6265: -// -// http://tools.ietf.org/html/rfc6265 - -// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an -// HTTP response or the Cookie header of an HTTP request. -type Cookie struct { - Name string - Value string - Path string - Domain string - Expires time.Time - RawExpires string - - // MaxAge=0 means no 'Max-Age' attribute specified. - // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' - // MaxAge>0 means Max-Age attribute present and given in seconds - MaxAge int - Secure bool - HttpOnly bool - Raw string - Unparsed []string // Raw text of unparsed attribute-value pairs -} - -// readSetCookies parses all "Set-Cookie" values from -// the header h and returns the successfully parsed Cookies. -func readSetCookies(h Header) []*Cookie { - cookies := []*Cookie{} - for _, line := range h["Set-Cookie"] { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - parts[0] = strings.TrimSpace(parts[0]) - j := strings.Index(parts[0], "=") - if j < 0 { - continue - } - name, value := parts[0][:j], parts[0][j+1:] - if !isCookieNameValid(name) { - continue - } - value, success := parseCookieValue(value) - if !success { - continue - } - c := &Cookie{ - Name: name, - Value: value, - Raw: line, - } - for i := 1; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - - attr, val := parts[i], "" - if j := strings.Index(attr, "="); j >= 0 { - attr, val = attr[:j], attr[j+1:] - } - lowerAttr := strings.ToLower(attr) - val, success = parseCookieValue(val) - if !success { - c.Unparsed = append(c.Unparsed, parts[i]) - continue - } - switch lowerAttr { - case "secure": - c.Secure = true - continue - case "httponly": - c.HttpOnly = true - continue - case "domain": - c.Domain = val - continue - case "max-age": - secs, err := strconv.Atoi(val) - if err != nil || secs != 0 && val[0] == '0' { - break - } - if secs <= 0 { - c.MaxAge = -1 - } else { - c.MaxAge = secs - } - continue - case "expires": - c.RawExpires = val - exptime, err := time.Parse(time.RFC1123, val) - if err != nil { - exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) - if err != nil { - c.Expires = time.Time{} - break - } - } - c.Expires = exptime.UTC() - continue - case "path": - c.Path = val - continue - } - c.Unparsed = append(c.Unparsed, parts[i]) - } - cookies = append(cookies, c) - } - return cookies -} - -// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. -func SetCookie(w ResponseWriter, cookie *Cookie) { - w.Header().Add("Set-Cookie", cookie.String()) -} - -// String returns the serialization of the cookie for use in a Cookie -// header (if only Name and Value are set) or a Set-Cookie response -// header (if other fields are set). -func (c *Cookie) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) - if len(c.Path) > 0 { - fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path)) - } - if len(c.Domain) > 0 { - if validCookieDomain(c.Domain) { - // A c.Domain containing illegal characters is not - // sanitized but simply dropped which turns the cookie - // into a host-only cookie. A leading dot is okay - // but won't be sent. 
- d := c.Domain - if d[0] == '.' { - d = d[1:] - } - fmt.Fprintf(&b, "; Domain=%s", d) - } else { - log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute", - c.Domain) - } - } - if c.Expires.Unix() > 0 { - fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123)) - } - if c.MaxAge > 0 { - fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge) - } else if c.MaxAge < 0 { - fmt.Fprintf(&b, "; Max-Age=0") - } - if c.HttpOnly { - fmt.Fprintf(&b, "; HttpOnly") - } - if c.Secure { - fmt.Fprintf(&b, "; Secure") - } - return b.String() -} - -// readCookies parses all "Cookie" values from the header h and -// returns the successfully parsed Cookies. -// -// if filter isn't empty, only cookies of that name are returned -func readCookies(h Header, filter string) []*Cookie { - cookies := []*Cookie{} - lines, ok := h["Cookie"] - if !ok { - return cookies - } - - for _, line := range lines { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - // Per-line attributes - parsedPairs := 0 - for i := 0; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - name, val := parts[i], "" - if j := strings.Index(name, "="); j >= 0 { - name, val = name[:j], name[j+1:] - } - if !isCookieNameValid(name) { - continue - } - if filter != "" && filter != name { - continue - } - val, success := parseCookieValue(val) - if !success { - continue - } - cookies = append(cookies, &Cookie{Name: name, Value: val}) - parsedPairs++ - } - } - return cookies -} - -// validCookieDomain returns wheter v is a valid cookie domain-value. -func validCookieDomain(v string) bool { - if isCookieDomainName(v) { - return true - } - if net.ParseIP(v) != nil && !strings.Contains(v, ":") { - return true - } - return false -} - -// isCookieDomainName returns whether s is a valid domain name or a valid -// domain name with a leading dot '.'. It is almost a direct copy of -// package net's isDomainName. -func isCookieDomainName(s string) bool { - if len(s) == 0 { - return false - } - if len(s) > 255 { - return false - } - - if s[0] == '.' { - // A cookie a domain attribute may start with a leading dot. - s = s[1:] - } - last := byte('.') - ok := false // Ok once we've seen a letter. - partlen := 0 - for i := 0; i < len(s); i++ { - c := s[i] - switch { - default: - return false - case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': - // No '_' allowed here (in contrast to package net). - ok = true - partlen++ - case '0' <= c && c <= '9': - // fine - partlen++ - case c == '-': - // Byte before dash cannot be dot. - if last == '.' { - return false - } - partlen++ - case c == '.': - // Byte before dot cannot be dot, dash. - if last == '.' 
|| last == '-' { - return false - } - if partlen > 63 || partlen == 0 { - return false - } - partlen = 0 - } - last = c - } - if last == '-' || partlen > 63 { - return false - } - - return ok -} - -var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-") - -func sanitizeCookieName(n string) string { - return cookieNameSanitizer.Replace(n) -} - -// http://tools.ietf.org/html/rfc6265#section-4.1.1 -// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE ) -// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E -// ; US-ASCII characters excluding CTLs, -// ; whitespace DQUOTE, comma, semicolon, -// ; and backslash -// We loosen this as spaces and commas are common in cookie values -// but we produce a quoted cookie-value in when value starts or ends -// with a comma or space. -// See http://golang.org/issue/7243 for the discussion. -func sanitizeCookieValue(v string) string { - v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v) - if len(v) == 0 { - return v - } - if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' { - return `"` + v + `"` - } - return v -} - -func validCookieValueByte(b byte) bool { - return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\' -} - -// path-av = "Path=" path-value -// path-value = -func sanitizeCookiePath(v string) string { - return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v) -} - -func validCookiePathByte(b byte) bool { - return 0x20 <= b && b < 0x7f && b != ';' -} - -func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string { - ok := true - for i := 0; i < len(v); i++ { - if valid(v[i]) { - continue - } - log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName) - ok = false - break - } - if ok { - return v - } - buf := make([]byte, 0, len(v)) - for i := 0; i < len(v); i++ { - if b := v[i]; valid(b) { - buf = append(buf, b) - } - } - return string(buf) -} - -func parseCookieValue(raw string) (string, bool) { - // Strip the quotes, if present. - if len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { - raw = raw[1 : len(raw)-1] - } - for i := 0; i < len(raw); i++ { - if !validCookieValueByte(raw[i]) { - return "", false - } - } - return raw, true -} - -func isCookieNameValid(raw string) bool { - return strings.IndexFunc(raw, isNotToken) < 0 -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/doc.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/doc.go deleted file mode 100644 index b1216e8da..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package http provides HTTP client and server implementations. - -Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: - - resp, err := http.Get("http://example.com/") - ... - resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - ... - resp, err := http.PostForm("http://example.com/form", - url.Values{"key": {"Value"}, "id": {"123"}}) - -The client must close the response body when finished with it: - - resp, err := http.Get("http://example.com/") - if err != nil { - // handle error - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - // ... 
- -For control over HTTP client headers, redirect policy, and other -settings, create a Client: - - client := &http.Client{ - CheckRedirect: redirectPolicyFunc, - } - - resp, err := client.Get("http://example.com") - // ... - - req, err := http.NewRequest("GET", "http://example.com", nil) - // ... - req.Header.Add("If-None-Match", `W/"wyzzy"`) - resp, err := client.Do(req) - // ... - -For control over proxies, TLS configuration, keep-alives, -compression, and other settings, create a Transport: - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{RootCAs: pool}, - DisableCompression: true, - } - client := &http.Client{Transport: tr} - resp, err := client.Get("https://example.com") - -Clients and Transports are safe for concurrent use by multiple -goroutines and for efficiency should only be created once and re-used. - -ListenAndServe starts an HTTP server with a given address and handler. -The handler is usually nil, which means to use DefaultServeMux. -Handle and HandleFunc add handlers to DefaultServeMux: - - http.Handle("/foo", fooHandler) - - http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - }) - - log.Fatal(http.ListenAndServe(":8080", nil)) - -More control over the server's behavior is available by creating a -custom Server: - - s := &http.Server{ - Addr: ":8080", - Handler: myHandler, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, - } - log.Fatal(s.ListenAndServe()) -*/ -package http diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go deleted file mode 100644 index 821787e0c..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/filetransport.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "fmt" - "io" -) - -// fileTransport implements RoundTripper for the 'file' protocol. -type fileTransport struct { - fh fileHandler -} - -// NewFileTransport returns a new RoundTripper, serving the provided -// FileSystem. The returned RoundTripper ignores the URL host in its -// incoming requests, as well as most other properties of the -// request. -// -// The typical use case for NewFileTransport is to register the "file" -// protocol with a Transport, as in: -// -// t := &http.Transport{} -// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) -// c := &http.Client{Transport: t} -// res, err := c.Get("file:///etc/passwd") -// ... -func NewFileTransport(fs FileSystem) RoundTripper { - return fileTransport{fileHandler{fs}} -} - -func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) { - // We start ServeHTTP in a goroutine, which may take a long - // time if the file is large. 
The newPopulateResponseWriter - // call returns a channel which either ServeHTTP or finish() - // sends our *Response on, once the *Response itself has been - // populated (even if the body itself is still being - // written to the res.Body, a pipe) - rw, resc := newPopulateResponseWriter() - go func() { - t.fh.ServeHTTP(rw, req) - rw.finish() - }() - return <-resc, nil -} - -func newPopulateResponseWriter() (*populateResponse, <-chan *Response) { - pr, pw := io.Pipe() - rw := &populateResponse{ - ch: make(chan *Response), - pw: pw, - res: &Response{ - Proto: "HTTP/1.0", - ProtoMajor: 1, - Header: make(Header), - Close: true, - Body: pr, - }, - } - return rw, rw.ch -} - -// populateResponse is a ResponseWriter that populates the *Response -// in res, and writes its body to a pipe connected to the response -// body. Once writes begin or finish() is called, the response is sent -// on ch. -type populateResponse struct { - res *Response - ch chan *Response - wroteHeader bool - hasContent bool - sentResponse bool - pw *io.PipeWriter -} - -func (pr *populateResponse) finish() { - if !pr.wroteHeader { - pr.WriteHeader(500) - } - if !pr.sentResponse { - pr.sendResponse() - } - pr.pw.Close() -} - -func (pr *populateResponse) sendResponse() { - if pr.sentResponse { - return - } - pr.sentResponse = true - - if pr.hasContent { - pr.res.ContentLength = -1 - } - pr.ch <- pr.res -} - -func (pr *populateResponse) Header() Header { - return pr.res.Header -} - -func (pr *populateResponse) WriteHeader(code int) { - if pr.wroteHeader { - return - } - pr.wroteHeader = true - - pr.res.StatusCode = code - pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code)) -} - -func (pr *populateResponse) Write(p []byte) (n int, err error) { - if !pr.wroteHeader { - pr.WriteHeader(StatusOK) - } - pr.hasContent = true - if !pr.sentResponse { - pr.sendResponse() - } - return pr.pw.Write(p) -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/fs.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/fs.go deleted file mode 100644 index 8576cf844..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/fs.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP file system request handler - -package http - -import ( - "errors" - "fmt" - "io" - "mime" - "mime/multipart" - "net/textproto" - "net/url" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" -) - -// A Dir implements http.FileSystem using the native file -// system restricted to a specific directory tree. -// -// An empty Dir is treated as ".". -type Dir string - -func (d Dir) Open(name string) (File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d) - if dir == "" { - dir = "." - } - f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -// A FileSystem implements access to a collection of named files. -// The elements in a file path are separated by slash ('/', U+002F) -// characters, regardless of host operating system convention. -type FileSystem interface { - Open(name string) (File, error) -} - -// A File is returned by a FileSystem's Open method and can be -// served by the FileServer implementation. 
-// -// The methods should behave the same as those on an *os.File. -type File interface { - io.Closer - io.Reader - Readdir(count int) ([]os.FileInfo, error) - Seek(offset int64, whence int) (int64, error) - Stat() (os.FileInfo, error) -} - -func dirList(w ResponseWriter, f File) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintf(w, "
\n")
-	for {
-		dirs, err := f.Readdir(100)
-		if err != nil || len(dirs) == 0 {
-			break
-		}
-		for _, d := range dirs {
-			name := d.Name()
-			if d.IsDir() {
-				name += "/"
-			}
-			// name may contain '?' or '#', which must be escaped to remain
-			// part of the URL path, and not indicate the start of a query
-			// string or fragment.
-			url := url.URL{Path: name}
-			fmt.Fprintf(w, "%s\n", url.String(), htmlReplacer.Replace(name))
-		}
-	}
-	fmt.Fprintf(w, "
\n") -} - -// ServeContent replies to the request using the content in the -// provided ReadSeeker. The main benefit of ServeContent over io.Copy -// is that it handles Range requests properly, sets the MIME type, and -// handles If-Modified-Since requests. -// -// If the response's Content-Type header is not set, ServeContent -// first tries to deduce the type from name's file extension and, -// if that fails, falls back to reading the first block of the content -// and passing it to DetectContentType. -// The name is otherwise unused; in particular it can be empty and is -// never sent in the response. -// -// If modtime is not the zero time, ServeContent includes it in a -// Last-Modified header in the response. If the request includes an -// If-Modified-Since header, ServeContent uses modtime to decide -// whether the content needs to be sent at all. -// -// The content's Seek method must work: ServeContent uses -// a seek to the end of the content to determine its size. -// -// If the caller has set w's ETag header, ServeContent uses it to -// handle requests using If-Range and If-None-Match. -// -// Note that *os.File implements the io.ReadSeeker interface. -func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { - sizeFunc := func() (int64, error) { - size, err := content.Seek(0, os.SEEK_END) - if err != nil { - return 0, errSeeker - } - _, err = content.Seek(0, os.SEEK_SET) - if err != nil { - return 0, errSeeker - } - return size, nil - } - serveContent(w, req, name, modtime, sizeFunc, content) -} - -// errSeeker is returned by ServeContent's sizeFunc when the content -// doesn't seek properly. The underlying Seeker's error text isn't -// included in the sizeFunc reply so it's not sent over HTTP to end -// users. -var errSeeker = errors.New("seeker can't seek") - -// if name is empty, filename is unknown. (used for mime type, before sniffing) -// if modtime.IsZero(), modtime is unknown. -// content must be seeked to the beginning of the file. -// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response. -func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) { - if checkLastModified(w, r, modtime) { - return - } - rangeReq, done := checkETag(w, r) - if done { - return - } - - code := StatusOK - - // If Content-Type isn't set, use the file's extension to find it, but - // if the Content-Type is unset explicitly, do not sniff the type. - ctypes, haveType := w.Header()["Content-Type"] - var ctype string - if !haveType { - ctype = mime.TypeByExtension(filepath.Ext(name)) - if ctype == "" { - // read a chunk to decide between utf-8 text and binary - var buf [sniffLen]byte - n, _ := io.ReadFull(content, buf[:]) - ctype = DetectContentType(buf[:n]) - _, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file - if err != nil { - Error(w, "seeker can't seek", StatusInternalServerError) - return - } - } - w.Header().Set("Content-Type", ctype) - } else if len(ctypes) > 0 { - ctype = ctypes[0] - } - - size, err := sizeFunc() - if err != nil { - Error(w, err.Error(), StatusInternalServerError) - return - } - - // handle Content-Range header. 
- sendSize := size - var sendContent io.Reader = content - if size >= 0 { - ranges, err := parseRange(rangeReq, size) - if err != nil { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > size { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - ranges = nil - } - switch { - case len(ranges) == 1: - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - sendSize = ra.length - code = StatusPartialContent - w.Header().Set("Content-Range", ra.contentRange(size)) - case len(ranges) > 1: - for _, ra := range ranges { - if ra.start > size { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize = rangesMIMESize(ranges, ctype, size) - code = StatusPartialContent - - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent = pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. - go func() { - for _, ra := range ranges { - part, err := mw.CreatePart(ra.mimeHeader(ctype, size)) - if err != nil { - pw.CloseWithError(err) - return - } - if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { - pw.CloseWithError(err) - return - } - if _, err := io.CopyN(part, content, ra.length); err != nil { - pw.CloseWithError(err) - return - } - } - mw.Close() - pw.Close() - }() - } - - w.Header().Set("Accept-Ranges", "bytes") - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - } - - w.WriteHeader(code) - - if r.Method != "HEAD" { - io.CopyN(w, sendContent, sendSize) - } -} - -// modtime is the modification time of the resource to be served, or IsZero(). -// return value is whether this request is now complete. -func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { - if modtime.IsZero() { - return false - } - - // The Date-Modified header truncates sub-second precision, so - // use mtime < t+1s instead of mtime <= t to check for unmodified. - if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) { - h := w.Header() - delete(h, "Content-Type") - delete(h, "Content-Length") - w.WriteHeader(StatusNotModified) - return true - } - w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat)) - return false -} - -// checkETag implements If-None-Match and If-Range checks. -// The ETag must have been previously set in the ResponseWriter's headers. -// -// The return value is the effective request "Range" header to use and -// whether this request is now considered done. 
-func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) { - etag := w.Header().get("Etag") - rangeReq = r.Header.get("Range") - - // Invalidate the range request if the entity doesn't match the one - // the client was expecting. - // "If-Range: version" means "ignore the Range: header unless version matches the - // current file." - // We only support ETag versions. - // The caller must have set the ETag on the response already. - if ir := r.Header.get("If-Range"); ir != "" && ir != etag { - // TODO(bradfitz): handle If-Range requests with Last-Modified - // times instead of ETags? I'd rather not, at least for - // now. That seems like a bug/compromise in the RFC 2616, and - // I've never heard of anybody caring about that (yet). - rangeReq = "" - } - - if inm := r.Header.get("If-None-Match"); inm != "" { - // Must know ETag. - if etag == "" { - return rangeReq, false - } - - // TODO(bradfitz): non-GET/HEAD requests require more work: - // sending a different status code on matches, and - // also can't use weak cache validators (those with a "W/ - // prefix). But most users of ServeContent will be using - // it on GET or HEAD, so only support those for now. - if r.Method != "GET" && r.Method != "HEAD" { - return rangeReq, false - } - - // TODO(bradfitz): deal with comma-separated or multiple-valued - // list of If-None-match values. For now just handle the common - // case of a single item. - if inm == etag || inm == "*" { - h := w.Header() - delete(h, "Content-Type") - delete(h, "Content-Length") - w.WriteHeader(StatusNotModified) - return "", true - } - } - return rangeReq, false -} - -// name is '/'-separated, not filepath.Separator. -func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { - const indexPage = "/index.html" - - // redirect .../index.html to .../ - // can't use Redirect() because that would make the path absolute, - // which would be a problem running under StripPrefix - if strings.HasSuffix(r.URL.Path, indexPage) { - localRedirect(w, r, "./") - return - } - - f, err := fs.Open(name) - if err != nil { - // TODO expose actual error? - NotFound(w, r) - return - } - defer f.Close() - - d, err1 := f.Stat() - if err1 != nil { - // TODO expose actual error? - NotFound(w, r) - return - } - - if redirect { - // redirect to canonical path: / at end of directory url - // r.URL.Path always begins with / - url := r.URL.Path - if d.IsDir() { - if url[len(url)-1] != '/' { - localRedirect(w, r, path.Base(url)+"/") - return - } - } else { - if url[len(url)-1] == '/' { - localRedirect(w, r, "../"+path.Base(url)) - return - } - } - } - - // use contents of index.html for directory, if present - if d.IsDir() { - index := name + indexPage - ff, err := fs.Open(index) - if err == nil { - defer ff.Close() - dd, err := ff.Stat() - if err == nil { - name = index - d = dd - f = ff - } - } - } - - // Still a directory? (we didn't find an index.html file) - if d.IsDir() { - if checkLastModified(w, r, d.ModTime()) { - return - } - dirList(w, f) - return - } - - // serverContent will check modification time - sizeFunc := func() (int64, error) { return d.Size(), nil } - serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) -} - -// localRedirect gives a Moved Permanently response. -// It does not convert relative paths to absolute paths like Redirect does. -func localRedirect(w ResponseWriter, r *Request, newPath string) { - if q := r.URL.RawQuery; q != "" { - newPath += "?" 
+ q - } - w.Header().Set("Location", newPath) - w.WriteHeader(StatusMovedPermanently) -} - -// ServeFile replies to the request with the contents of the named file or directory. -func ServeFile(w ResponseWriter, r *Request, name string) { - dir, file := filepath.Split(name) - serveFile(w, r, Dir(dir), file, false) -} - -type fileHandler struct { - root FileSystem -} - -// FileServer returns a handler that serves HTTP requests -// with the contents of the file system rooted at root. -// -// To use the operating system's file system implementation, -// use http.Dir: -// -// http.Handle("/", http.FileServer(http.Dir("/tmp"))) -func FileServer(root FileSystem) Handler { - return &fileHandler{root} -} - -func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { - upath := r.URL.Path - if !strings.HasPrefix(upath, "/") { - upath = "/" + upath - r.URL.Path = upath - } - serveFile(w, r, f.root, path.Clean(upath), true) -} - -// httpRange specifies the byte range to be sent to the client. -type httpRange struct { - start, length int64 -} - -func (r httpRange) contentRange(size int64) string { - return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size) -} - -func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader { - return textproto.MIMEHeader{ - "Content-Range": {r.contentRange(size)}, - "Content-Type": {contentType}, - } -} - -// parseRange parses a Range header string as per RFC 2616. -func parseRange(s string, size int64) ([]httpRange, error) { - if s == "" { - return nil, nil // header not present - } - const b = "bytes=" - if !strings.HasPrefix(s, b) { - return nil, errors.New("invalid range") - } - var ranges []httpRange - for _, ra := range strings.Split(s[len(b):], ",") { - ra = strings.TrimSpace(ra) - if ra == "" { - continue - } - i := strings.Index(ra, "-") - if i < 0 { - return nil, errors.New("invalid range") - } - start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) - var r httpRange - if start == "" { - // If no start is specified, end specifies the - // range start relative to the end of the file. - i, err := strconv.ParseInt(end, 10, 64) - if err != nil { - return nil, errors.New("invalid range") - } - if i > size { - i = size - } - r.start = size - i - r.length = size - r.start - } else { - i, err := strconv.ParseInt(start, 10, 64) - if err != nil || i > size || i < 0 { - return nil, errors.New("invalid range") - } - r.start = i - if end == "" { - // If no end is specified, range extends to end of the file. - r.length = size - r.start - } else { - i, err := strconv.ParseInt(end, 10, 64) - if err != nil || r.start > i { - return nil, errors.New("invalid range") - } - if i >= size { - i = size - 1 - } - r.length = i - r.start + 1 - } - } - ranges = append(ranges, r) - } - return ranges, nil -} - -// countingWriter counts how many bytes have been written to it. -type countingWriter int64 - -func (w *countingWriter) Write(p []byte) (n int, err error) { - *w += countingWriter(len(p)) - return len(p), nil -} - -// rangesMIMESize returns the number of bytes it takes to encode the -// provided ranges as a multipart response. 
-func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) { - var w countingWriter - mw := multipart.NewWriter(&w) - for _, ra := range ranges { - mw.CreatePart(ra.mimeHeader(contentType, contentSize)) - encSize += ra.length - } - mw.Close() - encSize += int64(w) - return -} - -func sumRangesSize(ranges []httpRange) (size int64) { - for _, ra := range ranges { - size += ra.length - } - return -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/header.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/header.go deleted file mode 100644 index 153b94370..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/header.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "io" - "net/textproto" - "sort" - "strings" - "sync" - "time" -) - -var raceEnabled = false // set by race.go - -// A Header represents the key-value pairs in an HTTP header. -type Header map[string][]string - -// Add adds the key, value pair to the header. -// It appends to any existing values associated with key. -func (h Header) Add(key, value string) { - textproto.MIMEHeader(h).Add(key, value) -} - -// Set sets the header entries associated with key to -// the single element value. It replaces any existing -// values associated with key. -func (h Header) Set(key, value string) { - textproto.MIMEHeader(h).Set(key, value) -} - -// Get gets the first value associated with the given key. -// If there are no values associated with the key, Get returns "". -// To access multiple values of a key, access the map directly -// with CanonicalHeaderKey. -func (h Header) Get(key string) string { - return textproto.MIMEHeader(h).Get(key) -} - -// get is like Get, but key must already be in CanonicalHeaderKey form. -func (h Header) get(key string) string { - if v := h[key]; len(v) > 0 { - return v[0] - } - return "" -} - -// Del deletes the values associated with key. -func (h Header) Del(key string) { - textproto.MIMEHeader(h).Del(key) -} - -// Write writes a header in wire format. -func (h Header) Write(w io.Writer) error { - return h.WriteSubset(w, nil) -} - -func (h Header) clone() Header { - h2 := make(Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -var timeFormats = []string{ - TimeFormat, - time.RFC850, - time.ANSIC, -} - -// ParseTime parses a time header (such as the Date: header), -// trying each of the three formats allowed by HTTP/1.1: -// TimeFormat, time.RFC850, and time.ANSIC. -func ParseTime(text string) (t time.Time, err error) { - for _, layout := range timeFormats { - t, err = time.Parse(layout, text) - if err == nil { - return - } - } - return -} - -var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") - -type writeStringer interface { - WriteString(string) (int, error) -} - -// stringWriter implements WriteString on a Writer. -type stringWriter struct { - w io.Writer -} - -func (w stringWriter) WriteString(s string) (n int, err error) { - return w.w.Write([]byte(s)) -} - -type keyValues struct { - key string - values []string -} - -// A headerSorter implements sort.Interface by sorting a []keyValues -// by key. It's used as a pointer, so it can fit in a sort.Interface -// interface value without allocation. 
-type headerSorter struct { - kvs []keyValues -} - -func (s *headerSorter) Len() int { return len(s.kvs) } -func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } -func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key } - -var headerSorterPool = sync.Pool{ - New: func() interface{} { return new(headerSorter) }, -} - -// sortedKeyValues returns h's keys sorted in the returned kvs -// slice. The headerSorter used to sort is also returned, for possible -// return to headerSorterCache. -func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) { - hs = headerSorterPool.Get().(*headerSorter) - if cap(hs.kvs) < len(h) { - hs.kvs = make([]keyValues, 0, len(h)) - } - kvs = hs.kvs[:0] - for k, vv := range h { - if !exclude[k] { - kvs = append(kvs, keyValues{k, vv}) - } - } - hs.kvs = kvs - sort.Sort(hs) - return kvs, hs -} - -// WriteSubset writes a header in wire format. -// If exclude is not nil, keys where exclude[key] == true are not written. -func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error { - ws, ok := w.(writeStringer) - if !ok { - ws = stringWriter{w} - } - kvs, sorter := h.sortedKeyValues(exclude) - for _, kv := range kvs { - for _, v := range kv.values { - v = headerNewlineToSpace.Replace(v) - v = textproto.TrimString(v) - for _, s := range []string{kv.key, ": ", v, "\r\n"} { - if _, err := ws.WriteString(s); err != nil { - return err - } - } - } - } - headerSorterPool.Put(sorter) - return nil -} - -// CanonicalHeaderKey returns the canonical format of the -// header key s. The canonicalization converts the first -// letter and any letter following a hyphen to upper case; -// the rest are converted to lowercase. For example, the -// canonical key for "accept-encoding" is "Accept-Encoding". -func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) } - -// hasToken reports whether token appears with v, ASCII -// case-insensitive, with space or comma boundaries. -// token must be all lowercase. -// v may contain mixed cased. -func hasToken(v, token string) bool { - if len(token) > len(v) || token == "" { - return false - } - if v == token { - return true - } - for sp := 0; sp <= len(v)-len(token); sp++ { - // Check that first character is good. - // The token is ASCII, so checking only a single byte - // is sufficient. We skip this potential starting - // position if both the first byte and its potential - // ASCII uppercase equivalent (b|0x20) don't match. - // False positives ('^' => '~') are caught by EqualFold. - if b := v[sp]; b != token[0] && b|0x20 != token[0] { - continue - } - // Check that start pos is on a valid token boundary. - if sp > 0 && !isTokenBoundary(v[sp-1]) { - continue - } - // Check that end pos is on a valid token boundary. - if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) { - continue - } - if strings.EqualFold(v[sp:sp+len(token)], token) { - return true - } - } - return false -} - -func isTokenBoundary(b byte) bool { - return b == ' ' || b == ',' || b == '\t' -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/jar.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/jar.go deleted file mode 100644 index 5c3de0dad..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/jar.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "net/url" -) - -// A CookieJar manages storage and use of cookies in HTTP requests. -// -// Implementations of CookieJar must be safe for concurrent use by multiple -// goroutines. -// -// The net/http/cookiejar package provides a CookieJar implementation. -type CookieJar interface { - // SetCookies handles the receipt of the cookies in a reply for the - // given URL. It may or may not choose to save the cookies, depending - // on the jar's policy and implementation. - SetCookies(u *url.URL, cookies []*Cookie) - - // Cookies returns the cookies to send in a request for the given URL. - // It is up to the implementation to honor the standard cookie use - // restrictions such as in RFC 6265. - Cookies(u *url.URL) []*Cookie -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/lex.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/lex.go deleted file mode 100644 index cb33318f4..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/lex.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -// This file deals with lexical matters of HTTP - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -func isToken(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !isToken(r) -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/race.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/race.go deleted file mode 100644 index 766503967..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/race.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build race - -package http - -func init() { - raceEnabled = true -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/request.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/request.go deleted file mode 100644 index a67092066..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/request.go +++ /dev/null @@ -1,875 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Request reading and parsing. - -package http - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/textproto" - "net/url" - "strconv" - "strings" - "sync" -) - -const ( - maxValueLength = 4096 - maxHeaderLines = 1024 - chunkSize = 4 << 10 // 4 KB chunks - defaultMaxMemory = 32 << 20 // 32 MB -) - -// ErrMissingFile is returned by FormFile when the provided file field name -// is either not present in the request or not a file field. -var ErrMissingFile = errors.New("http: no such file") - -// HTTP request parsing errors. -type ProtocolError struct { - ErrorString string -} - -func (err *ProtocolError) Error() string { return err.ErrorString } - -var ( - ErrHeaderTooLong = &ProtocolError{"header too long"} - ErrShortBody = &ProtocolError{"entity body too short"} - ErrNotSupported = &ProtocolError{"feature not supported"} - ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} - ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"} - ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"} - ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"} -) - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// Headers that Request.Write handles itself and should be skipped. -var reqWriteExcludeHeader = map[string]bool{ - "Host": true, // not in Header map anyway - "User-Agent": true, - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// A Request represents an HTTP request received by a server -// or to be sent by a client. -// -// The field semantics differ slightly between client and server -// usage. In addition to the notes on the fields below, see the -// documentation for Request.Write and RoundTripper. -type Request struct { - // Method specifies the HTTP method (GET, POST, PUT, etc.). - // For client requests an empty string means GET. - Method string - - // URL specifies either the URI being requested (for server - // requests) or the URL to access (for client requests). - // - // For server requests the URL is parsed from the URI - // supplied on the Request-Line as stored in RequestURI. For - // most requests, fields other than Path and RawQuery will be - // empty. (See RFC 2616, Section 5.1.2) - // - // For client requests, the URL's Host specifies the server to - // connect to, while the Request's Host field optionally - // specifies the Host header value to send in the HTTP - // request. - URL *url.URL - - // The protocol version for incoming requests. - // Client requests always use HTTP/1.1. - Proto string // "HTTP/1.0" - ProtoMajor int // 1 - ProtoMinor int // 0 - - // A header maps request lines to their values. - // If the header says - // - // accept-encoding: gzip, deflate - // Accept-Language: en-us - // Connection: keep-alive - // - // then - // - // Header = map[string][]string{ - // "Accept-Encoding": {"gzip, deflate"}, - // "Accept-Language": {"en-us"}, - // "Connection": {"keep-alive"}, - // } - // - // HTTP defines that header names are case-insensitive. - // The request parser implements this by canonicalizing the - // name, making the first character and any characters - // following a hyphen uppercase and the rest lowercase. 
- // - // For client requests certain headers are automatically - // added and may override values in Header. - // - // See the documentation for the Request.Write method. - Header Header - - // Body is the request's body. - // - // For client requests a nil body means the request has no - // body, such as a GET request. The HTTP Client's Transport - // is responsible for calling the Close method. - // - // For server requests the Request Body is always non-nil - // but will return EOF immediately when no body is present. - // The Server will close the request body. The ServeHTTP - // Handler does not need to. - Body io.ReadCloser - - // ContentLength records the length of the associated content. - // The value -1 indicates that the length is unknown. - // Values >= 0 indicate that the given number of bytes may - // be read from Body. - // For client requests, a value of 0 means unknown if Body is not nil. - ContentLength int64 - - // TransferEncoding lists the transfer encodings from outermost to - // innermost. An empty list denotes the "identity" encoding. - // TransferEncoding can usually be ignored; chunked encoding is - // automatically added and removed as necessary when sending and - // receiving requests. - TransferEncoding []string - - // Close indicates whether to close the connection after - // replying to this request (for servers) or after sending - // the request (for clients). - Close bool - - // For server requests Host specifies the host on which the - // URL is sought. Per RFC 2616, this is either the value of - // the "Host" header or the host name given in the URL itself. - // It may be of the form "host:port". - // - // For client requests Host optionally overrides the Host - // header to send. If empty, the Request.Write method uses - // the value of URL.Host. - Host string - - // Form contains the parsed form data, including both the URL - // field's query parameters and the POST or PUT form data. - // This field is only available after ParseForm is called. - // The HTTP client ignores Form and uses Body instead. - Form url.Values - - // PostForm contains the parsed form data from POST or PUT - // body parameters. - // This field is only available after ParseForm is called. - // The HTTP client ignores PostForm and uses Body instead. - PostForm url.Values - - // MultipartForm is the parsed multipart form, including file uploads. - // This field is only available after ParseMultipartForm is called. - // The HTTP client ignores MultipartForm and uses Body instead. - MultipartForm *multipart.Form - - // Trailer specifies additional headers that are sent after the request - // body. - // - // For server requests the Trailer map initially contains only the - // trailer keys, with nil values. (The client declares which trailers it - // will later send.) While the handler is reading from Body, it must - // not reference Trailer. After reading from Body returns EOF, Trailer - // can be read again and will contain non-nil values, if they were sent - // by the client. - // - // For client requests Trailer must be initialized to a map containing - // the trailer keys to later send. The values may be nil or their final - // values. The ContentLength must be 0 or -1, to send a chunked request. - // After the HTTP request is sent the map values can be updated while - // the request body is read. Once the body returns EOF, the caller must - // not mutate Trailer. - // - // Few HTTP clients, servers, or proxies support HTTP trailers. 
- Trailer Header - - // RemoteAddr allows HTTP servers and other software to record - // the network address that sent the request, usually for - // logging. This field is not filled in by ReadRequest and - // has no defined format. The HTTP server in this package - // sets RemoteAddr to an "IP:port" address before invoking a - // handler. - // This field is ignored by the HTTP client. - RemoteAddr string - - // RequestURI is the unmodified Request-URI of the - // Request-Line (RFC 2616, Section 5.1) as sent by the client - // to a server. Usually the URL field should be used instead. - // It is an error to set this field in an HTTP client request. - RequestURI string - - // TLS allows HTTP servers and other software to record - // information about the TLS connection on which the request - // was received. This field is not filled in by ReadRequest. - // The HTTP server in this package sets the field for - // TLS-enabled connections before invoking a handler; - // otherwise it leaves the field nil. - // This field is ignored by the HTTP client. - TLS *tls.ConnectionState -} - -// ProtoAtLeast reports whether the HTTP protocol used -// in the request is at least major.minor. -func (r *Request) ProtoAtLeast(major, minor int) bool { - return r.ProtoMajor > major || - r.ProtoMajor == major && r.ProtoMinor >= minor -} - -// UserAgent returns the client's User-Agent, if sent in the request. -func (r *Request) UserAgent() string { - return r.Header.Get("User-Agent") -} - -// Cookies parses and returns the HTTP cookies sent with the request. -func (r *Request) Cookies() []*Cookie { - return readCookies(r.Header, "") -} - -var ErrNoCookie = errors.New("http: named cookie not present") - -// Cookie returns the named cookie provided in the request or -// ErrNoCookie if not found. -func (r *Request) Cookie(name string) (*Cookie, error) { - for _, c := range readCookies(r.Header, name) { - return c, nil - } - return nil, ErrNoCookie -} - -// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, -// AddCookie does not attach more than one Cookie header field. That -// means all cookies, if any, are written into the same line, -// separated by semicolon. -func (r *Request) AddCookie(c *Cookie) { - s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) - if c := r.Header.Get("Cookie"); c != "" { - r.Header.Set("Cookie", c+"; "+s) - } else { - r.Header.Set("Cookie", s) - } -} - -// Referer returns the referring URL, if sent in the request. -// -// Referer is misspelled as in the request itself, a mistake from the -// earliest days of HTTP. This value can also be fetched from the -// Header map as Header["Referer"]; the benefit of making it available -// as a method is that the compiler can diagnose programs that use the -// alternate (correct English) spelling req.Referrer() but cannot -// diagnose programs that use Header["Referrer"]. -func (r *Request) Referer() string { - return r.Header.Get("Referer") -} - -// multipartByReader is a sentinel value. -// Its presence in Request.MultipartForm indicates that parsing of the request -// body has been handed off to a MultipartReader instead of ParseMultipartFrom. -var multipartByReader = &multipart.Form{ - Value: make(map[string][]string), - File: make(map[string][]*multipart.FileHeader), -} - -// MultipartReader returns a MIME multipart reader if this is a -// multipart/form-data POST request, else returns nil and an error. 
-// Use this function instead of ParseMultipartForm to -// process the request body as a stream. -func (r *Request) MultipartReader() (*multipart.Reader, error) { - if r.MultipartForm == multipartByReader { - return nil, errors.New("http: MultipartReader called twice") - } - if r.MultipartForm != nil { - return nil, errors.New("http: multipart handled by ParseMultipartForm") - } - r.MultipartForm = multipartByReader - return r.multipartReader() -} - -func (r *Request) multipartReader() (*multipart.Reader, error) { - v := r.Header.Get("Content-Type") - if v == "" { - return nil, ErrNotMultipart - } - d, params, err := mime.ParseMediaType(v) - if err != nil || d != "multipart/form-data" { - return nil, ErrNotMultipart - } - boundary, ok := params["boundary"] - if !ok { - return nil, ErrMissingBoundary - } - return multipart.NewReader(r.Body, boundary), nil -} - -// Return value if nonempty, def otherwise. -func valueOrDefault(value, def string) string { - if value != "" { - return value - } - return def -} - -// NOTE: This is not intended to reflect the actual Go version being used. -// It was changed from "Go http package" to "Go 1.1 package http" at the -// time of the Go 1.1 release because the former User-Agent had ended up -// on a blacklist for some intrusion detection systems. -// See https://codereview.appspot.com/7532043. -const defaultUserAgent = "Go 1.1 package http" - -// Write writes an HTTP/1.1 request -- header and body -- in wire format. -// This method consults the following fields of the request: -// Host -// URL -// Method (defaults to "GET") -// Header -// ContentLength -// TransferEncoding -// Body -// -// If Body is present, Content-Length is <= 0 and TransferEncoding -// hasn't been set to "identity", Write adds "Transfer-Encoding: -// chunked" to the header. Body is closed after it is sent. -func (r *Request) Write(w io.Writer) error { - return r.write(w, false, nil) -} - -// WriteProxy is like Write but writes the request in the form -// expected by an HTTP proxy. In particular, WriteProxy writes the -// initial Request-URI line of the request with an absolute URI, per -// section 5.1.2 of RFC 2616, including the scheme and host. -// In either case, WriteProxy also writes a Host header, using -// either r.Host or r.URL.Host. -func (r *Request) WriteProxy(w io.Writer) error { - return r.write(w, true, nil) -} - -// extraHeaders may be nil -func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error { - host := req.Host - if host == "" { - if req.URL == nil { - return errors.New("http: Request.Write on Request with no Host or URL set") - } - host = req.URL.Host - } - - ruri := req.URL.RequestURI() - if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" { - ruri = req.URL.Scheme + "://" + host + ruri - } else if req.Method == "CONNECT" && req.URL.Path == "" { - // CONNECT requests normally give just the host and port, not a full URL. - ruri = host - } - // TODO(bradfitz): escape at least newlines in ruri? - - // Wrap the writer in a bufio Writer if it's not already buffered. - // Don't always call NewWriter, as that forces a bytes.Buffer - // and other small bufio Writers to have a minimum 4k buffer - // size. 
- var bw *bufio.Writer - if _, ok := w.(io.ByteWriter); !ok { - bw = bufio.NewWriter(w) - w = bw - } - - fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) - - // Header lines - fmt.Fprintf(w, "Host: %s\r\n", host) - - // Use the defaultUserAgent unless the Header contains one, which - // may be blank to not send the header. - userAgent := defaultUserAgent - if req.Header != nil { - if ua := req.Header["User-Agent"]; len(ua) > 0 { - userAgent = ua[0] - } - } - if userAgent != "" { - fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent) - } - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(req) - if err != nil { - return err - } - err = tw.WriteHeader(w) - if err != nil { - return err - } - - err = req.Header.WriteSubset(w, reqWriteExcludeHeader) - if err != nil { - return err - } - - if extraHeaders != nil { - err = extraHeaders.Write(w) - if err != nil { - return err - } - } - - io.WriteString(w, "\r\n") - - // Write body and trailer - err = tw.WriteBody(w) - if err != nil { - return err - } - - if bw != nil { - return bw.Flush() - } - return nil -} - -// ParseHTTPVersion parses a HTTP version string. -// "HTTP/1.0" returns (1, 0, true). -func ParseHTTPVersion(vers string) (major, minor int, ok bool) { - const Big = 1000000 // arbitrary upper bound - switch vers { - case "HTTP/1.1": - return 1, 1, true - case "HTTP/1.0": - return 1, 0, true - } - if !strings.HasPrefix(vers, "HTTP/") { - return 0, 0, false - } - dot := strings.Index(vers, ".") - if dot < 0 { - return 0, 0, false - } - major, err := strconv.Atoi(vers[5:dot]) - if err != nil || major < 0 || major > Big { - return 0, 0, false - } - minor, err = strconv.Atoi(vers[dot+1:]) - if err != nil || minor < 0 || minor > Big { - return 0, 0, false - } - return major, minor, true -} - -// NewRequest returns a new Request given a method, URL, and optional body. -// -// If the provided body is also an io.Closer, the returned -// Request.Body is set to body and will be closed by the Client -// methods Do, Post, and PostForm, and Transport.RoundTrip. -func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - req := &Request{ - Method: method, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(Header), - Body: rc, - Host: u.Host, - } - if body != nil { - switch v := body.(type) { - case *bytes.Buffer: - req.ContentLength = int64(v.Len()) - case *bytes.Reader: - req.ContentLength = int64(v.Len()) - case *strings.Reader: - req.ContentLength = int64(v.Len()) - } - } - - return req, nil -} - -// SetBasicAuth sets the request's Authorization header to use HTTP -// Basic Authentication with the provided username and password. -// -// With HTTP Basic Authentication the provided username and password -// are not encrypted. -func (r *Request) SetBasicAuth(username, password string) { - r.Header.Set("Authorization", "Basic "+basicAuth(username, password)) -} - -// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts. 
-func parseRequestLine(line string) (method, requestURI, proto string, ok bool) { - s1 := strings.Index(line, " ") - s2 := strings.Index(line[s1+1:], " ") - if s1 < 0 || s2 < 0 { - return - } - s2 += s1 + 1 - return line[:s1], line[s1+1 : s2], line[s2+1:], true -} - -var textprotoReaderPool sync.Pool - -func newTextprotoReader(br *bufio.Reader) *textproto.Reader { - if v := textprotoReaderPool.Get(); v != nil { - tr := v.(*textproto.Reader) - tr.R = br - return tr - } - return textproto.NewReader(br) -} - -func putTextprotoReader(r *textproto.Reader) { - r.R = nil - textprotoReaderPool.Put(r) -} - -// ReadRequest reads and parses a request from b. -func ReadRequest(b *bufio.Reader) (req *Request, err error) { - - tp := newTextprotoReader(b) - req = new(Request) - - // First line: GET /index.html HTTP/1.0 - var s string - if s, err = tp.ReadLine(); err != nil { - return nil, err - } - defer func() { - putTextprotoReader(tp) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - var ok bool - req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s) - if !ok { - return nil, &badStringError{"malformed HTTP request", s} - } - rawurl := req.RequestURI - if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", req.Proto} - } - - // CONNECT requests are used two different ways, and neither uses a full URL: - // The standard use is to tunnel HTTPS through an HTTP proxy. - // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is - // just the authority section of a URL. This information should go in req.URL.Host. - // - // The net/rpc package also uses CONNECT, but there the parameter is a path - // that starts with a slash. It can be parsed with the regular URL parser, - // and the path will end up in req.URL.Path, where it needs to be in order for - // RPC to work. - justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/") - if justAuthority { - rawurl = "http://" + rawurl - } - - if req.URL, err = url.ParseRequestURI(rawurl); err != nil { - return nil, err - } - - if justAuthority { - // Strip the bogus "http://" back off. - req.URL.Scheme = "" - } - - // Subsequent lines: Key: value. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err - } - req.Header = Header(mimeHeader) - - // RFC2616: Must treat - // GET /index.html HTTP/1.1 - // Host: www.google.com - // and - // GET http://www.google.com/index.html HTTP/1.1 - // Host: doesntmatter - // the same. In the second case, any Host line is ignored. - req.Host = req.URL.Host - if req.Host == "" { - req.Host = req.Header.get("Host") - } - delete(req.Header, "Host") - - fixPragmaCacheControl(req.Header) - - err = readTransfer(req, b) - if err != nil { - return nil, err - } - - return req, nil -} - -// MaxBytesReader is similar to io.LimitReader but is intended for -// limiting the size of incoming request bodies. In contrast to -// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a -// non-EOF error for a Read beyond the limit, and Closes the -// underlying reader when its Close method is called. -// -// MaxBytesReader prevents clients from accidentally or maliciously -// sending a large request and wasting server resources. 
-func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { - return &maxBytesReader{w: w, r: r, n: n} -} - -type maxBytesReader struct { - w ResponseWriter - r io.ReadCloser // underlying reader - n int64 // max bytes remaining - stopped bool -} - -func (l *maxBytesReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - if !l.stopped { - l.stopped = true - if res, ok := l.w.(*response); ok { - res.requestTooLarge() - } - } - return 0, errors.New("http: request body too large") - } - if int64(len(p)) > l.n { - p = p[:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - return -} - -func (l *maxBytesReader) Close() error { - return l.r.Close() -} - -func copyValues(dst, src url.Values) { - for k, vs := range src { - for _, value := range vs { - dst.Add(k, value) - } - } -} - -func parsePostForm(r *Request) (vs url.Values, err error) { - if r.Body == nil { - err = errors.New("missing form body") - return - } - ct := r.Header.Get("Content-Type") - // RFC 2616, section 7.2.1 - empty type - // SHOULD be treated as application/octet-stream - if ct == "" { - ct = "application/octet-stream" - } - ct, _, err = mime.ParseMediaType(ct) - switch { - case ct == "application/x-www-form-urlencoded": - var reader io.Reader = r.Body - maxFormSize := int64(1<<63 - 1) - if _, ok := r.Body.(*maxBytesReader); !ok { - maxFormSize = int64(10 << 20) // 10 MB is a lot of text. - reader = io.LimitReader(r.Body, maxFormSize+1) - } - b, e := ioutil.ReadAll(reader) - if e != nil { - if err == nil { - err = e - } - break - } - if int64(len(b)) > maxFormSize { - err = errors.New("http: POST too large") - return - } - vs, e = url.ParseQuery(string(b)) - if err == nil { - err = e - } - case ct == "multipart/form-data": - // handled by ParseMultipartForm (which is calling us, or should be) - // TODO(bradfitz): there are too many possible - // orders to call too many functions here. - // Clean this up and write more tests. - // request_test.go contains the start of this, - // in TestParseMultipartFormOrder and others. - } - return -} - -// ParseForm parses the raw query from the URL and updates r.Form. -// -// For POST or PUT requests, it also parses the request body as a form and -// put the results into both r.PostForm and r.Form. -// POST and PUT body parameters take precedence over URL query string values -// in r.Form. -// -// If the request Body's size has not already been limited by MaxBytesReader, -// the size is capped at 10MB. -// -// ParseMultipartForm calls ParseForm automatically. -// It is idempotent. -func (r *Request) ParseForm() error { - var err error - if r.PostForm == nil { - if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" { - r.PostForm, err = parsePostForm(r) - } - if r.PostForm == nil { - r.PostForm = make(url.Values) - } - } - if r.Form == nil { - if len(r.PostForm) > 0 { - r.Form = make(url.Values) - copyValues(r.Form, r.PostForm) - } - var newValues url.Values - if r.URL != nil { - var e error - newValues, e = url.ParseQuery(r.URL.RawQuery) - if err == nil { - err = e - } - } - if newValues == nil { - newValues = make(url.Values) - } - if r.Form == nil { - r.Form = newValues - } else { - copyValues(r.Form, newValues) - } - } - return err -} - -// ParseMultipartForm parses a request body as multipart/form-data. -// The whole request body is parsed and up to a total of maxMemory bytes of -// its file parts are stored in memory, with the remainder stored on -// disk in temporary files. -// ParseMultipartForm calls ParseForm if necessary. 
-// After one call to ParseMultipartForm, subsequent calls have no effect. -func (r *Request) ParseMultipartForm(maxMemory int64) error { - if r.MultipartForm == multipartByReader { - return errors.New("http: multipart handled by MultipartReader") - } - if r.Form == nil { - err := r.ParseForm() - if err != nil { - return err - } - } - if r.MultipartForm != nil { - return nil - } - - mr, err := r.multipartReader() - if err != nil { - return err - } - - f, err := mr.ReadForm(maxMemory) - if err != nil { - return err - } - for k, v := range f.Value { - r.Form[k] = append(r.Form[k], v...) - } - r.MultipartForm = f - - return nil -} - -// FormValue returns the first value for the named component of the query. -// POST and PUT body parameters take precedence over URL query string values. -// FormValue calls ParseMultipartForm and ParseForm if necessary. -// To access multiple values of the same key use ParseForm. -func (r *Request) FormValue(key string) string { - if r.Form == nil { - r.ParseMultipartForm(defaultMaxMemory) - } - if vs := r.Form[key]; len(vs) > 0 { - return vs[0] - } - return "" -} - -// PostFormValue returns the first value for the named component of the POST -// or PUT request body. URL query parameters are ignored. -// PostFormValue calls ParseMultipartForm and ParseForm if necessary. -func (r *Request) PostFormValue(key string) string { - if r.PostForm == nil { - r.ParseMultipartForm(defaultMaxMemory) - } - if vs := r.PostForm[key]; len(vs) > 0 { - return vs[0] - } - return "" -} - -// FormFile returns the first file for the provided form key. -// FormFile calls ParseMultipartForm and ParseForm if necessary. -func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { - if r.MultipartForm == multipartByReader { - return nil, nil, errors.New("http: multipart handled by MultipartReader") - } - if r.MultipartForm == nil { - err := r.ParseMultipartForm(defaultMaxMemory) - if err != nil { - return nil, nil, err - } - } - if r.MultipartForm != nil && r.MultipartForm.File != nil { - if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { - f, err := fhs[0].Open() - return f, fhs[0], err - } - } - return nil, nil, ErrMissingFile -} - -func (r *Request) expectsContinue() bool { - return hasToken(r.Header.get("Expect"), "100-continue") -} - -func (r *Request) wantsHttp10KeepAlive() bool { - if r.ProtoMajor != 1 || r.ProtoMinor != 0 { - return false - } - return hasToken(r.Header.get("Connection"), "keep-alive") -} - -func (r *Request) wantsClose() bool { - return hasToken(r.Header.get("Connection"), "close") -} - -func (r *Request) closeBody() { - if r.Body != nil { - r.Body.Close() - } -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/response.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/response.go deleted file mode 100644 index e1209a237..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/response.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Response reading and parsing. - -package http - -import ( - "bufio" - "bytes" - "errors" - "github.com/masterzen/azure-sdk-for-go/core/tls" - "io" - "net/textproto" - "net/url" - "strconv" - "strings" -) - -var respExcludeHeader = map[string]bool{ - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// Response represents the response from an HTTP request. 
-// -type Response struct { - Status string // e.g. "200 OK" - StatusCode int // e.g. 200 - Proto string // e.g. "HTTP/1.0" - ProtoMajor int // e.g. 1 - ProtoMinor int // e.g. 0 - - // Header maps header keys to values. If the response had multiple - // headers with the same key, they may be concatenated, with comma - // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers - // be semantically equivalent to a comma-delimited sequence.) Values - // duplicated by other fields in this struct (e.g., ContentLength) are - // omitted from Header. - // - // Keys in the map are canonicalized (see CanonicalHeaderKey). - Header Header - - // Body represents the response body. - // - // The http Client and Transport guarantee that Body is always - // non-nil, even on responses without a body or responses with - // a zero-length body. It is the caller's responsibility to - // close Body. - // - // The Body is automatically dechunked if the server replied - // with a "chunked" Transfer-Encoding. - Body io.ReadCloser - - // ContentLength records the length of the associated content. The - // value -1 indicates that the length is unknown. Unless Request.Method - // is "HEAD", values >= 0 indicate that the given number of bytes may - // be read from Body. - ContentLength int64 - - // Contains transfer encodings from outer-most to inner-most. Value is - // nil, means that "identity" encoding is used. - TransferEncoding []string - - // Close records whether the header directed that the connection be - // closed after reading Body. The value is advice for clients: neither - // ReadResponse nor Response.Write ever closes a connection. - Close bool - - // Trailer maps trailer keys to values, in the same - // format as the header. - Trailer Header - - // The Request that was sent to obtain this Response. - // Request's Body is nil (having already been consumed). - // This is only populated for Client requests. - Request *Request - - // TLS contains information about the TLS connection on which the - // response was received. It is nil for unencrypted responses. - // The pointer is shared between responses and should not be - // modified. - TLS *tls.ConnectionState -} - -// Cookies parses and returns the cookies set in the Set-Cookie headers. -func (r *Response) Cookies() []*Cookie { - return readSetCookies(r.Header) -} - -var ErrNoLocation = errors.New("http: no Location header in response") - -// Location returns the URL of the response's "Location" header, -// if present. Relative redirects are resolved relative to -// the Response's Request. ErrNoLocation is returned if no -// Location header is present. -func (r *Response) Location() (*url.URL, error) { - lv := r.Header.Get("Location") - if lv == "" { - return nil, ErrNoLocation - } - if r.Request != nil && r.Request.URL != nil { - return r.Request.URL.Parse(lv) - } - return url.Parse(lv) -} - -// ReadResponse reads and returns an HTTP response from r. -// The req parameter optionally specifies the Request that corresponds -// to this Response. If nil, a GET request is assumed. -// Clients must call resp.Body.Close when finished reading resp.Body. -// After that call, clients can inspect resp.Trailer to find key/value -// pairs included in the response trailer. -func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) { - tp := textproto.NewReader(r) - resp := &Response{ - Request: req, - } - - // Parse the first line of the response. 
- line, err := tp.ReadLine() - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - f := strings.SplitN(line, " ", 3) - if len(f) < 2 { - return nil, &badStringError{"malformed HTTP response", line} - } - reasonPhrase := "" - if len(f) > 2 { - reasonPhrase = f[2] - } - resp.Status = f[1] + " " + reasonPhrase - resp.StatusCode, err = strconv.Atoi(f[1]) - if err != nil { - return nil, &badStringError{"malformed HTTP status code", f[1]} - } - - resp.Proto = f[0] - var ok bool - if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", resp.Proto} - } - - // Parse the response headers. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - resp.Header = Header(mimeHeader) - - fixPragmaCacheControl(resp.Header) - - err = readTransfer(resp, r) - if err != nil { - return nil, err - } - - return resp, nil -} - -// RFC2616: Should treat -// Pragma: no-cache -// like -// Cache-Control: no-cache -func fixPragmaCacheControl(header Header) { - if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { - if _, presentcc := header["Cache-Control"]; !presentcc { - header["Cache-Control"] = []string{"no-cache"} - } - } -} - -// ProtoAtLeast reports whether the HTTP protocol used -// in the response is at least major.minor. -func (r *Response) ProtoAtLeast(major, minor int) bool { - return r.ProtoMajor > major || - r.ProtoMajor == major && r.ProtoMinor >= minor -} - -// Writes the response (header, body and trailer) in wire format. This method -// consults the following fields of the response: -// -// StatusCode -// ProtoMajor -// ProtoMinor -// Request.Method -// TransferEncoding -// Trailer -// Body -// ContentLength -// Header, values for non-canonical keys will have unpredictable behavior -// -// Body is closed after it is sent. -func (r *Response) Write(w io.Writer) error { - // Status line - text := r.Status - if text == "" { - var ok bool - text, ok = statusText[r.StatusCode] - if !ok { - text = "status code " + strconv.Itoa(r.StatusCode) - } - } - protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor) - statusCode := strconv.Itoa(r.StatusCode) + " " - text = strings.TrimPrefix(text, statusCode) - if _, err := io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n"); err != nil { - return err - } - - // Clone it, so we can modify r1 as needed. - r1 := new(Response) - *r1 = *r - if r1.ContentLength == 0 && r1.Body != nil { - // Is it actually 0 length? Or just unknown? - var buf [1]byte - n, err := r1.Body.Read(buf[:]) - if err != nil && err != io.EOF { - return err - } - if n == 0 { - // Reset it to a known zero reader, in case underlying one - // is unhappy being read repeatedly. - r1.Body = eofReader - } else { - r1.ContentLength = -1 - r1.Body = struct { - io.Reader - io.Closer - }{ - io.MultiReader(bytes.NewReader(buf[:1]), r.Body), - r.Body, - } - } - } - // If we're sending a non-chunked HTTP/1.1 response without a - // content-length, the only way to do that is the old HTTP/1.0 - // way, by noting the EOF with a connection close, so we need - // to set Close. 
- if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) { - r1.Close = true - } - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(r1) - if err != nil { - return err - } - err = tw.WriteHeader(w) - if err != nil { - return err - } - - // Rest of header - err = r.Header.WriteSubset(w, respExcludeHeader) - if err != nil { - return err - } - - // contentLengthAlreadySent may have been already sent for - // POST/PUT requests, even if zero length. See Issue 8180. - contentLengthAlreadySent := tw.shouldSendContentLength() - if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent { - if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil { - return err - } - } - - // End-of-header - if _, err := io.WriteString(w, "\r\n"); err != nil { - return err - } - - // Write body and trailer - err = tw.WriteBody(w) - if err != nil { - return err - } - - // Success - return nil -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/server.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/server.go deleted file mode 100644 index eae097eb8..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/server.go +++ /dev/null @@ -1,2052 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP server. See RFC 2616. - -package http - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/url" - "os" - "path" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Errors introduced by the HTTP server. -var ( - ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") - ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") - ErrHijacked = errors.New("Conn has been hijacked") - ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") -) - -// Objects implementing the Handler interface can be -// registered to serve a particular path or subtree -// in the HTTP server. -// -// ServeHTTP should write reply headers and data to the ResponseWriter -// and then return. Returning signals that the request is finished -// and that the HTTP server can move on to the next request on -// the connection. -type Handler interface { - ServeHTTP(ResponseWriter, *Request) -} - -// A ResponseWriter interface is used by an HTTP handler to -// construct an HTTP response. -type ResponseWriter interface { - // Header returns the header map that will be sent by WriteHeader. - // Changing the header after a call to WriteHeader (or Write) has - // no effect. - Header() Header - - // Write writes the data to the connection as part of an HTTP reply. - // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) - // before writing the data. If the Header does not contain a - // Content-Type line, Write adds a Content-Type set to the result of passing - // the initial 512 bytes of written data to DetectContentType. - Write([]byte) (int, error) - - // WriteHeader sends an HTTP response header with status code. - // If WriteHeader is not called explicitly, the first call to Write - // will trigger an implicit WriteHeader(http.StatusOK). - // Thus explicit calls to WriteHeader are mainly used to - // send error codes. 
- WriteHeader(int) -} - -// The Flusher interface is implemented by ResponseWriters that allow -// an HTTP handler to flush buffered data to the client. -// -// Note that even for ResponseWriters that support Flush, -// if the client is connected through an HTTP proxy, -// the buffered data may not reach the client until the response -// completes. -type Flusher interface { - // Flush sends any buffered data to the client. - Flush() -} - -// The Hijacker interface is implemented by ResponseWriters that allow -// an HTTP handler to take over the connection. -type Hijacker interface { - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the HTTP server library - // will not do anything else with the connection. - // It becomes the caller's responsibility to manage - // and close the connection. - Hijack() (net.Conn, *bufio.ReadWriter, error) -} - -// The CloseNotifier interface is implemented by ResponseWriters which -// allow detecting when the underlying connection has gone away. -// -// This mechanism can be used to cancel long operations on the server -// if the client has disconnected before the response is ready. -type CloseNotifier interface { - // CloseNotify returns a channel that receives a single value - // when the client connection has gone away. - CloseNotify() <-chan bool -} - -// A conn represents the server side of an HTTP connection. -type conn struct { - remoteAddr string // network address of remote side - server *Server // the Server on which the connection arrived - rwc net.Conn // i/o connection - sr liveSwitchReader // where the LimitReader reads from; usually the rwc - lr *io.LimitedReader // io.LimitReader(sr) - buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc - tlsState *tls.ConnectionState // or nil when not using TLS - - mu sync.Mutex // guards the following - clientGone bool // if client has disconnected mid-request - closeNotifyc chan bool // made lazily - hijackedv bool // connection has been hijacked by handler -} - -func (c *conn) hijacked() bool { - c.mu.Lock() - defer c.mu.Unlock() - return c.hijackedv -} - -func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.hijackedv { - return nil, nil, ErrHijacked - } - if c.closeNotifyc != nil { - return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier") - } - c.hijackedv = true - rwc = c.rwc - buf = c.buf - c.rwc = nil - c.buf = nil - c.setState(rwc, StateHijacked) - return -} - -func (c *conn) closeNotify() <-chan bool { - c.mu.Lock() - defer c.mu.Unlock() - if c.closeNotifyc == nil { - c.closeNotifyc = make(chan bool, 1) - if c.hijackedv { - // to obey the function signature, even though - // it'll never receive a value. - return c.closeNotifyc - } - pr, pw := io.Pipe() - - readSource := c.sr.r - c.sr.Lock() - c.sr.r = pr - c.sr.Unlock() - go func() { - _, err := io.Copy(pw, readSource) - if err == nil { - err = io.EOF - } - pw.CloseWithError(err) - c.noteClientGone() - }() - } - return c.closeNotifyc -} - -func (c *conn) noteClientGone() { - c.mu.Lock() - defer c.mu.Unlock() - if c.closeNotifyc != nil && !c.clientGone { - c.closeNotifyc <- true - } - c.clientGone = true -} - -// A switchReader can have its Reader changed at runtime. -// It's not safe for concurrent Reads and switches. -type switchReader struct { - io.Reader -} - -// A switchWriter can have its Writer changed at runtime. -// It's not safe for concurrent Writes and switches. 
-type switchWriter struct { - io.Writer -} - -// A liveSwitchReader is a switchReader that's safe for concurrent -// reads and switches, if its mutex is held. -type liveSwitchReader struct { - sync.Mutex - r io.Reader -} - -func (sr *liveSwitchReader) Read(p []byte) (n int, err error) { - sr.Lock() - r := sr.r - sr.Unlock() - return r.Read(p) -} - -// This should be >= 512 bytes for DetectContentType, -// but otherwise it's somewhat arbitrary. -const bufferBeforeChunkingSize = 2048 - -// chunkWriter writes to a response's conn buffer, and is the writer -// wrapped by the response.bufw buffered writer. -// -// chunkWriter also is responsible for finalizing the Header, including -// conditionally setting the Content-Type and setting a Content-Length -// in cases where the handler's final output is smaller than the buffer -// size. It also conditionally adds chunk headers, when in chunking mode. -// -// See the comment above (*response).Write for the entire write flow. -type chunkWriter struct { - res *response - - // header is either nil or a deep clone of res.handlerHeader - // at the time of res.WriteHeader, if res.WriteHeader is - // called and extra buffering is being done to calculate - // Content-Type and/or Content-Length. - header Header - - // wroteHeader tells whether the header's been written to "the - // wire" (or rather: w.conn.buf). this is unlike - // (*response).wroteHeader, which tells only whether it was - // logically written. - wroteHeader bool - - // set by the writeHeader method: - chunking bool // using chunked transfer encoding for reply body -} - -var ( - crlf = []byte("\r\n") - colonSpace = []byte(": ") -) - -func (cw *chunkWriter) Write(p []byte) (n int, err error) { - if !cw.wroteHeader { - cw.writeHeader(p) - } - if cw.res.req.Method == "HEAD" { - // Eat writes. - return len(p), nil - } - if cw.chunking { - _, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p)) - if err != nil { - cw.res.conn.rwc.Close() - return - } - } - n, err = cw.res.conn.buf.Write(p) - if cw.chunking && err == nil { - _, err = cw.res.conn.buf.Write(crlf) - } - if err != nil { - cw.res.conn.rwc.Close() - } - return -} - -func (cw *chunkWriter) flush() { - if !cw.wroteHeader { - cw.writeHeader(nil) - } - cw.res.conn.buf.Flush() -} - -func (cw *chunkWriter) close() { - if !cw.wroteHeader { - cw.writeHeader(nil) - } - if cw.chunking { - // zero EOF chunk, trailer key/value pairs (currently - // unsupported in Go's server), followed by a blank - // line. - cw.res.conn.buf.WriteString("0\r\n\r\n") - } -} - -// A response represents the server side of an HTTP response. -type response struct { - conn *conn - req *Request // request for this response - wroteHeader bool // reply header has been (logically) written - wroteContinue bool // 100 Continue response was written - - w *bufio.Writer // buffers output in chunks to chunkWriter - cw chunkWriter - sw *switchWriter // of the bufio.Writer, for return to putBufioWriter - - // handlerHeader is the Header that Handlers get access to, - // which may be retained and mutated even after WriteHeader. - // handlerHeader is copied into cw.header at WriteHeader - // time, and privately mutated thereafter. - handlerHeader Header - calledHeader bool // handler accessed handlerHeader via Header - - written int64 // number of bytes written in body - contentLength int64 // explicitly-declared Content-Length; or -1 - status int // status code passed to WriteHeader - - // close connection after this reply. 
set on request and - // updated after response from handler if there's a - // "Connection: keep-alive" response header and a - // Content-Length. - closeAfterReply bool - - // requestBodyLimitHit is set by requestTooLarge when - // maxBytesReader hits its max size. It is checked in - // WriteHeader, to make sure we don't consume the - // remaining request body to try to advance to the next HTTP - // request. Instead, when this is set, we stop reading - // subsequent requests on this connection and stop reading - // input from it. - requestBodyLimitHit bool - - handlerDone bool // set true when the handler exits - - // Buffers for Date and Content-Length - dateBuf [len(TimeFormat)]byte - clenBuf [10]byte -} - -// requestTooLarge is called by maxBytesReader when too much input has -// been read from the client. -func (w *response) requestTooLarge() { - w.closeAfterReply = true - w.requestBodyLimitHit = true - if !w.wroteHeader { - w.Header().Set("Connection", "close") - } -} - -// needsSniff reports whether a Content-Type still needs to be sniffed. -func (w *response) needsSniff() bool { - _, haveType := w.handlerHeader["Content-Type"] - return !w.cw.wroteHeader && !haveType && w.written < sniffLen -} - -// writerOnly hides an io.Writer value's optional ReadFrom method -// from io.Copy. -type writerOnly struct { - io.Writer -} - -func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { - switch v := src.(type) { - case *os.File: - fi, err := v.Stat() - if err != nil { - return false, err - } - return fi.Mode().IsRegular(), nil - case *io.LimitedReader: - return srcIsRegularFile(v.R) - default: - return - } -} - -// ReadFrom is here to optimize copying from an *os.File regular file -// to a *net.TCPConn with sendfile. -func (w *response) ReadFrom(src io.Reader) (n int64, err error) { - // Our underlying w.conn.rwc is usually a *TCPConn (with its - // own ReadFrom method). If not, or if our src isn't a regular - // file, just fall back to the normal copy method. - rf, ok := w.conn.rwc.(io.ReaderFrom) - regFile, err := srcIsRegularFile(src) - if err != nil { - return 0, err - } - if !ok || !regFile { - return io.Copy(writerOnly{w}, src) - } - - // sendfile path: - - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - - if w.needsSniff() { - n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) - n += n0 - if err != nil { - return n, err - } - } - - w.w.Flush() // get rid of any previous writes - w.cw.flush() // make sure Header is written; flush data to rwc - - // Now that cw has been flushed, its chunking field is guaranteed initialized. - if !w.cw.chunking && w.bodyAllowed() { - n0, err := rf.ReadFrom(src) - n += n0 - w.written += n0 - return n, err - } - - n0, err := io.Copy(writerOnly{w}, src) - n += n0 - return n, err -} - -// noLimit is an effective infinite upper bound for io.LimitedReader -const noLimit int64 = (1 << 63) - 1 - -// debugServerConnections controls whether all server connections are wrapped -// with a verbose logging wrapper. -const debugServerConnections = false - -// Create new connection from rwc. 
-func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) { - c = new(conn) - c.remoteAddr = rwc.RemoteAddr().String() - c.server = srv - c.rwc = rwc - if debugServerConnections { - c.rwc = newLoggingConn("server", c.rwc) - } - c.sr = liveSwitchReader{r: c.rwc} - c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader) - br := newBufioReader(c.lr) - bw := newBufioWriterSize(c.rwc, 4<<10) - c.buf = bufio.NewReadWriter(br, bw) - return c, nil -} - -var ( - bufioReaderPool sync.Pool - bufioWriter2kPool sync.Pool - bufioWriter4kPool sync.Pool -) - -func bufioWriterPool(size int) *sync.Pool { - switch size { - case 2 << 10: - return &bufioWriter2kPool - case 4 << 10: - return &bufioWriter4kPool - } - return nil -} - -func newBufioReader(r io.Reader) *bufio.Reader { - if v := bufioReaderPool.Get(); v != nil { - br := v.(*bufio.Reader) - br.Reset(r) - return br - } - return bufio.NewReader(r) -} - -func putBufioReader(br *bufio.Reader) { - br.Reset(nil) - bufioReaderPool.Put(br) -} - -func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { - pool := bufioWriterPool(size) - if pool != nil { - if v := pool.Get(); v != nil { - bw := v.(*bufio.Writer) - bw.Reset(w) - return bw - } - } - return bufio.NewWriterSize(w, size) -} - -func putBufioWriter(bw *bufio.Writer) { - bw.Reset(nil) - if pool := bufioWriterPool(bw.Available()); pool != nil { - pool.Put(bw) - } -} - -// DefaultMaxHeaderBytes is the maximum permitted size of the headers -// in an HTTP request. -// This can be overridden by setting Server.MaxHeaderBytes. -const DefaultMaxHeaderBytes = 1 << 20 // 1 MB - -func (srv *Server) maxHeaderBytes() int { - if srv.MaxHeaderBytes > 0 { - return srv.MaxHeaderBytes - } - return DefaultMaxHeaderBytes -} - -func (srv *Server) initialLimitedReaderSize() int64 { - return int64(srv.maxHeaderBytes()) + 4096 // bufio slop -} - -// wrapper around io.ReaderCloser which on first read, sends an -// HTTP/1.1 100 Continue header -type expectContinueReader struct { - resp *response - readCloser io.ReadCloser - closed bool -} - -func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { - if ecr.closed { - return 0, ErrBodyReadAfterClose - } - if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { - ecr.resp.wroteContinue = true - ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n") - ecr.resp.conn.buf.Flush() - } - return ecr.readCloser.Read(p) -} - -func (ecr *expectContinueReader) Close() error { - ecr.closed = true - return ecr.readCloser.Close() -} - -// TimeFormat is the time format to use with -// time.Parse and time.Time.Format when parsing -// or generating times in HTTP headers. -// It is like time.RFC1123 but hard codes GMT as the time zone. 
-const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - -// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) -func appendTime(b []byte, t time.Time) []byte { - const days = "SunMonTueWedThuFriSat" - const months = "JanFebMarAprMayJunJulAugSepOctNovDec" - - t = t.UTC() - yy, mm, dd := t.Date() - hh, mn, ss := t.Clock() - day := days[3*t.Weekday():] - mon := months[3*(mm-1):] - - return append(b, - day[0], day[1], day[2], ',', ' ', - byte('0'+dd/10), byte('0'+dd%10), ' ', - mon[0], mon[1], mon[2], ' ', - byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', - byte('0'+hh/10), byte('0'+hh%10), ':', - byte('0'+mn/10), byte('0'+mn%10), ':', - byte('0'+ss/10), byte('0'+ss%10), ' ', - 'G', 'M', 'T') -} - -var errTooLarge = errors.New("http: request too large") - -// Read next request from connection. -func (c *conn) readRequest() (w *response, err error) { - if c.hijacked() { - return nil, ErrHijacked - } - - if d := c.server.ReadTimeout; d != 0 { - c.rwc.SetReadDeadline(time.Now().Add(d)) - } - if d := c.server.WriteTimeout; d != 0 { - defer func() { - c.rwc.SetWriteDeadline(time.Now().Add(d)) - }() - } - - c.lr.N = c.server.initialLimitedReaderSize() - var req *Request - if req, err = ReadRequest(c.buf.Reader); err != nil { - if c.lr.N == 0 { - return nil, errTooLarge - } - return nil, err - } - c.lr.N = noLimit - - req.RemoteAddr = c.remoteAddr - req.TLS = c.tlsState - - w = &response{ - conn: c, - req: req, - handlerHeader: make(Header), - contentLength: -1, - } - w.cw.res = w - w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) - return w, nil -} - -func (w *response) Header() Header { - if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { - // Accessing the header between logically writing it - // and physically writing it means we need to allocate - // a clone to snapshot the logically written state. - w.cw.header = w.handlerHeader.clone() - } - w.calledHeader = true - return w.handlerHeader -} - -// maxPostHandlerReadBytes is the max number of Request.Body bytes not -// consumed by a handler that the server will read from the client -// in order to keep a connection alive. If there are more bytes than -// this then the server to be paranoid instead sends a "Connection: -// close" response. -// -// This number is approximately what a typical machine's TCP buffer -// size is anyway. (if we have the bytes on the machine, we might as -// well read them) -const maxPostHandlerReadBytes = 256 << 10 - -func (w *response) WriteHeader(code int) { - if w.conn.hijacked() { - w.conn.server.logf("http: response.WriteHeader on hijacked connection") - return - } - if w.wroteHeader { - w.conn.server.logf("http: multiple response.WriteHeader calls") - return - } - w.wroteHeader = true - w.status = code - - if w.calledHeader && w.cw.header == nil { - w.cw.header = w.handlerHeader.clone() - } - - if cl := w.handlerHeader.get("Content-Length"); cl != "" { - v, err := strconv.ParseInt(cl, 10, 64) - if err == nil && v >= 0 { - w.contentLength = v - } else { - w.conn.server.logf("http: invalid Content-Length of %q", cl) - w.handlerHeader.Del("Content-Length") - } - } -} - -// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. -// This type is used to avoid extra allocations from cloning and/or populating -// the response Header map and all its 1-element slices. 
-type extraHeader struct { - contentType string - connection string - transferEncoding string - date []byte // written if not nil - contentLength []byte // written if not nil -} - -// Sorted the same as extraHeader.Write's loop. -var extraHeaderKeys = [][]byte{ - []byte("Content-Type"), - []byte("Connection"), - []byte("Transfer-Encoding"), -} - -var ( - headerContentLength = []byte("Content-Length: ") - headerDate = []byte("Date: ") -) - -// Write writes the headers described in h to w. -// -// This method has a value receiver, despite the somewhat large size -// of h, because it prevents an allocation. The escape analysis isn't -// smart enough to realize this function doesn't mutate h. -func (h extraHeader) Write(w *bufio.Writer) { - if h.date != nil { - w.Write(headerDate) - w.Write(h.date) - w.Write(crlf) - } - if h.contentLength != nil { - w.Write(headerContentLength) - w.Write(h.contentLength) - w.Write(crlf) - } - for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { - if v != "" { - w.Write(extraHeaderKeys[i]) - w.Write(colonSpace) - w.WriteString(v) - w.Write(crlf) - } - } -} - -// writeHeader finalizes the header sent to the client and writes it -// to cw.res.conn.buf. -// -// p is not written by writeHeader, but is the first chunk of the body -// that will be written. It is sniffed for a Content-Type if none is -// set explicitly. It's also used to set the Content-Length, if the -// total body size was small and the handler has already finished -// running. -func (cw *chunkWriter) writeHeader(p []byte) { - if cw.wroteHeader { - return - } - cw.wroteHeader = true - - w := cw.res - keepAlivesEnabled := w.conn.server.doKeepAlives() - isHEAD := w.req.Method == "HEAD" - - // header is written out to w.conn.buf below. Depending on the - // state of the handler, we either own the map or not. If we - // don't own it, the exclude map is created lazily for - // WriteSubset to remove headers. The setHeader struct holds - // headers we need to add. - header := cw.header - owned := header != nil - if !owned { - header = w.handlerHeader - } - var excludeHeader map[string]bool - delHeader := func(key string) { - if owned { - header.Del(key) - return - } - if _, ok := header[key]; !ok { - return - } - if excludeHeader == nil { - excludeHeader = make(map[string]bool) - } - excludeHeader[key] = true - } - var setHeader extraHeader - - // If the handler is done but never sent a Content-Length - // response header and this is our first (and last) write, set - // it, even to zero. This helps HTTP/1.0 clients keep their - // "keep-alive" connections alive. - // Exceptions: 304/204/1xx responses never get Content-Length, and if - // it was a HEAD request, we don't know the difference between - // 0 actual bytes and 0 bytes because the handler noticed it - // was a HEAD request and chose not to write anything. So for - // HEAD, the handler should either write the Content-Length or - // write non-zero bytes. If it's actually 0 bytes and the - // handler never looked at the Request.Method, we just don't - // send a Content-Length header. - if w.handlerDone && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { - w.contentLength = int64(len(p)) - setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) - } - - // If this was an HTTP/1.0 request with keep-alive and we sent a - // Content-Length back, we can make this a keep-alive response ... 
- if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { - sentLength := header.get("Content-Length") != "" - if sentLength && header.get("Connection") == "keep-alive" { - w.closeAfterReply = false - } - } - - // Check for a explicit (and valid) Content-Length header. - hasCL := w.contentLength != -1 - - if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { - _, connectionHeaderSet := header["Connection"] - if !connectionHeaderSet { - setHeader.connection = "keep-alive" - } - } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { - w.closeAfterReply = true - } - - if header.get("Connection") == "close" || !keepAlivesEnabled { - w.closeAfterReply = true - } - - // Per RFC 2616, we should consume the request body before - // replying, if the handler hasn't already done so. But we - // don't want to do an unbounded amount of reading here for - // DoS reasons, so we only try up to a threshold. - if w.req.ContentLength != 0 && !w.closeAfterReply { - ecr, isExpecter := w.req.Body.(*expectContinueReader) - if !isExpecter || ecr.resp.wroteContinue { - n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1) - if n >= maxPostHandlerReadBytes { - w.requestTooLarge() - delHeader("Connection") - setHeader.connection = "close" - } else { - w.req.Body.Close() - } - } - } - - code := w.status - if bodyAllowedForStatus(code) { - // If no content type, apply sniffing algorithm to body. - _, haveType := header["Content-Type"] - if !haveType { - setHeader.contentType = DetectContentType(p) - } - } else { - for _, k := range suppressedHeaders(code) { - delHeader(k) - } - } - - if _, ok := header["Date"]; !ok { - setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) - } - - te := header.get("Transfer-Encoding") - hasTE := te != "" - if hasCL && hasTE && te != "identity" { - // TODO: return an error if WriteHeader gets a return parameter - // For now just ignore the Content-Length. - w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", - te, w.contentLength) - delHeader("Content-Length") - hasCL = false - } - - if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { - // do nothing - } else if code == StatusNoContent { - delHeader("Transfer-Encoding") - } else if hasCL { - delHeader("Transfer-Encoding") - } else if w.req.ProtoAtLeast(1, 1) { - // HTTP/1.1 or greater: use chunked transfer encoding - // to avoid closing the connection at EOF. - // TODO: this blows away any custom or stacked Transfer-Encoding they - // might have set. Deal with that as need arises once we have a valid - // use case. - cw.chunking = true - setHeader.transferEncoding = "chunked" - } else { - // HTTP version < 1.1: cannot do chunked transfer - // encoding and we don't know the Content-Length so - // signal EOF by closing connection. - w.closeAfterReply = true - delHeader("Transfer-Encoding") // in case already set - } - - // Cannot use Content-Length with non-identity Transfer-Encoding. 
- if cw.chunking { - delHeader("Content-Length") - } - if !w.req.ProtoAtLeast(1, 0) { - return - } - - if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { - delHeader("Connection") - if w.req.ProtoAtLeast(1, 1) { - setHeader.connection = "close" - } - } - - w.conn.buf.WriteString(statusLine(w.req, code)) - cw.header.WriteSubset(w.conn.buf, excludeHeader) - setHeader.Write(w.conn.buf.Writer) - w.conn.buf.Write(crlf) -} - -// statusLines is a cache of Status-Line strings, keyed by code (for -// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a -// map keyed by struct of two fields. This map's max size is bounded -// by 2*len(statusText), two protocol types for each known official -// status code in the statusText map. -var ( - statusMu sync.RWMutex - statusLines = make(map[int]string) -) - -// statusLine returns a response Status-Line (RFC 2616 Section 6.1) -// for the given request and response status code. -func statusLine(req *Request, code int) string { - // Fast path: - key := code - proto11 := req.ProtoAtLeast(1, 1) - if !proto11 { - key = -key - } - statusMu.RLock() - line, ok := statusLines[key] - statusMu.RUnlock() - if ok { - return line - } - - // Slow path: - proto := "HTTP/1.0" - if proto11 { - proto = "HTTP/1.1" - } - codestring := strconv.Itoa(code) - text, ok := statusText[code] - if !ok { - text = "status code " + codestring - } - line = proto + " " + codestring + " " + text + "\r\n" - if ok { - statusMu.Lock() - defer statusMu.Unlock() - statusLines[key] = line - } - return line -} - -// bodyAllowed returns true if a Write is allowed for this response type. -// It's illegal to call this before the header has been flushed. -func (w *response) bodyAllowed() bool { - if !w.wroteHeader { - panic("") - } - return bodyAllowedForStatus(w.status) -} - -// The Life Of A Write is like this: -// -// Handler starts. No header has been sent. The handler can either -// write a header, or just start writing. Writing before sending a header -// sends an implicitly empty 200 OK header. -// -// If the handler didn't declare a Content-Length up front, we either -// go into chunking mode or, if the handler finishes running before -// the chunking buffer size, we compute a Content-Length and send that -// in the header instead. -// -// Likewise, if the handler didn't set a Content-Type, we sniff that -// from the initial chunk of output. -// -// The Writers are wired together like: -// -// 1. *response (the ResponseWriter) -> -// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes -// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) -// and which writes the chunk headers, if needed. -// 4. conn.buf, a bufio.Writer of default (4kB) bytes -// 5. the rwc, the net.Conn. -// -// TODO(bradfitz): short-circuit some of the buffering when the -// initial header contains both a Content-Type and Content-Length. -// Also short-circuit in (1) when the header's been sent and not in -// chunking mode, writing directly to (4) instead, if (2) has no -// buffered data. More generally, we could short-circuit from (1) to -// (3) even in chunking mode if the write size from (1) is over some -// threshold and nothing is in (2). The answer might be mostly making -// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal -// with this instead. 
-func (w *response) Write(data []byte) (n int, err error) { - return w.write(len(data), data, "") -} - -func (w *response) WriteString(data string) (n int, err error) { - return w.write(len(data), nil, data) -} - -// either dataB or dataS is non-zero. -func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { - if w.conn.hijacked() { - w.conn.server.logf("http: response.Write on hijacked connection") - return 0, ErrHijacked - } - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if lenData == 0 { - return 0, nil - } - if !w.bodyAllowed() { - return 0, ErrBodyNotAllowed - } - - w.written += int64(lenData) // ignoring errors, for errorKludge - if w.contentLength != -1 && w.written > w.contentLength { - return 0, ErrContentLength - } - if dataB != nil { - return w.w.Write(dataB) - } else { - return w.w.WriteString(dataS) - } -} - -func (w *response) finishRequest() { - w.handlerDone = true - - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - - w.w.Flush() - putBufioWriter(w.w) - w.cw.close() - w.conn.buf.Flush() - - // Close the body (regardless of w.closeAfterReply) so we can - // re-use its bufio.Reader later safely. - w.req.Body.Close() - - if w.req.MultipartForm != nil { - w.req.MultipartForm.RemoveAll() - } - - if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { - // Did not write enough. Avoid getting out of sync. - w.closeAfterReply = true - } -} - -func (w *response) Flush() { - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - w.w.Flush() - w.cw.flush() -} - -func (c *conn) finalFlush() { - if c.buf != nil { - c.buf.Flush() - - // Steal the bufio.Reader (~4KB worth of memory) and its associated - // reader for a future connection. - putBufioReader(c.buf.Reader) - - // Steal the bufio.Writer (~4KB worth of memory) and its associated - // writer for a future connection. - putBufioWriter(c.buf.Writer) - - c.buf = nil - } -} - -// Close the connection. -func (c *conn) close() { - c.finalFlush() - if c.rwc != nil { - c.rwc.Close() - c.rwc = nil - } -} - -// rstAvoidanceDelay is the amount of time we sleep after closing the -// write side of a TCP connection before closing the entire socket. -// By sleeping, we increase the chances that the client sees our FIN -// and processes its final data before they process the subsequent RST -// from closing a connection with known unread data. -// This RST seems to occur mostly on BSD systems. (And Windows?) -// This timeout is somewhat arbitrary (~latency around the planet). -const rstAvoidanceDelay = 500 * time.Millisecond - -// closeWrite flushes any outstanding data and sends a FIN packet (if -// client is connected via TCP), signalling that we're done. We then -// pause for a bit, hoping the client processes it before `any -// subsequent RST. -// -// See http://golang.org/issue/3595 -func (c *conn) closeWriteAndWait() { - c.finalFlush() - if tcp, ok := c.rwc.(*net.TCPConn); ok { - tcp.CloseWrite() - } - time.Sleep(rstAvoidanceDelay) -} - -// validNPN reports whether the proto is not a blacklisted Next -// Protocol Negotiation protocol. Empty and built-in protocol types -// are blacklisted and can't be overridden with alternate -// implementations. -func validNPN(proto string) bool { - switch proto { - case "", "http/1.1", "http/1.0": - return false - } - return true -} - -func (c *conn) setState(nc net.Conn, state ConnState) { - if hook := c.server.ConnState; hook != nil { - hook(nc, state) - } -} - -// Serve a new connection. 
-func (c *conn) serve() { - origConn := c.rwc // copy it before it's set nil on Close or Hijack - defer func() { - if err := recover(); err != nil { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) - } - if !c.hijacked() { - c.close() - c.setState(origConn, StateClosed) - } - }() - - if tlsConn, ok := c.rwc.(*tls.Conn); ok { - if d := c.server.ReadTimeout; d != 0 { - c.rwc.SetReadDeadline(time.Now().Add(d)) - } - if d := c.server.WriteTimeout; d != 0 { - c.rwc.SetWriteDeadline(time.Now().Add(d)) - } - if err := tlsConn.Handshake(); err != nil { - c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) - return - } - c.tlsState = new(tls.ConnectionState) - *c.tlsState = tlsConn.ConnectionState() - if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { - if fn := c.server.TLSNextProto[proto]; fn != nil { - h := initNPNRequest{tlsConn, serverHandler{c.server}} - fn(c.server, tlsConn, h) - } - return - } - } - - for { - w, err := c.readRequest() - if c.lr.N != c.server.initialLimitedReaderSize() { - // If we read any bytes off the wire, we're active. - c.setState(c.rwc, StateActive) - } - if err != nil { - if err == errTooLarge { - // Their HTTP client may or may not be - // able to read this if we're - // responding to them and hanging up - // while they're still writing their - // request. Undefined behavior. - io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n") - c.closeWriteAndWait() - break - } else if err == io.EOF { - break // Don't reply - } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { - break // Don't reply - } - io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n") - break - } - - // Expect 100 Continue support - req := w.req - if req.expectsContinue() { - if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { - // Wrap the Body reader with one that replies on the connection - req.Body = &expectContinueReader{readCloser: req.Body, resp: w} - } - req.Header.Del("Expect") - } else if req.Header.get("Expect") != "" { - w.sendExpectationFailed() - break - } - - // HTTP cannot have multiple simultaneous active requests.[*] - // Until the server replies to this request, it can't read another, - // so we might as well run the handler in this goroutine. - // [*] Not strictly true: HTTP pipelining. We could let them all process - // in parallel even if their responses need to be serialized. - serverHandler{c.server}.ServeHTTP(w, w.req) - if c.hijacked() { - return - } - w.finishRequest() - if w.closeAfterReply { - if w.requestBodyLimitHit { - c.closeWriteAndWait() - } - break - } - c.setState(c.rwc, StateIdle) - } -} - -func (w *response) sendExpectationFailed() { - // TODO(bradfitz): let ServeHTTP handlers handle - // requests with non-standard expectation[s]? Seems - // theoretical at best, and doesn't fit into the - // current ServeHTTP model anyway. We'd need to - // make the ResponseWriter an optional - // "ExpectReplier" interface or something. - // - // For now we'll just obey RFC 2616 14.20 which says - // "If a server receives a request containing an - // Expect field that includes an expectation- - // extension that it does not support, it MUST - // respond with a 417 (Expectation Failed) status." - w.Header().Set("Connection", "close") - w.WriteHeader(StatusExpectationFailed) - w.finishRequest() -} - -// Hijack implements the Hijacker.Hijack method. 
Our response is both a ResponseWriter -// and a Hijacker. -func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { - if w.wroteHeader { - w.cw.flush() - } - // Release the bufioWriter that writes to the chunk writer, it is not - // used after a connection has been hijacked. - rwc, buf, err = w.conn.hijack() - if err == nil { - putBufioWriter(w.w) - w.w = nil - } - return rwc, buf, err -} - -func (w *response) CloseNotify() <-chan bool { - return w.conn.closeNotify() -} - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as HTTP handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Request) - -// ServeHTTP calls f(w, r). -func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { - f(w, r) -} - -// Helper handlers - -// Error replies to the request with the specified error message and HTTP code. -// The error message should be plain text. -func Error(w ResponseWriter, error string, code int) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(code) - fmt.Fprintln(w, error) -} - -// NotFound replies to the request with an HTTP 404 not found error. -func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } - -// NotFoundHandler returns a simple request handler -// that replies to each request with a ``404 page not found'' reply. -func NotFoundHandler() Handler { return HandlerFunc(NotFound) } - -// StripPrefix returns a handler that serves HTTP requests -// by removing the given prefix from the request URL's Path -// and invoking the handler h. StripPrefix handles a -// request for a path that doesn't begin with prefix by -// replying with an HTTP 404 not found error. -func StripPrefix(prefix string, h Handler) Handler { - if prefix == "" { - return h - } - return HandlerFunc(func(w ResponseWriter, r *Request) { - if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { - r.URL.Path = p - h.ServeHTTP(w, r) - } else { - NotFound(w, r) - } - }) -} - -// Redirect replies to the request with a redirect to url, -// which may be a path relative to the request path. -func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { - if u, err := url.Parse(urlStr); err == nil { - // If url was relative, make absolute by - // combining with request path. - // The browser would probably do this for us, - // but doing it ourselves is more reliable. - - // NOTE(rsc): RFC 2616 says that the Location - // line must be an absolute URI, like - // "http://www.google.com/redirect/", - // not a path like "/redirect/". - // Unfortunately, we don't know what to - // put in the host name section to get the - // client to connect to us again, so we can't - // know the right absolute URI to send back. - // Because of this problem, no one pays attention - // to the RFC; they all send back just a new path. - // So do we. 
- oldpath := r.URL.Path - if oldpath == "" { // should not happen, but avoid a crash if it does - oldpath = "/" - } - if u.Scheme == "" { - // no leading http://server - if urlStr == "" || urlStr[0] != '/' { - // make relative path absolute - olddir, _ := path.Split(oldpath) - urlStr = olddir + urlStr - } - - var query string - if i := strings.Index(urlStr, "?"); i != -1 { - urlStr, query = urlStr[:i], urlStr[i:] - } - - // clean up but preserve trailing slash - trailing := strings.HasSuffix(urlStr, "/") - urlStr = path.Clean(urlStr) - if trailing && !strings.HasSuffix(urlStr, "/") { - urlStr += "/" - } - urlStr += query - } - } - - w.Header().Set("Location", urlStr) - w.WriteHeader(code) - - // RFC2616 recommends that a short note "SHOULD" be included in the - // response because older user agents may not understand 301/307. - // Shouldn't send the response for POST or HEAD; that leaves GET. - if r.Method == "GET" { - note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" - fmt.Fprintln(w, note) - } -} - -var htmlReplacer = strings.NewReplacer( - "&", "&amp;", - "<", "&lt;", - ">", "&gt;", - // "&#34;" is shorter than "&quot;". - `"`, "&#34;", - // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. - "'", "&#39;", -) - -func htmlEscape(s string) string { - return htmlReplacer.Replace(s) -} - -// Redirect to a fixed URL -type redirectHandler struct { - url string - code int -} - -func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { - Redirect(w, r, rh.url, rh.code) -} - -// RedirectHandler returns a request handler that redirects -// each request it receives to the given url using the given -// status code. -func RedirectHandler(url string, code int) Handler { - return &redirectHandler{url, code} -} - -// ServeMux is an HTTP request multiplexer. -// It matches the URL of each incoming request against a list of registered -// patterns and calls the handler for the pattern that -// most closely matches the URL. -// -// Patterns name fixed, rooted paths, like "/favicon.ico", -// or rooted subtrees, like "/images/" (note the trailing slash). -// Longer patterns take precedence over shorter ones, so that -// if there are handlers registered for both "/images/" -// and "/images/thumbnails/", the latter handler will be -// called for paths beginning "/images/thumbnails/" and the -// former will receive requests for any other paths in the -// "/images/" subtree. -// -// Note that since a pattern ending in a slash names a rooted subtree, -// the pattern "/" matches all paths not matched by other registered -// patterns, not just the URL with Path == "/". -// -// Patterns may optionally begin with a host name, restricting matches to -// URLs on that host only. Host-specific patterns take precedence over -// general patterns, so that a handler might register for the two patterns -// "/codesearch" and "codesearch.google.com/" without also taking over -// requests for "http://www.google.com/". -// -// ServeMux also takes care of sanitizing the URL request path, -// redirecting any request containing . or .. elements to an -// equivalent .- and ..-free URL. -type ServeMux struct { - mu sync.RWMutex - m map[string]muxEntry - hosts bool // whether any patterns contain hostnames -} - -type muxEntry struct { - explicit bool - h Handler - pattern string -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} } - -// DefaultServeMux is the default ServeMux used by Serve. -var DefaultServeMux = NewServeMux() - -// Does path match pattern? 
-func pathMatch(pattern, path string) bool { - if len(pattern) == 0 { - // should not happen - return false - } - n := len(pattern) - if pattern[n-1] != '/' { - return pattern == path - } - return len(path) >= n && path[0:n] == pattern -} - -// Return the canonical path for p, eliminating . and .. elements. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// Find a handler on a handler map given a path string -// Most-specific (longest) pattern wins -func (mux *ServeMux) match(path string) (h Handler, pattern string) { - var n = 0 - for k, v := range mux.m { - if !pathMatch(k, path) { - continue - } - if h == nil || len(k) > n { - n = len(k) - h = v.h - pattern = v.pattern - } - } - return -} - -// Handler returns the handler to use for the given request, -// consulting r.Method, r.Host, and r.URL.Path. It always returns -// a non-nil handler. If the path is not in its canonical form, the -// handler will be an internally-generated handler that redirects -// to the canonical path. -// -// Handler also returns the registered pattern that matches the -// request or, in the case of internally-generated redirects, -// the pattern that will match after following the redirect. -// -// If there is no registered handler that applies to the request, -// Handler returns a ``page not found'' handler and an empty pattern. -func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { - if r.Method != "CONNECT" { - if p := cleanPath(r.URL.Path); p != r.URL.Path { - _, pattern = mux.handler(r.Host, p) - url := *r.URL - url.Path = p - return RedirectHandler(url.String(), StatusMovedPermanently), pattern - } - } - - return mux.handler(r.Host, r.URL.Path) -} - -// handler is the main implementation of Handler. -// The path is known to be in canonical form, except for CONNECT methods. -func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { - mux.mu.RLock() - defer mux.mu.RUnlock() - - // Host-specific pattern takes precedence over generic ones - if mux.hosts { - h, pattern = mux.match(host + path) - } - if h == nil { - h, pattern = mux.match(path) - } - if h == nil { - h, pattern = NotFoundHandler(), "" - } - return -} - -// ServeHTTP dispatches the request to the handler whose -// pattern most closely matches the request URL. -func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { - if r.RequestURI == "*" { - if r.ProtoAtLeast(1, 1) { - w.Header().Set("Connection", "close") - } - w.WriteHeader(StatusBadRequest) - return - } - h, _ := mux.Handler(r) - h.ServeHTTP(w, r) -} - -// Handle registers the handler for the given pattern. -// If a handler already exists for pattern, Handle panics. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - mux.mu.Lock() - defer mux.mu.Unlock() - - if pattern == "" { - panic("http: invalid pattern " + pattern) - } - if handler == nil { - panic("http: nil handler") - } - if mux.m[pattern].explicit { - panic("http: multiple registrations for " + pattern) - } - - mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} - - if pattern[0] != '/' { - mux.hosts = true - } - - // Helpful behavior: - // If pattern is /tree/, insert an implicit permanent redirect for /tree. - // It can be overridden by an explicit registration. 
- n := len(pattern) - if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { - // If pattern contains a host name, strip it and use remaining - // path for redirect. - path := pattern - if pattern[0] != '/' { - // In pattern, at least the last character is a '/', so - // strings.Index can't be -1. - path = pattern[strings.Index(pattern, "/"):] - } - mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern} - } -} - -// HandleFunc registers the handler function for the given pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// Handle registers the handler for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleFunc registers the handler function for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - DefaultServeMux.HandleFunc(pattern, handler) -} - -// Serve accepts incoming HTTP connections on the listener l, -// creating a new service goroutine for each. The service goroutines -// read requests and then call handler to reply to them. -// Handler is typically nil, in which case the DefaultServeMux is used. -func Serve(l net.Listener, handler Handler) error { - srv := &Server{Handler: handler} - return srv.Serve(l) -} - -// A Server defines parameters for running an HTTP server. -// The zero value for Server is a valid configuration. -type Server struct { - Addr string // TCP address to listen on, ":http" if empty - Handler Handler // handler to invoke, http.DefaultServeMux if nil - ReadTimeout time.Duration // maximum duration before timing out read of the request - WriteTimeout time.Duration // maximum duration before timing out write of the response - MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 - TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS - - // TLSNextProto optionally specifies a function to take over - // ownership of the provided TLS connection when an NPN - // protocol upgrade has occurred. The map key is the protocol - // name negotiated. The Handler argument should be used to - // handle HTTP requests and will initialize the Request's TLS - // and RemoteAddr if not already set. The connection is - // automatically closed when the function returns. - TLSNextProto map[string]func(*Server, *tls.Conn, Handler) - - // ConnState specifies an optional callback function that is - // called when a client connection changes state. See the - // ConnState type and associated constants for details. - ConnState func(net.Conn, ConnState) - - // ErrorLog specifies an optional logger for errors accepting - // connections and unexpected behavior from handlers. - // If nil, logging goes to os.Stderr via the log package's - // standard logger. - ErrorLog *log.Logger - - disableKeepAlives int32 // accessed atomically. -} - -// A ConnState represents the state of a client connection to a server. -// It's used by the optional Server.ConnState hook. -type ConnState int - -const ( - // StateNew represents a new connection that is expected to - // send a request immediately. Connections begin at this - // state and then transition to either StateActive or - // StateClosed. 
- StateNew ConnState = iota - - // StateActive represents a connection that has read 1 or more - // bytes of a request. The Server.ConnState hook for - // StateActive fires before the request has entered a handler - // and doesn't fire again until the request has been - // handled. After the request is handled, the state - // transitions to StateClosed, StateHijacked, or StateIdle. - StateActive - - // StateIdle represents a connection that has finished - // handling a request and is in the keep-alive state, waiting - // for a new request. Connections transition from StateIdle - // to either StateActive or StateClosed. - StateIdle - - // StateHijacked represents a hijacked connection. - // This is a terminal state. It does not transition to StateClosed. - StateHijacked - - // StateClosed represents a closed connection. - // This is a terminal state. Hijacked connections do not - // transition to StateClosed. - StateClosed -) - -var stateName = map[ConnState]string{ - StateNew: "new", - StateActive: "active", - StateIdle: "idle", - StateHijacked: "hijacked", - StateClosed: "closed", -} - -func (c ConnState) String() string { - return stateName[c] -} - -// serverHandler delegates to either the server's Handler or -// DefaultServeMux and also handles "OPTIONS *" requests. -type serverHandler struct { - srv *Server -} - -func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { - handler := sh.srv.Handler - if handler == nil { - handler = DefaultServeMux - } - if req.RequestURI == "*" && req.Method == "OPTIONS" { - handler = globalOptionsHandler{} - } - handler.ServeHTTP(rw, req) -} - -// ListenAndServe listens on the TCP network address srv.Addr and then -// calls Serve to handle requests on incoming connections. If -// srv.Addr is blank, ":http" is used. -func (srv *Server) ListenAndServe() error { - addr := srv.Addr - if addr == "" { - addr = ":http" - } - ln, err := net.Listen("tcp", addr) - if err != nil { - return err - } - return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) -} - -// Serve accepts incoming connections on the Listener l, creating a -// new service goroutine for each. The service goroutines read requests and -// then call srv.Handler to reply to them. -func (srv *Server) Serve(l net.Listener) error { - defer l.Close() - var tempDelay time.Duration // how long to sleep on accept failure - for { - rw, e := l.Accept() - if e != nil { - if ne, ok := e.(net.Error); ok && ne.Temporary() { - if tempDelay == 0 { - tempDelay = 5 * time.Millisecond - } else { - tempDelay *= 2 - } - if max := 1 * time.Second; tempDelay > max { - tempDelay = max - } - srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) - time.Sleep(tempDelay) - continue - } - return e - } - tempDelay = 0 - c, err := srv.newConn(rw) - if err != nil { - continue - } - c.setState(c.rwc, StateNew) // before Serve can return - go c.serve() - } -} - -func (s *Server) doKeepAlives() bool { - return atomic.LoadInt32(&s.disableKeepAlives) == 0 -} - -// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. -// By default, keep-alives are always enabled. Only very -// resource-constrained environments or servers in the process of -// shutting down should disable them. -func (s *Server) SetKeepAlivesEnabled(v bool) { - if v { - atomic.StoreInt32(&s.disableKeepAlives, 0) - } else { - atomic.StoreInt32(&s.disableKeepAlives, 1) - } -} - -func (s *Server) logf(format string, args ...interface{}) { - if s.ErrorLog != nil { - s.ErrorLog.Printf(format, args...) 
- } else { - log.Printf(format, args...) - } -} - -// ListenAndServe listens on the TCP network address addr -// and then calls Serve with handler to handle requests -// on incoming connections. Handler is typically nil, -// in which case the DefaultServeMux is used. -// -// A trivial example server is: -// -// package main -// -// import ( -// "io" -// "net/http" -// "log" -// ) -// -// // hello world, the web server -// func HelloServer(w http.ResponseWriter, req *http.Request) { -// io.WriteString(w, "hello, world!\n") -// } -// -// func main() { -// http.HandleFunc("/hello", HelloServer) -// err := http.ListenAndServe(":12345", nil) -// if err != nil { -// log.Fatal("ListenAndServe: ", err) -// } -// } -func ListenAndServe(addr string, handler Handler) error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts identically to ListenAndServe, except that it -// expects HTTPS connections. Additionally, files containing a certificate and -// matching private key for the server must be provided. If the certificate -// is signed by a certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. -// -// A trivial example server is: -// -// import ( -// "log" -// "net/http" -// ) -// -// func handler(w http.ResponseWriter, req *http.Request) { -// w.Header().Set("Content-Type", "text/plain") -// w.Write([]byte("This is an example server.\n")) -// } -// -// func main() { -// http.HandleFunc("/", handler) -// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") -// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) -// if err != nil { -// log.Fatal(err) -// } -// } -// -// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. -func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServeTLS(certFile, keyFile) -} - -// ListenAndServeTLS listens on the TCP network address srv.Addr and -// then calls Serve to handle requests on incoming TLS connections. -// -// Filenames containing a certificate and matching private key for -// the server must be provided. If the certificate is signed by a -// certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. -// -// If srv.Addr is blank, ":https" is used. -func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - config := &tls.Config{} - if srv.TLSConfig != nil { - *config = *srv.TLSConfig - } - if config.NextProtos == nil { - config.NextProtos = []string{"http/1.1"} - } - - var err error - config.Certificates = make([]tls.Certificate, 1) - config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - ln, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) - return srv.Serve(tlsListener) -} - -// TimeoutHandler returns a Handler that runs h with the given time limit. -// -// The new Handler calls h.ServeHTTP to handle each request, but if a -// call runs for longer than its time limit, the handler responds with -// a 503 Service Unavailable error and the given message in its body. -// (If msg is empty, a suitable default message will be sent.) 
-// After such a timeout, writes by h to its ResponseWriter will return -// ErrHandlerTimeout. -func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { - f := func() <-chan time.Time { - return time.After(dt) - } - return &timeoutHandler{h, f, msg} -} - -// ErrHandlerTimeout is returned on ResponseWriter Write calls -// in handlers which have timed out. -var ErrHandlerTimeout = errors.New("http: Handler timeout") - -type timeoutHandler struct { - handler Handler - timeout func() <-chan time.Time // returns channel producing a timeout - body string -} - -func (h *timeoutHandler) errorBody() string { - if h.body != "" { - return h.body - } - return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>
" -} - -func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { - done := make(chan bool, 1) - tw := &timeoutWriter{w: w} - go func() { - h.handler.ServeHTTP(tw, r) - done <- true - }() - select { - case <-done: - return - case <-h.timeout(): - tw.mu.Lock() - defer tw.mu.Unlock() - if !tw.wroteHeader { - tw.w.WriteHeader(StatusServiceUnavailable) - tw.w.Write([]byte(h.errorBody())) - } - tw.timedOut = true - } -} - -type timeoutWriter struct { - w ResponseWriter - - mu sync.Mutex - timedOut bool - wroteHeader bool -} - -func (tw *timeoutWriter) Header() Header { - return tw.w.Header() -} - -func (tw *timeoutWriter) Write(p []byte) (int, error) { - tw.mu.Lock() - timedOut := tw.timedOut - tw.mu.Unlock() - if timedOut { - return 0, ErrHandlerTimeout - } - return tw.w.Write(p) -} - -func (tw *timeoutWriter) WriteHeader(code int) { - tw.mu.Lock() - if tw.timedOut || tw.wroteHeader { - tw.mu.Unlock() - return - } - tw.wroteHeader = true - tw.mu.Unlock() - tw.w.WriteHeader(code) -} - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -// globalOptionsHandler responds to "OPTIONS *" requests. -type globalOptionsHandler struct{} - -func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { - w.Header().Set("Content-Length", "0") - if r.ContentLength != 0 { - // Read up to 4KB of OPTIONS body (as mentioned in the - // spec as being reserved for future use), but anything - // over that is considered a waste of server resources - // (or an attack) and we abort and close the connection, - // courtesy of MaxBytesReader's EOF behavior. - mb := MaxBytesReader(w, r.Body, 4<<10) - io.Copy(ioutil.Discard, mb) - } -} - -type eofReaderWithWriteTo struct{} - -func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } -func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } - -// eofReader is a non-nil io.ReadCloser that always returns EOF. -// It has a WriteTo method so io.Copy won't need a buffer. -var eofReader = &struct { - eofReaderWithWriteTo - io.Closer -}{ - eofReaderWithWriteTo{}, - ioutil.NopCloser(nil), -} - -// Verify that an io.Copy from an eofReader won't require a buffer. -var _ io.WriterTo = eofReader - -// initNPNRequest is an HTTP handler that initializes certain -// uninitialized fields in its *Request. Such partially-initialized -// Requests come from NPN protocol handlers. -type initNPNRequest struct { - c *tls.Conn - h serverHandler -} - -func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { - if req.TLS == nil { - req.TLS = &tls.ConnectionState{} - *req.TLS = h.c.ConnectionState() - } - if req.Body == nil { - req.Body = eofReader - } - if req.RemoteAddr == "" { - req.RemoteAddr = h.c.RemoteAddr().String() - } - h.h.ServeHTTP(rw, req) -} - -// loggingConn is used for debugging. 
-type loggingConn struct { - name string - net.Conn -} - -var ( - uniqNameMu sync.Mutex - uniqNameNext = make(map[string]int) -) - -func newLoggingConn(baseName string, c net.Conn) net.Conn { - uniqNameMu.Lock() - defer uniqNameMu.Unlock() - uniqNameNext[baseName]++ - return &loggingConn{ - name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), - Conn: c, - } -} - -func (c *loggingConn) Write(p []byte) (n int, err error) { - log.Printf("%s.Write(%d) = ....", c.name, len(p)) - n, err = c.Conn.Write(p) - log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) - return -} - -func (c *loggingConn) Read(p []byte) (n int, err error) { - log.Printf("%s.Read(%d) = ....", c.name, len(p)) - n, err = c.Conn.Read(p) - log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) - return -} - -func (c *loggingConn) Close() (err error) { - log.Printf("%s.Close() = ...", c.name) - err = c.Conn.Close() - log.Printf("%s.Close() = %v", c.name, err) - return -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/sniff.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/sniff.go deleted file mode 100644 index 68f519b05..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/sniff.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "encoding/binary" -) - -// The algorithm uses at most sniffLen bytes to make its decision. -const sniffLen = 512 - -// DetectContentType implements the algorithm described -// at http://mimesniff.spec.whatwg.org/ to determine the -// Content-Type of the given data. It considers at most the -// first 512 bytes of data. DetectContentType always returns -// a valid MIME type: if it cannot determine a more specific one, it -// returns "application/octet-stream". -func DetectContentType(data []byte) string { - if len(data) > sniffLen { - data = data[:sniffLen] - } - - // Index of the first non-whitespace byte in data. - firstNonWS := 0 - for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ { - } - - for _, sig := range sniffSignatures { - if ct := sig.match(data, firstNonWS); ct != "" { - return ct - } - } - - return "application/octet-stream" // fallback -} - -func isWS(b byte) bool { - return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1 -} - -type sniffSig interface { - // match returns the MIME type of the data, or "" if unknown. - match(data []byte, firstNonWS int) string -} - -// Data matching the table in section 6. -var sniffSignatures = []sniffSig{ - htmlSig("' { - return "" - } - return "text/html; charset=utf-8" -} - -type mp4Sig int - -func (mp4Sig) match(data []byte, firstNonWS int) string { - // c.f. section 6.1. - if len(data) < 8 { - return "" - } - boxSize := int(binary.BigEndian.Uint32(data[:4])) - if boxSize%4 != 0 || len(data) < boxSize { - return "" - } - if !bytes.Equal(data[4:8], []byte("ftyp")) { - return "" - } - for st := 8; st < boxSize; st += 4 { - if st == 12 { - // minor version number - continue - } - seg := string(data[st : st+3]) - switch seg { - case "mp4", "iso", "M4V", "M4P", "M4B": - return "video/mp4" - /* The remainder are not in the spec. - case "M4A": - return "audio/mp4" - case "3gp": - return "video/3gpp" - case "jp2": - return "image/jp2" // JPEG 2000 - */ - } - } - return "" -} - -type textSig int - -func (textSig) match(data []byte, firstNonWS int) string { - // c.f. section 5, step 4. 
- for _, b := range data[firstNonWS:] { - switch { - case 0x00 <= b && b <= 0x08, - b == 0x0B, - 0x0E <= b && b <= 0x1A, - 0x1C <= b && b <= 0x1F: - return "" - } - } - return "text/plain; charset=utf-8" -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/status.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/status.go deleted file mode 100644 index d253bd5cb..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/status.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -// HTTP status codes, defined in RFC 2616. -const ( - StatusContinue = 100 - StatusSwitchingProtocols = 101 - - StatusOK = 200 - StatusCreated = 201 - StatusAccepted = 202 - StatusNonAuthoritativeInfo = 203 - StatusNoContent = 204 - StatusResetContent = 205 - StatusPartialContent = 206 - - StatusMultipleChoices = 300 - StatusMovedPermanently = 301 - StatusFound = 302 - StatusSeeOther = 303 - StatusNotModified = 304 - StatusUseProxy = 305 - StatusTemporaryRedirect = 307 - - StatusBadRequest = 400 - StatusUnauthorized = 401 - StatusPaymentRequired = 402 - StatusForbidden = 403 - StatusNotFound = 404 - StatusMethodNotAllowed = 405 - StatusNotAcceptable = 406 - StatusProxyAuthRequired = 407 - StatusRequestTimeout = 408 - StatusConflict = 409 - StatusGone = 410 - StatusLengthRequired = 411 - StatusPreconditionFailed = 412 - StatusRequestEntityTooLarge = 413 - StatusRequestURITooLong = 414 - StatusUnsupportedMediaType = 415 - StatusRequestedRangeNotSatisfiable = 416 - StatusExpectationFailed = 417 - StatusTeapot = 418 - - StatusInternalServerError = 500 - StatusNotImplemented = 501 - StatusBadGateway = 502 - StatusServiceUnavailable = 503 - StatusGatewayTimeout = 504 - StatusHTTPVersionNotSupported = 505 - - // New HTTP status codes from RFC 6585. Not exported yet in Go 1.1. 
- // See discussion at https://codereview.appspot.com/7678043/ - statusPreconditionRequired = 428 - statusTooManyRequests = 429 - statusRequestHeaderFieldsTooLarge = 431 - statusNetworkAuthenticationRequired = 511 -) - -var statusText = map[int]string{ - StatusContinue: "Continue", - StatusSwitchingProtocols: "Switching Protocols", - - StatusOK: "OK", - StatusCreated: "Created", - StatusAccepted: "Accepted", - StatusNonAuthoritativeInfo: "Non-Authoritative Information", - StatusNoContent: "No Content", - StatusResetContent: "Reset Content", - StatusPartialContent: "Partial Content", - - StatusMultipleChoices: "Multiple Choices", - StatusMovedPermanently: "Moved Permanently", - StatusFound: "Found", - StatusSeeOther: "See Other", - StatusNotModified: "Not Modified", - StatusUseProxy: "Use Proxy", - StatusTemporaryRedirect: "Temporary Redirect", - - StatusBadRequest: "Bad Request", - StatusUnauthorized: "Unauthorized", - StatusPaymentRequired: "Payment Required", - StatusForbidden: "Forbidden", - StatusNotFound: "Not Found", - StatusMethodNotAllowed: "Method Not Allowed", - StatusNotAcceptable: "Not Acceptable", - StatusProxyAuthRequired: "Proxy Authentication Required", - StatusRequestTimeout: "Request Timeout", - StatusConflict: "Conflict", - StatusGone: "Gone", - StatusLengthRequired: "Length Required", - StatusPreconditionFailed: "Precondition Failed", - StatusRequestEntityTooLarge: "Request Entity Too Large", - StatusRequestURITooLong: "Request URI Too Long", - StatusUnsupportedMediaType: "Unsupported Media Type", - StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", - StatusExpectationFailed: "Expectation Failed", - StatusTeapot: "I'm a teapot", - - StatusInternalServerError: "Internal Server Error", - StatusNotImplemented: "Not Implemented", - StatusBadGateway: "Bad Gateway", - StatusServiceUnavailable: "Service Unavailable", - StatusGatewayTimeout: "Gateway Timeout", - StatusHTTPVersionNotSupported: "HTTP Version Not Supported", - - statusPreconditionRequired: "Precondition Required", - statusTooManyRequests: "Too Many Requests", - statusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", - statusNetworkAuthenticationRequired: "Network Authentication Required", -} - -// StatusText returns a text for the HTTP status code. It returns the empty -// string if the code is unknown. -func StatusText(code int) string { - return statusText[code] -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transfer.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transfer.go deleted file mode 100644 index 7f6368652..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transfer.go +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "net/textproto" - "sort" - "strconv" - "strings" - "sync" -) - -type errorReader struct { - err error -} - -func (r *errorReader) Read(p []byte) (n int, err error) { - return 0, r.err -} - -// transferWriter inspects the fields of a user-supplied Request or Response, -// sanitizes them without changing the user object and provides methods for -// writing the respective header, body and trailer in wire format. 
-type transferWriter struct { - Method string - Body io.Reader - BodyCloser io.Closer - ResponseToHEAD bool - ContentLength int64 // -1 means unknown, 0 means exactly none - Close bool - TransferEncoding []string - Trailer Header -} - -func newTransferWriter(r interface{}) (t *transferWriter, err error) { - t = &transferWriter{} - - // Extract relevant fields - atLeastHTTP11 := false - switch rr := r.(type) { - case *Request: - if rr.ContentLength != 0 && rr.Body == nil { - return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength) - } - t.Method = rr.Method - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 { - if t.ContentLength == 0 { - // Test to see if it's actually zero or just unset. - var buf [1]byte - n, rerr := io.ReadFull(t.Body, buf[:]) - if rerr != nil && rerr != io.EOF { - t.ContentLength = -1 - t.Body = &errorReader{rerr} - } else if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - t.ContentLength = -1 - t.Body = io.MultiReader(bytes.NewReader(buf[:]), t.Body) - } else { - // Body is actually empty. - t.Body = nil - t.BodyCloser = nil - } - } - if t.ContentLength < 0 { - t.TransferEncoding = []string{"chunked"} - } - } - case *Response: - if rr.Request != nil { - t.Method = rr.Request.Method - } - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - t.ResponseToHEAD = noBodyExpected(t.Method) - } - - // Sanitize Body,ContentLength,TransferEncoding - if t.ResponseToHEAD { - t.Body = nil - if chunked(t.TransferEncoding) { - t.ContentLength = -1 - } - } else { - if !atLeastHTTP11 || t.Body == nil { - t.TransferEncoding = nil - } - if chunked(t.TransferEncoding) { - t.ContentLength = -1 - } else if t.Body == nil { // no chunking, no body - t.ContentLength = 0 - } - } - - // Sanitize Trailer - if !chunked(t.TransferEncoding) { - t.Trailer = nil - } - - return t, nil -} - -func noBodyExpected(requestMethod string) bool { - return requestMethod == "HEAD" -} - -func (t *transferWriter) shouldSendContentLength() bool { - if chunked(t.TransferEncoding) { - return false - } - if t.ContentLength > 0 { - return true - } - // Many servers expect a Content-Length for these methods - if t.Method == "POST" || t.Method == "PUT" { - return true - } - if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { - return true - } - - return false -} - -func (t *transferWriter) WriteHeader(w io.Writer) error { - if t.Close { - if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil { - return err - } - } - - // Write Content-Length and/or Transfer-Encoding whose values are a - // function of the sanitized field triple (Body, ContentLength, - // TransferEncoding) - if t.shouldSendContentLength() { - if _, err := io.WriteString(w, "Content-Length: "); err != nil { - return err - } - if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil { - return err - } - } else if chunked(t.TransferEncoding) { - if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil { - return err - } - } - - // 
Write Trailer header - if t.Trailer != nil { - keys := make([]string, 0, len(t.Trailer)) - for k := range t.Trailer { - k = CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - // TODO: could do better allocation-wise here, but trailers are rare, - // so being lazy for now. - if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil { - return err - } - } - } - - return nil -} - -func (t *transferWriter) WriteBody(w io.Writer) error { - var err error - var ncopy int64 - - // Write body - if t.Body != nil { - if chunked(t.TransferEncoding) { - cw := newChunkedWriter(w) - _, err = io.Copy(cw, t.Body) - if err == nil { - err = cw.Close() - } - } else if t.ContentLength == -1 { - ncopy, err = io.Copy(w, t.Body) - } else { - ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength)) - if err != nil { - return err - } - var nextra int64 - nextra, err = io.Copy(ioutil.Discard, t.Body) - ncopy += nextra - } - if err != nil { - return err - } - if err = t.BodyCloser.Close(); err != nil { - return err - } - } - - if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy { - return fmt.Errorf("http: Request.ContentLength=%d with Body length %d", - t.ContentLength, ncopy) - } - - // TODO(petar): Place trailer writer code here. - if chunked(t.TransferEncoding) { - // Write Trailer header - if t.Trailer != nil { - if err := t.Trailer.Write(w); err != nil { - return err - } - } - // Last chunk, empty trailer - _, err = io.WriteString(w, "\r\n") - } - return err -} - -type transferReader struct { - // Input - Header Header - StatusCode int - RequestMethod string - ProtoMajor int - ProtoMinor int - // Output - Body io.ReadCloser - ContentLength int64 - TransferEncoding []string - Close bool - Trailer Header -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -var ( - suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"} - suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"} -) - -func suppressedHeaders(status int) []string { - switch { - case status == 304: - // RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers" - return suppressedHeaders304 - case !bodyAllowedForStatus(status): - return suppressedHeadersNoBody - } - return nil -} - -// msg is *Request or *Response. 
-func readTransfer(msg interface{}, r *bufio.Reader) (err error) { - t := &transferReader{RequestMethod: "GET"} - - // Unify input - isResponse := false - switch rr := msg.(type) { - case *Response: - t.Header = rr.Header - t.StatusCode = rr.StatusCode - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) - isResponse = true - if rr.Request != nil { - t.RequestMethod = rr.Request.Method - } - case *Request: - t.Header = rr.Header - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - // Transfer semantics for Requests are exactly like those for - // Responses with status code 200, responding to a GET method - t.StatusCode = 200 - default: - panic("unexpected type") - } - - // Default to HTTP/1.1 - if t.ProtoMajor == 0 && t.ProtoMinor == 0 { - t.ProtoMajor, t.ProtoMinor = 1, 1 - } - - // Transfer encoding, content length - t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header) - if err != nil { - return err - } - - realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding) - if err != nil { - return err - } - if isResponse && t.RequestMethod == "HEAD" { - if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil { - return err - } else { - t.ContentLength = n - } - } else { - t.ContentLength = realLength - } - - // Trailer - t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding) - if err != nil { - return err - } - - // If there is no Content-Length or chunked Transfer-Encoding on a *Response - // and the status is not 1xx, 204 or 304, then the body is unbounded. - // See RFC2616, section 4.4. - switch msg.(type) { - case *Response: - if realLength == -1 && - !chunked(t.TransferEncoding) && - bodyAllowedForStatus(t.StatusCode) { - // Unbounded body. - t.Close = true - } - } - - // Prepare body reader. ContentLength < 0 means chunked encoding - // or close connection when finished, since multipart is not supported yet - switch { - case chunked(t.TransferEncoding): - if noBodyExpected(t.RequestMethod) { - t.Body = eofReader - } else { - t.Body = &body{src: newChunkedReader(r), hdr: msg, r: r, closing: t.Close} - } - case realLength == 0: - t.Body = eofReader - case realLength > 0: - t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close} - default: - // realLength < 0, i.e. "Content-Length" not mentioned in header - if t.Close { - // Close semantics (i.e. HTTP/1.0) - t.Body = &body{src: r, closing: t.Close} - } else { - // Persistent connection (i.e. HTTP/1.1) - t.Body = eofReader - } - } - - // Unify output - switch rr := msg.(type) { - case *Request: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - case *Response: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - } - - return nil -} - -// Checks whether chunked is part of the encodings stack -func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } - -// Checks whether the encoding is explicitly "identity". 
-func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } - -// Sanitize transfer encoding -func fixTransferEncoding(requestMethod string, header Header) ([]string, error) { - raw, present := header["Transfer-Encoding"] - if !present { - return nil, nil - } - - delete(header, "Transfer-Encoding") - - encodings := strings.Split(raw[0], ",") - te := make([]string, 0, len(encodings)) - // TODO: Even though we only support "identity" and "chunked" - // encodings, the loop below is designed with foresight. One - // invariant that must be maintained is that, if present, - // chunked encoding must always come first. - for _, encoding := range encodings { - encoding = strings.ToLower(strings.TrimSpace(encoding)) - // "identity" encoding is not recorded - if encoding == "identity" { - break - } - if encoding != "chunked" { - return nil, &badStringError{"unsupported transfer encoding", encoding} - } - te = te[0 : len(te)+1] - te[len(te)-1] = encoding - } - if len(te) > 1 { - return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} - } - if len(te) > 0 { - // Chunked encoding trumps Content-Length. See RFC 2616 - // Section 4.4. Currently len(te) > 0 implies chunked - // encoding. - delete(header, "Content-Length") - return te, nil - } - - return nil, nil -} - -// Determine the expected body length, using RFC 2616 Section 4.4. This -// function is not a method, because ultimately it should be shared by -// ReadResponse and ReadRequest. -func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) { - - // Logic based on response type or status - if noBodyExpected(requestMethod) { - return 0, nil - } - if status/100 == 1 { - return 0, nil - } - switch status { - case 204, 304: - return 0, nil - } - - // Logic based on Transfer-Encoding - if chunked(te) { - return -1, nil - } - - // Logic based on Content-Length - cl := strings.TrimSpace(header.get("Content-Length")) - if cl != "" { - n, err := parseContentLength(cl) - if err != nil { - return -1, err - } - return n, nil - } else { - header.Del("Content-Length") - } - - if !isResponse && requestMethod == "GET" { - // RFC 2616 doesn't explicitly permit nor forbid an - // entity-body on a GET request so we permit one if - // declared, but we default to 0 here (not -1 below) - // if there's no mention of a body. - return 0, nil - } - - // Body-EOF logic based on other methods (like closing, or chunked coding) - return -1, nil -} - -// Determine whether to hang up after sending a request and body, or -// receiving a response and body -// 'header' is the request headers -func shouldClose(major, minor int, header Header) bool { - if major < 1 { - return true - } else if major == 1 && minor == 0 { - if !strings.Contains(strings.ToLower(header.get("Connection")), "keep-alive") { - return true - } - return false - } else { - // TODO: Should split on commas, toss surrounding white space, - // and check each field. 
- if strings.ToLower(header.get("Connection")) == "close" { - header.Del("Connection") - return true - } - } - return false -} - -// Parse the trailer header -func fixTrailer(header Header, te []string) (Header, error) { - raw := header.get("Trailer") - if raw == "" { - return nil, nil - } - - header.Del("Trailer") - trailer := make(Header) - keys := strings.Split(raw, ",") - for _, key := range keys { - key = CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - return nil, &badStringError{"bad trailer key", key} - } - trailer[key] = nil - } - if len(trailer) == 0 { - return nil, nil - } - if !chunked(te) { - // Trailer and no chunking - return nil, ErrUnexpectedTrailer - } - return trailer, nil -} - -// body turns a Reader into a ReadCloser. -// Close ensures that the body has been fully read -// and then reads the trailer if necessary. -type body struct { - src io.Reader - hdr interface{} // non-nil (Response or Request) value means read trailer - r *bufio.Reader // underlying wire-format reader for the trailer - closing bool // is the connection to be closed after reading body? - - mu sync.Mutex // guards closed, and calls to Read and Close - closed bool -} - -// ErrBodyReadAfterClose is returned when reading a Request or Response -// Body after the body has been closed. This typically happens when the body is -// read after an HTTP Handler calls WriteHeader or Write on its -// ResponseWriter. -var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body") - -func (b *body) Read(p []byte) (n int, err error) { - b.mu.Lock() - defer b.mu.Unlock() - if b.closed { - return 0, ErrBodyReadAfterClose - } - return b.readLocked(p) -} - -// Must hold b.mu. -func (b *body) readLocked(p []byte) (n int, err error) { - n, err = b.src.Read(p) - - if err == io.EOF { - // Chunked case. Read the trailer. - if b.hdr != nil { - if e := b.readTrailer(); e != nil { - err = e - } - b.hdr = nil - } else { - // If the server declared the Content-Length, our body is a LimitedReader - // and we need to check whether this EOF arrived early. - if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 { - err = io.ErrUnexpectedEOF - } - } - } - - // If we can return an EOF here along with the read data, do - // so. This is optional per the io.Reader contract, but doing - // so helps the HTTP transport code recycle its connection - // earlier (since it will see this EOF itself), even if the - // client doesn't do future reads or Close. - if err == nil && n > 0 { - if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 { - err = io.EOF - } - } - - return n, err -} - -var ( - singleCRLF = []byte("\r\n") - doubleCRLF = []byte("\r\n\r\n") -) - -func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { - for peekSize := 4; ; peekSize++ { - // This loop stops when Peek returns an error, - // which it does when r's buffer has been filled. - buf, err := r.Peek(peekSize) - if bytes.HasSuffix(buf, doubleCRLF) { - return true - } - if err != nil { - break - } - } - return false -} - -var errTrailerEOF = errors.New("http: unexpected EOF reading trailer") - -func (b *body) readTrailer() error { - // The common case, since nobody uses trailers. - buf, err := b.r.Peek(2) - if bytes.Equal(buf, singleCRLF) { - b.r.ReadByte() - b.r.ReadByte() - return nil - } - if len(buf) < 2 { - return errTrailerEOF - } - if err != nil { - return err - } - - // Make sure there's a header terminator coming up, to prevent - // a DoS with an unbounded size Trailer. 
It's not easy to - // slip in a LimitReader here, as textproto.NewReader requires - // a concrete *bufio.Reader. Also, we can't get all the way - // back up to our conn's LimitedReader that *might* be backing - // this bufio.Reader. Instead, a hack: we iteratively Peek up - // to the bufio.Reader's max size, looking for a double CRLF. - // This limits the trailer to the underlying buffer size, typically 4kB. - if !seeUpcomingDoubleCRLF(b.r) { - return errors.New("http: suspiciously long trailer after chunked body") - } - - hdr, err := textproto.NewReader(b.r).ReadMIMEHeader() - if err != nil { - if err == io.EOF { - return errTrailerEOF - } - return err - } - switch rr := b.hdr.(type) { - case *Request: - mergeSetHeader(&rr.Trailer, Header(hdr)) - case *Response: - mergeSetHeader(&rr.Trailer, Header(hdr)) - } - return nil -} - -func mergeSetHeader(dst *Header, src Header) { - if *dst == nil { - *dst = src - return - } - for k, vv := range src { - (*dst)[k] = vv - } -} - -func (b *body) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - if b.closed { - return nil - } - var err error - switch { - case b.hdr == nil && b.closing: - // no trailer and closing the connection next. - // no point in reading to EOF. - default: - // Fully consume the body, which will also lead to us reading - // the trailer headers after the body, if present. - _, err = io.Copy(ioutil.Discard, bodyLocked{b}) - } - b.closed = true - return err -} - -// bodyLocked is a io.Reader reading from a *body when its mutex is -// already held. -type bodyLocked struct { - b *body -} - -func (bl bodyLocked) Read(p []byte) (n int, err error) { - if bl.b.closed { - return 0, ErrBodyReadAfterClose - } - return bl.b.readLocked(p) -} - -// parseContentLength trims whitespace from s and returns -1 if no value -// is set, or the value if it's >= 0. -func parseContentLength(cl string) (int64, error) { - cl = strings.TrimSpace(cl) - if cl == "" { - return -1, nil - } - n, err := strconv.ParseInt(cl, 10, 64) - if err != nil || n < 0 { - return 0, &badStringError{"bad Content-Length", cl} - } - return n, nil - -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transport.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transport.go deleted file mode 100644 index c58e2dc35..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/transport.go +++ /dev/null @@ -1,1208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP client implementation. See RFC 2616. -// -// This is the low-level Transport implementation of RoundTripper. -// The high-level interface is in client.go. - -package http - -import ( - "bufio" - "compress/gzip" - "errors" - "fmt" - "github.com/masterzen/azure-sdk-for-go/core/tls" - "io" - "log" - "net" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// DefaultTransport is the default implementation of Transport and is -// used by DefaultClient. It establishes network connections as needed -// and caches them for reuse by subsequent calls. It uses HTTP proxies -// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and -// $no_proxy) environment variables. 
-var DefaultTransport RoundTripper = &Transport{ - Proxy: ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -// DefaultMaxIdleConnsPerHost is the default value of Transport's -// MaxIdleConnsPerHost. -const DefaultMaxIdleConnsPerHost = 2 - -// Transport is an implementation of RoundTripper that supports http, -// https, and http proxies (for either http or https with CONNECT). -// Transport can also cache connections for future re-use. -type Transport struct { - idleMu sync.Mutex - idleConn map[connectMethodKey][]*persistConn - idleConnCh map[connectMethodKey]chan *persistConn - reqMu sync.Mutex - reqCanceler map[*Request]func() - altMu sync.RWMutex - altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*Request) (*url.URL, error) - - // Dial specifies the dial function for creating TCP - // connections. - // If Dial is nil, net.Dial is used. - Dial func(network, addr string) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // TLSHandshakeTimeout specifies the maximum amount of time waiting to - // wait for a TLS handshake. Zero means no timeout. - TLSHandshakeTimeout time.Duration - - // DisableKeepAlives, if true, prevents re-use of TCP connections - // between different HTTP requests. - DisableKeepAlives bool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // MaxIdleConnsPerHost, if non-zero, controls the maximum idle - // (keep-alive) to keep per-host. If zero, - // DefaultMaxIdleConnsPerHost is used. - MaxIdleConnsPerHost int - - // ResponseHeaderTimeout, if non-zero, specifies the amount of - // time to wait for a server's response headers after fully - // writing the request (including its body, if any). This - // time does not include the time to read the response body. - ResponseHeaderTimeout time.Duration - - // TODO: tunable on global max cached connections - // TODO: tunable on timeout on cached connections -} - -// ProxyFromEnvironment returns the URL of the proxy to use for a -// given request, as indicated by the environment variables -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). -// An error is returned if the proxy environment is invalid. -// A nil URL and nil error are returned if no proxy is defined in the -// environment, or a proxy should not be used for the given request. -// -// As a special case, if req.URL.Host is "localhost" (with or without -// a port number), then a nil URL and nil error will be returned. 
-func ProxyFromEnvironment(req *Request) (*url.URL, error) { - proxy := httpProxyEnv.Get() - if proxy == "" { - return nil, nil - } - if !useProxy(canonicalAddr(req.URL)) { - return nil, nil - } - proxyURL, err := url.Parse(proxy) - if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { - // proxy was bogus. Try prepending "http://" to it and - // see if that parses correctly. If not, we fall - // through and complain about the original one. - if proxyURL, err := url.Parse("http://" + proxy); err == nil { - return proxyURL, nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) - } - return proxyURL, nil -} - -// ProxyURL returns a proxy function (for use in a Transport) -// that always returns the same URL. -func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) { - return func(*Request) (*url.URL, error) { - return fixedURL, nil - } -} - -// transportRequest is a wrapper around a *Request that adds -// optional extra headers to write. -type transportRequest struct { - *Request // original request, not to be mutated - extra Header // extra headers to write, or nil -} - -func (tr *transportRequest) extraHeaders() Header { - if tr.extra == nil { - tr.extra = make(Header) - } - return tr.extra -} - -// RoundTrip implements the RoundTripper interface. -// -// For higher-level HTTP client support (such as handling of cookies -// and redirects), see Get, Post, and the Client type. -func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) { - if req.URL == nil { - req.closeBody() - return nil, errors.New("http: nil Request.URL") - } - if req.Header == nil { - req.closeBody() - return nil, errors.New("http: nil Request.Header") - } - if req.URL.Scheme != "http" && req.URL.Scheme != "https" { - t.altMu.RLock() - var rt RoundTripper - if t.altProto != nil { - rt = t.altProto[req.URL.Scheme] - } - t.altMu.RUnlock() - if rt == nil { - req.closeBody() - return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme} - } - return rt.RoundTrip(req) - } - if req.URL.Host == "" { - req.closeBody() - return nil, errors.New("http: no Host in request URL") - } - treq := &transportRequest{Request: req} - cm, err := t.connectMethodForRequest(treq) - if err != nil { - req.closeBody() - return nil, err - } - - // Get the cached or newly-created connection to either the - // host (for http or https), the http proxy, or the http proxy - // pre-CONNECTed to https server. In any case, we'll be ready - // to send it requests. - pconn, err := t.getConn(req, cm) - if err != nil { - t.setReqCanceler(req, nil) - req.closeBody() - return nil, err - } - - return pconn.roundTrip(treq) -} - -// RegisterProtocol registers a new protocol with scheme. -// The Transport will pass requests using the given scheme to rt. -// It is rt's responsibility to simulate HTTP request semantics. -// -// RegisterProtocol can be used by other packages to provide -// implementations of protocol schemes like "ftp" or "file". 
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { - if scheme == "http" || scheme == "https" { - panic("protocol " + scheme + " already registered") - } - t.altMu.Lock() - defer t.altMu.Unlock() - if t.altProto == nil { - t.altProto = make(map[string]RoundTripper) - } - if _, exists := t.altProto[scheme]; exists { - panic("protocol " + scheme + " already registered") - } - t.altProto[scheme] = rt -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle in -// a "keep-alive" state. It does not interrupt any connections currently -// in use. -func (t *Transport) CloseIdleConnections() { - t.idleMu.Lock() - m := t.idleConn - t.idleConn = nil - t.idleConnCh = nil - t.idleMu.Unlock() - for _, conns := range m { - for _, pconn := range conns { - pconn.close() - } - } -} - -// CancelRequest cancels an in-flight request by closing its -// connection. -func (t *Transport) CancelRequest(req *Request) { - t.reqMu.Lock() - cancel := t.reqCanceler[req] - t.reqMu.Unlock() - if cancel != nil { - cancel() - } -} - -// -// Private implementation past this point. -// - -var ( - httpProxyEnv = &envOnce{ - names: []string{"HTTP_PROXY", "http_proxy"}, - } - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// reset is used by tests -func (e *envOnce) reset() { - e.once = sync.Once{} - e.val = "" -} - -func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) { - cm.targetScheme = treq.URL.Scheme - cm.targetAddr = canonicalAddr(treq.URL) - if t.Proxy != nil { - cm.proxyURL, err = t.Proxy(treq.Request) - } - return cm, nil -} - -// proxyAuth returns the Proxy-Authorization header to set -// on requests, if applicable. -func (cm *connectMethod) proxyAuth() string { - if cm.proxyURL == nil { - return "" - } - if u := cm.proxyURL.User; u != nil { - username := u.Username() - password, _ := u.Password() - return "Basic " + basicAuth(username, password) - } - return "" -} - -// putIdleConn adds pconn to the list of idle persistent connections awaiting -// a new request. -// If pconn is no longer needed or not in a good state, putIdleConn -// returns false. -func (t *Transport) putIdleConn(pconn *persistConn) bool { - if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { - pconn.close() - return false - } - if pconn.isBroken() { - return false - } - key := pconn.cacheKey - max := t.MaxIdleConnsPerHost - if max == 0 { - max = DefaultMaxIdleConnsPerHost - } - t.idleMu.Lock() - - waitingDialer := t.idleConnCh[key] - select { - case waitingDialer <- pconn: - // We're done with this pconn and somebody else is - // currently waiting for a conn of this type (they're - // actively dialing, but this conn is ready - // first). Chrome calls this socket late binding. See - // https://insouciant.org/tech/connection-management-in-chromium/ - t.idleMu.Unlock() - return true - default: - if waitingDialer != nil { - // They had populated this, but their dial won - // first, so we can clean up this map entry. 
- delete(t.idleConnCh, key) - } - } - if t.idleConn == nil { - t.idleConn = make(map[connectMethodKey][]*persistConn) - } - if len(t.idleConn[key]) >= max { - t.idleMu.Unlock() - pconn.close() - return false - } - for _, exist := range t.idleConn[key] { - if exist == pconn { - log.Fatalf("dup idle pconn %p in freelist", pconn) - } - } - t.idleConn[key] = append(t.idleConn[key], pconn) - t.idleMu.Unlock() - return true -} - -// getIdleConnCh returns a channel to receive and return idle -// persistent connection for the given connectMethod. -// It may return nil, if persistent connections are not being used. -func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn { - if t.DisableKeepAlives { - return nil - } - key := cm.key() - t.idleMu.Lock() - defer t.idleMu.Unlock() - if t.idleConnCh == nil { - t.idleConnCh = make(map[connectMethodKey]chan *persistConn) - } - ch, ok := t.idleConnCh[key] - if !ok { - ch = make(chan *persistConn) - t.idleConnCh[key] = ch - } - return ch -} - -func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn) { - key := cm.key() - t.idleMu.Lock() - defer t.idleMu.Unlock() - if t.idleConn == nil { - return nil - } - for { - pconns, ok := t.idleConn[key] - if !ok { - return nil - } - if len(pconns) == 1 { - pconn = pconns[0] - delete(t.idleConn, key) - } else { - // 2 or more cached connections; pop last - // TODO: queue? - pconn = pconns[len(pconns)-1] - t.idleConn[key] = pconns[:len(pconns)-1] - } - if !pconn.isBroken() { - return - } - } -} - -func (t *Transport) setReqCanceler(r *Request, fn func()) { - t.reqMu.Lock() - defer t.reqMu.Unlock() - if t.reqCanceler == nil { - t.reqCanceler = make(map[*Request]func()) - } - if fn != nil { - t.reqCanceler[r] = fn - } else { - delete(t.reqCanceler, r) - } -} - -func (t *Transport) dial(network, addr string) (c net.Conn, err error) { - if t.Dial != nil { - return t.Dial(network, addr) - } - return net.Dial(network, addr) -} - -// getConn dials and creates a new persistConn to the target as -// specified in the connectMethod. This includes doing a proxy CONNECT -// and/or setting up TLS. If this doesn't return an error, the persistConn -// is ready to write requests to. -func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error) { - if pc := t.getIdleConn(cm); pc != nil { - return pc, nil - } - - type dialRes struct { - pc *persistConn - err error - } - dialc := make(chan dialRes) - - handlePendingDial := func() { - if v := <-dialc; v.err == nil { - t.putIdleConn(v.pc) - } - } - - cancelc := make(chan struct{}) - t.setReqCanceler(req, func() { close(cancelc) }) - - go func() { - pc, err := t.dialConn(cm) - dialc <- dialRes{pc, err} - }() - - idleConnCh := t.getIdleConnCh(cm) - select { - case v := <-dialc: - // Our dial finished. - return v.pc, v.err - case pc := <-idleConnCh: - // Another request finished first and its net.Conn - // became available before our dial. Or somebody - // else's dial that they didn't use. 
- // But our dial is still going, so give it away - // when it finishes: - go handlePendingDial() - return pc, nil - case <-cancelc: - go handlePendingDial() - return nil, errors.New("net/http: request canceled while waiting for connection") - } -} - -func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { - conn, err := t.dial("tcp", cm.addr()) - if err != nil { - if cm.proxyURL != nil { - err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) - } - return nil, err - } - - pa := cm.proxyAuth() - - pconn := &persistConn{ - t: t, - cacheKey: cm.key(), - conn: conn, - reqch: make(chan requestAndChan, 1), - writech: make(chan writeRequest, 1), - closech: make(chan struct{}), - writeErrCh: make(chan error, 1), - } - - switch { - case cm.proxyURL == nil: - // Do nothing. - case cm.targetScheme == "http": - pconn.isProxy = true - if pa != "" { - pconn.mutateHeaderFunc = func(h Header) { - h.Set("Proxy-Authorization", pa) - } - } - case cm.targetScheme == "https": - connectReq := &Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: cm.targetAddr}, - Host: cm.targetAddr, - Header: make(Header), - } - if pa != "" { - connectReq.Header.Set("Proxy-Authorization", pa) - } - connectReq.Write(conn) - - // Read response. - // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - conn.Close() - return nil, errors.New(f[1]) - } - } - - if cm.targetScheme == "https" { - // Initiate TLS and check remote host name against certificate. - cfg := t.TLSClientConfig - if cfg == nil || cfg.ServerName == "" { - host := cm.tlsHost() - if cfg == nil { - cfg = &tls.Config{ServerName: host} - } else { - clone := *cfg // shallow clone - clone.ServerName = host - cfg = &clone - } - } - plainConn := conn - tlsConn := tls.Client(plainConn, cfg) - errc := make(chan error, 2) - var timer *time.Timer // for canceling TLS handshake - if d := t.TLSHandshakeTimeout; d != 0 { - timer = time.AfterFunc(d, func() { - errc <- tlsHandshakeTimeoutError{} - }) - } - go func() { - err := tlsConn.Handshake() - if timer != nil { - timer.Stop() - } - errc <- err - }() - if err := <-errc; err != nil { - plainConn.Close() - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - plainConn.Close() - return nil, err - } - } - cs := tlsConn.ConnectionState() - pconn.tlsState = &cs - pconn.conn = tlsConn - } - - pconn.br = bufio.NewReader(noteEOFReader{pconn.conn, &pconn.sawEOF}) - pconn.bw = bufio.NewWriter(pconn.conn) - go pconn.readLoop() - go pconn.writeLoop() - return pconn, nil -} - -// useProxy returns true if requests to addr should use a proxy, -// according to the NO_PROXY or no_proxy environment variable. -// addr is always a canonicalAddr with a host and port. 
-func useProxy(addr string) bool { - if len(addr) == 0 { - return true - } - host, _, err := net.SplitHostPort(addr) - if err != nil { - return false - } - if host == "localhost" { - return false - } - if ip := net.ParseIP(host); ip != nil { - if ip.IsLoopback() { - return false - } - } - - no_proxy := noProxyEnv.Get() - if no_proxy == "*" { - return false - } - - addr = strings.ToLower(strings.TrimSpace(addr)) - if hasPort(addr) { - addr = addr[:strings.LastIndex(addr, ":")] - } - - for _, p := range strings.Split(no_proxy, ",") { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - if hasPort(p) { - p = p[:strings.LastIndex(p, ":")] - } - if addr == p { - return false - } - if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { - // no_proxy ".foo.com" matches "bar.foo.com" or "foo.com" - return false - } - if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { - // no_proxy "foo.com" matches "bar.foo.com" - return false - } - } - return true -} - -// connectMethod is the map key (in its String form) for keeping persistent -// TCP connections alive for subsequent HTTP requests. -// -// A connect method may be of the following types: -// -// Cache key form Description -// ----------------- ------------------------- -// |http|foo.com http directly to server, no proxy -// |https|foo.com https directly to server, no proxy -// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com -// http://proxy.com|http http to proxy, http to anywhere after that -// -// Note: no support to https to the proxy yet. -// -type connectMethod struct { - proxyURL *url.URL // nil for no proxy, else full proxy URL - targetScheme string // "http" or "https" - targetAddr string // Not used if proxy + http targetScheme (4th example in table) -} - -func (cm *connectMethod) key() connectMethodKey { - proxyStr := "" - targetAddr := cm.targetAddr - if cm.proxyURL != nil { - proxyStr = cm.proxyURL.String() - if cm.targetScheme == "http" { - targetAddr = "" - } - } - return connectMethodKey{ - proxy: proxyStr, - scheme: cm.targetScheme, - addr: targetAddr, - } -} - -// addr returns the first hop "host:port" to which we need to TCP connect. -func (cm *connectMethod) addr() string { - if cm.proxyURL != nil { - return canonicalAddr(cm.proxyURL) - } - return cm.targetAddr -} - -// tlsHost returns the host name to match against the peer's -// TLS certificate. -func (cm *connectMethod) tlsHost() string { - h := cm.targetAddr - if hasPort(h) { - h = h[:strings.LastIndex(h, ":")] - } - return h -} - -// connectMethodKey is the map key version of connectMethod, with a -// stringified proxy URL (or the empty string) instead of a pointer to -// a URL. -type connectMethodKey struct { - proxy, scheme, addr string -} - -func (k connectMethodKey) String() string { - // Only used by tests. 
- return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr) -} - -// persistConn wraps a connection, usually a persistent one -// (but may be used for non-keep-alive requests as well) -type persistConn struct { - t *Transport - cacheKey connectMethodKey - conn net.Conn - tlsState *tls.ConnectionState - br *bufio.Reader // from conn - sawEOF bool // whether we've seen EOF from conn; owned by readLoop - bw *bufio.Writer // to conn - reqch chan requestAndChan // written by roundTrip; read by readLoop - writech chan writeRequest // written by roundTrip; read by writeLoop - closech chan struct{} // closed when conn closed - isProxy bool - // writeErrCh passes the request write error (usually nil) - // from the writeLoop goroutine to the readLoop which passes - // it off to the res.Body reader, which then uses it to decide - // whether or not a connection can be reused. Issue 7569. - writeErrCh chan error - - lk sync.Mutex // guards following fields - numExpectedResponses int - closed bool // whether conn has been closed - broken bool // an error has happened on this connection; marked broken so it's not reused. - // mutateHeaderFunc is an optional func to modify extra - // headers on each outbound request before it's written. (the - // original Request given to RoundTrip is not modified) - mutateHeaderFunc func(Header) -} - -// isBroken reports whether this connection is in a known broken state. -func (pc *persistConn) isBroken() bool { - pc.lk.Lock() - b := pc.broken - pc.lk.Unlock() - return b -} - -func (pc *persistConn) cancelRequest() { - pc.conn.Close() -} - -var remoteSideClosedFunc func(error) bool // or nil to use default - -func remoteSideClosed(err error) bool { - if err == io.EOF { - return true - } - if remoteSideClosedFunc != nil { - return remoteSideClosedFunc(err) - } - return false -} - -func (pc *persistConn) readLoop() { - alive := true - - for alive { - pb, err := pc.br.Peek(1) - - pc.lk.Lock() - if pc.numExpectedResponses == 0 { - if !pc.closed { - pc.closeLocked() - if len(pb) > 0 { - log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", - string(pb), err) - } - } - pc.lk.Unlock() - return - } - pc.lk.Unlock() - - rc := <-pc.reqch - - var resp *Response - if err == nil { - resp, err = ReadResponse(pc.br, rc.req) - if err == nil && resp.StatusCode == 100 { - // Skip any 100-continue for now. - // TODO(bradfitz): if rc.req had "Expect: 100-continue", - // actually block the request body write and signal the - // writeLoop now to begin sending it. (Issue 2184) For now we - // eat it, since we're never expecting one. - resp, err = ReadResponse(pc.br, rc.req) - } - } - - if resp != nil { - resp.TLS = pc.tlsState - } - - hasBody := resp != nil && rc.req.Method != "HEAD" && resp.ContentLength != 0 - - if err != nil { - pc.close() - } else { - if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" { - resp.Header.Del("Content-Encoding") - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - resp.Body = &gzipReader{body: resp.Body} - } - resp.Body = &bodyEOFSignal{body: resp.Body} - } - - if err != nil || resp.Close || rc.req.Close || resp.StatusCode <= 199 { - // Don't do keep-alive on error if either party requested a close - // or we get an unexpected informational (1xx) response. - // StatusCode 100 is already handled above. 
- alive = false - } - - var waitForBodyRead chan bool - if hasBody { - waitForBodyRead = make(chan bool, 2) - resp.Body.(*bodyEOFSignal).earlyCloseFn = func() error { - // Sending false here sets alive to - // false and closes the connection - // below. - waitForBodyRead <- false - return nil - } - resp.Body.(*bodyEOFSignal).fn = func(err error) { - waitForBodyRead <- alive && - err == nil && - !pc.sawEOF && - pc.wroteRequest() && - pc.t.putIdleConn(pc) - } - } - - if alive && !hasBody { - alive = !pc.sawEOF && - pc.wroteRequest() && - pc.t.putIdleConn(pc) - } - - rc.ch <- responseAndError{resp, err} - - // Wait for the just-returned response body to be fully consumed - // before we race and peek on the underlying bufio reader. - if waitForBodyRead != nil { - select { - case alive = <-waitForBodyRead: - case <-pc.closech: - alive = false - } - } - - pc.t.setReqCanceler(rc.req, nil) - - if !alive { - pc.close() - } - } -} - -func (pc *persistConn) writeLoop() { - for { - select { - case wr := <-pc.writech: - if pc.isBroken() { - wr.ch <- errors.New("http: can't write HTTP request on broken connection") - continue - } - err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra) - if err == nil { - err = pc.bw.Flush() - } - if err != nil { - pc.markBroken() - wr.req.Request.closeBody() - } - pc.writeErrCh <- err // to the body reader, which might recycle us - wr.ch <- err // to the roundTrip function - case <-pc.closech: - return - } - } -} - -// wroteRequest is a check before recycling a connection that the previous write -// (from writeLoop above) happened and was successful. -func (pc *persistConn) wroteRequest() bool { - select { - case err := <-pc.writeErrCh: - // Common case: the write happened well before the response, so - // avoid creating a timer. - return err == nil - default: - // Rare case: the request was written in writeLoop above but - // before it could send to pc.writeErrCh, the reader read it - // all, processed it, and called us here. In this case, give the - // write goroutine a bit of time to finish its send. - // - // Less rare case: We also get here in the legitimate case of - // Issue 7569, where the writer is still writing (or stalled), - // but the server has already replied. In this case, we don't - // want to wait too long, and we want to return false so this - // connection isn't re-used. - select { - case err := <-pc.writeErrCh: - return err == nil - case <-time.After(50 * time.Millisecond): - return false - } - } -} - -type responseAndError struct { - res *Response - err error -} - -type requestAndChan struct { - req *Request - ch chan responseAndError - - // did the Transport (as opposed to the client code) add an - // Accept-Encoding gzip header? only if it we set it do - // we transparently decode the gzip. - addedGzip bool -} - -// A writeRequest is sent by the readLoop's goroutine to the -// writeLoop's goroutine to write a request while the read loop -// concurrently waits on both the write response and the server's -// reply. 
-type writeRequest struct { - req *transportRequest - ch chan<- error -} - -type httpError struct { - err string - timeout bool -} - -func (e *httpError) Error() string { return e.err } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true} -var errClosed error = &httpError{err: "net/http: transport closed before response was received"} - -func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) { - pc.t.setReqCanceler(req.Request, pc.cancelRequest) - pc.lk.Lock() - pc.numExpectedResponses++ - headerFn := pc.mutateHeaderFunc - pc.lk.Unlock() - - if headerFn != nil { - headerFn(req.extraHeaders()) - } - - // Ask for a compressed version if the caller didn't set their - // own value for Accept-Encoding. We only attempted to - // uncompress the gzip stream if we were the layer that - // requested it. - requestedGzip := false - if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // http://golang.org/issue/5522 - requestedGzip = true - req.extraHeaders().Set("Accept-Encoding", "gzip") - } - - // Write the request concurrently with waiting for a response, - // in case the server decides to reply before reading our full - // request body. - writeErrCh := make(chan error, 1) - pc.writech <- writeRequest{req, writeErrCh} - - resc := make(chan responseAndError, 1) - pc.reqch <- requestAndChan{req.Request, resc, requestedGzip} - - var re responseAndError - var pconnDeadCh = pc.closech - var failTicker <-chan time.Time - var respHeaderTimer <-chan time.Time -WaitResponse: - for { - select { - case err := <-writeErrCh: - if err != nil { - re = responseAndError{nil, err} - pc.close() - break WaitResponse - } - if d := pc.t.ResponseHeaderTimeout; d > 0 { - respHeaderTimer = time.After(d) - } - case <-pconnDeadCh: - // The persist connection is dead. This shouldn't - // usually happen (only with Connection: close responses - // with no response bodies), but if it does happen it - // means either a) the remote server hung up on us - // prematurely, or b) the readLoop sent us a response & - // closed its closech at roughly the same time, and we - // selected this case first, in which case a response - // might still be coming soon. - // - // We can't avoid the select race in b) by using a unbuffered - // resc channel instead, because then goroutines can - // leak if we exit due to other errors. - pconnDeadCh = nil // avoid spinning - failTicker = time.After(100 * time.Millisecond) // arbitrary time to wait for resc - case <-failTicker: - re = responseAndError{err: errClosed} - break WaitResponse - case <-respHeaderTimer: - pc.close() - re = responseAndError{err: errTimeout} - break WaitResponse - case re = <-resc: - break WaitResponse - } - } - - pc.lk.Lock() - pc.numExpectedResponses-- - pc.lk.Unlock() - - if re.err != nil { - pc.t.setReqCanceler(req.Request, nil) - } - return re.res, re.err -} - -// markBroken marks a connection as broken (so it's not reused). -// It differs from close in that it doesn't close the underlying -// connection for use when it's still being read. 
-func (pc *persistConn) markBroken() { - pc.lk.Lock() - defer pc.lk.Unlock() - pc.broken = true -} - -func (pc *persistConn) close() { - pc.lk.Lock() - defer pc.lk.Unlock() - pc.closeLocked() -} - -func (pc *persistConn) closeLocked() { - pc.broken = true - if !pc.closed { - pc.conn.Close() - pc.closed = true - close(pc.closech) - } - pc.mutateHeaderFunc = nil -} - -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} - -// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most -// once, right before its final (error-producing) Read or Close call -// returns. If earlyCloseFn is non-nil and Close is called before -// io.EOF is seen, earlyCloseFn is called instead of fn, and its -// return value is the return value from Close. -type bodyEOFSignal struct { - body io.ReadCloser - mu sync.Mutex // guards following 4 fields - closed bool // whether Close has been called - rerr error // sticky Read error - fn func(error) // error will be nil on Read io.EOF - earlyCloseFn func() error // optional alt Close func used if io.EOF not seen -} - -func (es *bodyEOFSignal) Read(p []byte) (n int, err error) { - es.mu.Lock() - closed, rerr := es.closed, es.rerr - es.mu.Unlock() - if closed { - return 0, errors.New("http: read on closed response body") - } - if rerr != nil { - return 0, rerr - } - - n, err = es.body.Read(p) - if err != nil { - es.mu.Lock() - defer es.mu.Unlock() - if es.rerr == nil { - es.rerr = err - } - es.condfn(err) - } - return -} - -func (es *bodyEOFSignal) Close() error { - es.mu.Lock() - defer es.mu.Unlock() - if es.closed { - return nil - } - es.closed = true - if es.earlyCloseFn != nil && es.rerr != io.EOF { - return es.earlyCloseFn() - } - err := es.body.Close() - es.condfn(err) - return err -} - -// caller must hold es.mu. -func (es *bodyEOFSignal) condfn(err error) { - if es.fn == nil { - return - } - if err == io.EOF { - err = nil - } - es.fn(err) - es.fn = nil -} - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr io.Reader // lazily-initialized gzip reader -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type readerAndCloser struct { - io.Reader - io.Closer -} - -type tlsHandshakeTimeoutError struct{} - -func (tlsHandshakeTimeoutError) Timeout() bool { return true } -func (tlsHandshakeTimeoutError) Temporary() bool { return true } -func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } - -type noteEOFReader struct { - r io.Reader - sawEOF *bool -} - -func (nr noteEOFReader) Read(p []byte) (n int, err error) { - n, err = nr.r.Read(p) - if err == io.EOF { - *nr.sawEOF = true - } - return -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/triv.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/http/triv.go deleted file mode 100644 index 232d65089..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/http/triv.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "expvar" - "flag" - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "strconv" - "sync" -) - -// hello world, the web server -var helloRequests = expvar.NewInt("hello-requests") - -func HelloServer(w http.ResponseWriter, req *http.Request) { - helloRequests.Add(1) - io.WriteString(w, "hello, world!\n") -} - -// Simple counter server. POSTing to it will set the value. -type Counter struct { - mu sync.Mutex // protects n - n int -} - -// This makes Counter satisfy the expvar.Var interface, so we can export -// it directly. -func (ctr *Counter) String() string { - ctr.mu.Lock() - defer ctr.mu.Unlock() - return fmt.Sprintf("%d", ctr.n) -} - -func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { - ctr.mu.Lock() - defer ctr.mu.Unlock() - switch req.Method { - case "GET": - ctr.n++ - case "POST": - buf := new(bytes.Buffer) - io.Copy(buf, req.Body) - body := buf.String() - if n, err := strconv.Atoi(body); err != nil { - fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) - } else { - ctr.n = n - fmt.Fprint(w, "counter reset\n") - } - } - fmt.Fprintf(w, "counter = %d\n", ctr.n) -} - -// simple flag server -var booleanflag = flag.Bool("boolean", true, "another flag for testing") - -func FlagServer(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprint(w, "Flags:\n") - flag.VisitAll(func(f *flag.Flag) { - if f.Value.String() != f.DefValue { - fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) - } else { - fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) - } - }) -} - -// simple argument server -func ArgServer(w http.ResponseWriter, req *http.Request) { - for _, s := range os.Args { - fmt.Fprint(w, s, " ") - } -} - -// a channel (just for the fun of it) -type Chan chan int - -func ChanCreate() Chan { - c := make(Chan) - go func(c Chan) { - for x := 0; ; x++ { - c <- x - } - }(c) - return c -} - -func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { - io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) -} - -// exec a program, redirecting output -func DateServer(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("Content-Type", "text/plain; charset=utf-8") - - date, err := exec.Command("/bin/date").Output() - if err != nil { - http.Error(rw, err.Error(), 500) - return - } - rw.Write(date) -} - -func Logger(w http.ResponseWriter, req *http.Request) { - log.Print(req.URL) - http.Error(w, "oops", 404) -} - -var webroot = flag.String("root", os.Getenv("HOME"), "web root directory") - -func main() { - flag.Parse() - - // The counter is published as a variable directly. 
- ctr := new(Counter) - expvar.Publish("counter", ctr) - http.Handle("/counter", ctr) - http.Handle("/", http.HandlerFunc(Logger)) - http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) - http.Handle("/chan", ChanCreate()) - http.HandleFunc("/flags", FlagServer) - http.HandleFunc("/args", ArgServer) - http.HandleFunc("/go/hello", HelloServer) - http.HandleFunc("/date", DateServer) - err := http.ListenAndServe(":12345", nil) - if err != nil { - log.Panicln("ListenAndServe:", err) - } -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/alert.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/alert.go deleted file mode 100644 index 0856311e4..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/alert.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import "strconv" - -type alert uint8 - -const ( - // alert level - alertLevelWarning = 1 - alertLevelError = 2 -) - -const ( - alertCloseNotify alert = 0 - alertUnexpectedMessage alert = 10 - alertBadRecordMAC alert = 20 - alertDecryptionFailed alert = 21 - alertRecordOverflow alert = 22 - alertDecompressionFailure alert = 30 - alertHandshakeFailure alert = 40 - alertBadCertificate alert = 42 - alertUnsupportedCertificate alert = 43 - alertCertificateRevoked alert = 44 - alertCertificateExpired alert = 45 - alertCertificateUnknown alert = 46 - alertIllegalParameter alert = 47 - alertUnknownCA alert = 48 - alertAccessDenied alert = 49 - alertDecodeError alert = 50 - alertDecryptError alert = 51 - alertProtocolVersion alert = 70 - alertInsufficientSecurity alert = 71 - alertInternalError alert = 80 - alertUserCanceled alert = 90 - alertNoRenegotiation alert = 100 -) - -var alertText = map[alert]string{ - alertCloseNotify: "close notify", - alertUnexpectedMessage: "unexpected message", - alertBadRecordMAC: "bad record MAC", - alertDecryptionFailed: "decryption failed", - alertRecordOverflow: "record overflow", - alertDecompressionFailure: "decompression failure", - alertHandshakeFailure: "handshake failure", - alertBadCertificate: "bad certificate", - alertUnsupportedCertificate: "unsupported certificate", - alertCertificateRevoked: "revoked certificate", - alertCertificateExpired: "expired certificate", - alertCertificateUnknown: "unknown certificate", - alertIllegalParameter: "illegal parameter", - alertUnknownCA: "unknown certificate authority", - alertAccessDenied: "access denied", - alertDecodeError: "error decoding message", - alertDecryptError: "error decrypting message", - alertProtocolVersion: "protocol version not supported", - alertInsufficientSecurity: "insufficient security level", - alertInternalError: "internal error", - alertUserCanceled: "user canceled", - alertNoRenegotiation: "no renegotiation", -} - -func (e alert) String() string { - s, ok := alertText[e] - if ok { - return s - } - return "alert(" + strconv.Itoa(int(e)) + ")" -} - -func (e alert) Error() string { - return e.String() -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/cipher_suites.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/cipher_suites.go deleted file mode 100644 index 39a51459d..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/cipher_suites.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/hmac" - "crypto/rc4" - "crypto/sha1" - "crypto/x509" - "hash" -) - -// a keyAgreement implements the client and server side of a TLS key agreement -// protocol by generating and processing key exchange messages. -type keyAgreement interface { - // On the server side, the first two methods are called in order. - - // In the case that the key agreement protocol doesn't use a - // ServerKeyExchange message, generateServerKeyExchange can return nil, - // nil. - generateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error) - processClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error) - - // On the client side, the next two methods are called in order. - - // This method may not be called if the server doesn't send a - // ServerKeyExchange message. - processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error - generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) -} - -const ( - // suiteECDH indicates that the cipher suite involves elliptic curve - // Diffie-Hellman. This means that it should only be selected when the - // client indicates that it supports ECC with a curve and point format - // that we're happy with. - suiteECDHE = 1 << iota - // suiteECDSA indicates that the cipher suite involves an ECDSA - // signature and therefore may only be selected when the server's - // certificate is ECDSA. If this is not set then the cipher suite is - // RSA based. - suiteECDSA - // suiteTLS12 indicates that the cipher suite should only be advertised - // and accepted when using TLS 1.2. - suiteTLS12 -) - -// A cipherSuite is a specific combination of key agreement, cipher and MAC -// function. All cipher suites currently assume RSA key agreement. -type cipherSuite struct { - id uint16 - // the lengths, in bytes, of the key material needed for each component. - keyLen int - macLen int - ivLen int - ka func(version uint16) keyAgreement - // flags is a bitmask of the suite* values, above. - flags int - cipher func(key, iv []byte, isRead bool) interface{} - mac func(version uint16, macKey []byte) macFunction - aead func(key, fixedNonce []byte) cipher.AEAD -} - -var cipherSuites = []*cipherSuite{ - // Ciphersuite order is chosen so that ECDHE comes before plain RSA - // and RC4 comes before AES (because of the Lucky13 attack). 
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM}, - {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM}, - {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherRC4, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, - {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil}, - {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, - {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil}, - {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil}, -} - -func cipherRC4(key, iv []byte, isRead bool) interface{} { - cipher, _ := rc4.NewCipher(key) - return cipher -} - -func cipher3DES(key, iv []byte, isRead bool) interface{} { - block, _ := des.NewTripleDESCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -func cipherAES(key, iv []byte, isRead bool) interface{} { - block, _ := aes.NewCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -// macSHA1 returns a macFunction for the given protocol version. -func macSHA1(version uint16, key []byte) macFunction { - if version == VersionSSL30 { - mac := ssl30MAC{ - h: sha1.New(), - key: make([]byte, len(key)), - } - copy(mac.key, key) - return mac - } - return tls10MAC{hmac.New(sha1.New, key)} -} - -type macFunction interface { - Size() int - MAC(digestBuf, seq, header, data []byte) []byte -} - -// fixedNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to -// each call. -type fixedNonceAEAD struct { - // sealNonce and openNonce are buffers where the larger nonce will be - // constructed. Since a seal and open operation may be running - // concurrently, there is a separate buffer for each. 
- sealNonce, openNonce []byte - aead cipher.AEAD -} - -func (f *fixedNonceAEAD) NonceSize() int { return 8 } -func (f *fixedNonceAEAD) Overhead() int { return f.aead.Overhead() } - -func (f *fixedNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { - copy(f.sealNonce[len(f.sealNonce)-8:], nonce) - return f.aead.Seal(out, f.sealNonce, plaintext, additionalData) -} - -func (f *fixedNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) { - copy(f.openNonce[len(f.openNonce)-8:], nonce) - return f.aead.Open(out, f.openNonce, plaintext, additionalData) -} - -func aeadAESGCM(key, fixedNonce []byte) cipher.AEAD { - aes, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - aead, err := cipher.NewGCM(aes) - if err != nil { - panic(err) - } - - nonce1, nonce2 := make([]byte, 12), make([]byte, 12) - copy(nonce1, fixedNonce) - copy(nonce2, fixedNonce) - - return &fixedNonceAEAD{nonce1, nonce2, aead} -} - -// ssl30MAC implements the SSLv3 MAC function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1 -type ssl30MAC struct { - h hash.Hash - key []byte -} - -func (s ssl30MAC) Size() int { - return s.h.Size() -} - -var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36} - -var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c} - -func (s ssl30MAC) MAC(digestBuf, seq, header, data []byte) []byte { - padLength := 48 - if s.h.Size() == 20 { - padLength = 40 - } - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad1[:padLength]) - s.h.Write(seq) - s.h.Write(header[:1]) - s.h.Write(header[3:5]) - s.h.Write(data) - digestBuf = s.h.Sum(digestBuf[:0]) - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad2[:padLength]) - s.h.Write(digestBuf) - return s.h.Sum(digestBuf[:0]) -} - -// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3. -type tls10MAC struct { - h hash.Hash -} - -func (s tls10MAC) Size() int { - return s.h.Size() -} - -func (s tls10MAC) MAC(digestBuf, seq, header, data []byte) []byte { - s.h.Reset() - s.h.Write(seq) - s.h.Write(header) - s.h.Write(data) - return s.h.Sum(digestBuf[:0]) -} - -func rsaKA(version uint16) keyAgreement { - return rsaKeyAgreement{} -} - -func ecdheECDSAKA(version uint16) keyAgreement { - return &ecdheKeyAgreement{ - sigType: signatureECDSA, - version: version, - } -} - -func ecdheRSAKA(version uint16) keyAgreement { - return &ecdheKeyAgreement{ - sigType: signatureRSA, - version: version, - } -} - -// mutualCipherSuite returns a cipherSuite given a list of supported -// ciphersuites and the id requested by the peer. -func mutualCipherSuite(have []uint16, want uint16) *cipherSuite { - for _, id := range have { - if id == want { - for _, suite := range cipherSuites { - if suite.id == want { - return suite - } - } - return nil - } - } - return nil -} - -// A list of the possible cipher suite ids. 
Taken from -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml -const ( - TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a - TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f - TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a - TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011 - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014 - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b -) diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/common.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/common.go deleted file mode 100644 index 7eb283c5a..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/common.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "io" - "math/big" - "strings" - "sync" - "time" -) - -const ( - VersionSSL30 = 0x0300 - VersionTLS10 = 0x0301 - VersionTLS11 = 0x0302 - VersionTLS12 = 0x0303 -) - -const ( - maxPlaintext = 16384 // maximum plaintext payload length - maxCiphertext = 16384 + 2048 // maximum ciphertext payload length - recordHeaderLen = 5 // record header length - maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) - - minVersion = VersionSSL30 - maxVersion = VersionTLS12 -) - -// TLS record types. -type recordType uint8 - -const ( - recordTypeChangeCipherSpec recordType = 20 - recordTypeAlert recordType = 21 - recordTypeHandshake recordType = 22 - recordTypeApplicationData recordType = 23 -) - -// TLS handshake message types. -const ( - typeHelloRequest uint8 = 0 - typeClientHello uint8 = 1 - typeServerHello uint8 = 2 - typeNewSessionTicket uint8 = 4 - typeCertificate uint8 = 11 - typeServerKeyExchange uint8 = 12 - typeCertificateRequest uint8 = 13 - typeServerHelloDone uint8 = 14 - typeCertificateVerify uint8 = 15 - typeClientKeyExchange uint8 = 16 - typeFinished uint8 = 20 - typeCertificateStatus uint8 = 22 - typeNextProtocol uint8 = 67 // Not IANA assigned -) - -// TLS compression types. 
-const ( - compressionNone uint8 = 0 -) - -// TLS extension numbers -var ( - extensionServerName uint16 = 0 - extensionStatusRequest uint16 = 5 - extensionSupportedCurves uint16 = 10 - extensionSupportedPoints uint16 = 11 - extensionSignatureAlgorithms uint16 = 13 - extensionSessionTicket uint16 = 35 - extensionNextProtoNeg uint16 = 13172 // not IANA assigned -) - -// TLS Elliptic Curves -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 -var ( - curveP256 uint16 = 23 - curveP384 uint16 = 24 - curveP521 uint16 = 25 -) - -// TLS Elliptic Curve Point Formats -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 -var ( - pointFormatUncompressed uint8 = 0 -) - -// TLS CertificateStatusType (RFC 3546) -const ( - statusTypeOCSP uint8 = 1 -) - -// Certificate types (for certificateRequestMsg) -const ( - certTypeRSASign = 1 // A certificate containing an RSA key - certTypeDSSSign = 2 // A certificate containing a DSA key - certTypeRSAFixedDH = 3 // A certificate containing a static DH key - certTypeDSSFixedDH = 4 // A certificate containing a static DH key - - // See RFC4492 sections 3 and 5.5. - certTypeECDSASign = 64 // A certificate containing an ECDSA-capable public key, signed with ECDSA. - certTypeRSAFixedECDH = 65 // A certificate containing an ECDH-capable public key, signed with RSA. - certTypeECDSAFixedECDH = 66 // A certificate containing an ECDH-capable public key, signed with ECDSA. - - // Rest of these are reserved by the TLS spec -) - -// Hash functions for TLS 1.2 (See RFC 5246, section A.4.1) -const ( - hashSHA1 uint8 = 2 - hashSHA256 uint8 = 4 -) - -// Signature algorithms for TLS 1.2 (See RFC 5246, section A.4.1) -const ( - signatureRSA uint8 = 1 - signatureECDSA uint8 = 3 -) - -// signatureAndHash mirrors the TLS 1.2, SignatureAndHashAlgorithm struct. See -// RFC 5246, section A.4.1. -type signatureAndHash struct { - hash, signature uint8 -} - -// supportedSKXSignatureAlgorithms contains the signature and hash algorithms -// that the code advertises as supported in a TLS 1.2 ClientHello. -var supportedSKXSignatureAlgorithms = []signatureAndHash{ - {hashSHA256, signatureRSA}, - {hashSHA256, signatureECDSA}, - {hashSHA1, signatureRSA}, - {hashSHA1, signatureECDSA}, -} - -// supportedClientCertSignatureAlgorithms contains the signature and hash -// algorithms that the code advertises as supported in a TLS 1.2 -// CertificateRequest. -var supportedClientCertSignatureAlgorithms = []signatureAndHash{ - {hashSHA256, signatureRSA}, - {hashSHA256, signatureECDSA}, -} - -// ConnectionState records basic TLS details about the connection. -type ConnectionState struct { - HandshakeComplete bool // TLS handshake is complete - DidResume bool // connection resumes a previous TLS connection - CipherSuite uint16 // cipher suite in use (TLS_RSA_WITH_RC4_128_SHA, ...) - NegotiatedProtocol string // negotiated next protocol (from Config.NextProtos) - NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server - ServerName string // server name requested by client, if any (server side only) - PeerCertificates []*x509.Certificate // certificate chain presented by remote peer - VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates -} - -// ClientAuthType declares the policy the server will follow for -// TLS Client Authentication. 
-type ClientAuthType int - -const ( - NoClientCert ClientAuthType = iota - RequestClientCert - RequireAnyClientCert - VerifyClientCertIfGiven - RequireAndVerifyClientCert -) - -// A Config structure is used to configure a TLS client or server. After one -// has been passed to a TLS function it must not be modified. -type Config struct { - // Rand provides the source of entropy for nonces and RSA blinding. - // If Rand is nil, TLS uses the cryptographic random reader in package - // crypto/rand. - Rand io.Reader - - // Time returns the current time as the number of seconds since the epoch. - // If Time is nil, TLS uses time.Now. - Time func() time.Time - - // Certificates contains one or more certificate chains - // to present to the other side of the connection. - // Server configurations must include at least one certificate. - Certificates []Certificate - - // NameToCertificate maps from a certificate name to an element of - // Certificates. Note that a certificate name can be of the form - // '*.example.com' and so doesn't have to be a domain name as such. - // See Config.BuildNameToCertificate - // The nil value causes the first element of Certificates to be used - // for all connections. - NameToCertificate map[string]*Certificate - - // RootCAs defines the set of root certificate authorities - // that clients use when verifying server certificates. - // If RootCAs is nil, TLS uses the host's root CA set. - RootCAs *x509.CertPool - - // NextProtos is a list of supported, application level protocols. - NextProtos []string - - // ServerName is included in the client's handshake to support virtual - // hosting. - ServerName string - - // ClientAuth determines the server's policy for - // TLS Client Authentication. The default is NoClientCert. - ClientAuth ClientAuthType - - // ClientCAs defines the set of root certificate authorities - // that servers use if required to verify a client certificate - // by the policy in ClientAuth. - ClientCAs *x509.CertPool - - // InsecureSkipVerify controls whether a client verifies the - // server's certificate chain and host name. - // If InsecureSkipVerify is true, TLS accepts any certificate - // presented by the server and any host name in that certificate. - // In this mode, TLS is susceptible to man-in-the-middle attacks. - // This should be used only for testing. - InsecureSkipVerify bool - - // CipherSuites is a list of supported cipher suites. If CipherSuites - // is nil, TLS uses a list of suites supported by the implementation. - CipherSuites []uint16 - - // PreferServerCipherSuites controls whether the server selects the - // client's most preferred ciphersuite, or the server's most preferred - // ciphersuite. If true then the server's preference, as expressed in - // the order of elements in CipherSuites, is used. - PreferServerCipherSuites bool - - // SessionTicketsDisabled may be set to true to disable session ticket - // (resumption) support. - SessionTicketsDisabled bool - - // SessionTicketKey is used by TLS servers to provide session - // resumption. See RFC 5077. If zero, it will be filled with - // random data before the first server handshake. - // - // If multiple servers are terminating connections for the same host - // they should all have the same SessionTicketKey. If the - // SessionTicketKey leaks, previously recorded and future TLS - // connections using that key are compromised. - SessionTicketKey [32]byte - - // MinVersion contains the minimum SSL/TLS version that is acceptable. 
- // If zero, then SSLv3 is taken as the minimum. - MinVersion uint16 - - // MaxVersion contains the maximum SSL/TLS version that is acceptable. - // If zero, then the maximum version supported by this package is used, - // which is currently TLS 1.2. - MaxVersion uint16 - - serverInitOnce sync.Once // guards calling (*Config).serverInit -} - -func (c *Config) serverInit() { - if c.SessionTicketsDisabled { - return - } - - // If the key has already been set then we have nothing to do. - for _, b := range c.SessionTicketKey { - if b != 0 { - return - } - } - - if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil { - c.SessionTicketsDisabled = true - } -} - -func (c *Config) rand() io.Reader { - r := c.Rand - if r == nil { - return rand.Reader - } - return r -} - -func (c *Config) time() time.Time { - t := c.Time - if t == nil { - t = time.Now - } - return t() -} - -func (c *Config) cipherSuites() []uint16 { - s := c.CipherSuites - if s == nil { - s = defaultCipherSuites() - } - return s -} - -func (c *Config) minVersion() uint16 { - if c == nil || c.MinVersion == 0 { - return minVersion - } - return c.MinVersion -} - -func (c *Config) maxVersion() uint16 { - if c == nil || c.MaxVersion == 0 { - return maxVersion - } - return c.MaxVersion -} - -// mutualVersion returns the protocol version to use given the advertised -// version of the peer. -func (c *Config) mutualVersion(vers uint16) (uint16, bool) { - minVersion := c.minVersion() - maxVersion := c.maxVersion() - - if vers < minVersion { - return 0, false - } - if vers > maxVersion { - vers = maxVersion - } - return vers, true -} - -// getCertificateForName returns the best certificate for the given name, -// defaulting to the first element of c.Certificates if there are no good -// options. -func (c *Config) getCertificateForName(name string) *Certificate { - if len(c.Certificates) == 1 || c.NameToCertificate == nil { - // There's only one choice, so no point doing any work. - return &c.Certificates[0] - } - - name = strings.ToLower(name) - for len(name) > 0 && name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - - if cert, ok := c.NameToCertificate[name]; ok { - return cert - } - - // try replacing labels in the name with wildcards until we get a - // match. - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if cert, ok := c.NameToCertificate[candidate]; ok { - return cert - } - } - - // If nothing matches, return the first certificate. - return &c.Certificates[0] -} - -// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate -// from the CommonName and SubjectAlternateName fields of each of the leaf -// certificates. -func (c *Config) BuildNameToCertificate() { - c.NameToCertificate = make(map[string]*Certificate) - for i := range c.Certificates { - cert := &c.Certificates[i] - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - continue - } - if len(x509Cert.Subject.CommonName) > 0 { - c.NameToCertificate[x509Cert.Subject.CommonName] = cert - } - for _, san := range x509Cert.DNSNames { - c.NameToCertificate[san] = cert - } - } -} - -// A Certificate is a chain of one or more certificates, leaf first. -type Certificate struct { - Certificate [][]byte - PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey, *ecdsa.PrivateKey - // OCSPStaple contains an optional OCSP response which will be served - // to clients that request it. 
- OCSPStaple []byte - // Leaf is the parsed form of the leaf certificate, which may be - // initialized using x509.ParseCertificate to reduce per-handshake - // processing for TLS clients doing client authentication. If nil, the - // leaf certificate will be parsed as needed. - Leaf *x509.Certificate -} - -// A TLS record. -type record struct { - contentType recordType - major, minor uint8 - payload []byte -} - -type handshakeMessage interface { - marshal() []byte - unmarshal([]byte) bool -} - -// TODO(jsing): Make these available to both crypto/x509 and crypto/tls. -type dsaSignature struct { - R, S *big.Int -} - -type ecdsaSignature dsaSignature - -var emptyConfig Config - -func defaultConfig() *Config { - return &emptyConfig -} - -var ( - once sync.Once - varDefaultCipherSuites []uint16 -) - -func defaultCipherSuites() []uint16 { - once.Do(initDefaultCipherSuites) - return varDefaultCipherSuites -} - -func initDefaultCipherSuites() { - varDefaultCipherSuites = make([]uint16, len(cipherSuites)) - for i, suite := range cipherSuites { - varDefaultCipherSuites[i] = suite.id - } -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/conn.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/conn.go deleted file mode 100644 index db036f967..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/conn.go +++ /dev/null @@ -1,1026 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TLS low level connection and record layer - -package tls - -import ( - "bytes" - "crypto/cipher" - "crypto/subtle" - "crypto/x509" - "errors" - "io" - "net" - "sync" - "time" -) - -// A Conn represents a secured connection. -// It implements the net.Conn interface. -type Conn struct { - // constant - conn net.Conn - isClient bool - - // constant after handshake; protected by handshakeMutex - handshakeMutex sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex - vers uint16 // TLS version - haveVers bool // version has been negotiated - config *Config // configuration passed to constructor - handshakeComplete bool - didResume bool // whether this connection was a session resumption - cipherSuite uint16 - ocspResponse []byte // stapled OCSP response - peerCertificates []*x509.Certificate - // verifiedChains contains the certificate chains that we built, as - // opposed to the ones presented by the server. - verifiedChains [][]*x509.Certificate - // serverName contains the server name indicated by the client, if any. - serverName string - - clientProtocol string - clientProtocolFallback bool - - // first permanent error - connErr - - // input/output - in, out halfConn // in.Mutex < out.Mutex - rawInput *block // raw input, right off the wire - input *block // application data waiting to be read - hand bytes.Buffer // handshake data waiting to be read - - tmp [16]byte -} - -type connErr struct { - mu sync.Mutex - value error -} - -func (e *connErr) setError(err error) error { - e.mu.Lock() - defer e.mu.Unlock() - - if e.value == nil { - e.value = err - } - return err -} - -func (e *connErr) error() error { - e.mu.Lock() - defer e.mu.Unlock() - return e.value -} - -// Access to net.Conn methods. -// Cannot just embed net.Conn because that would -// export the struct field too. - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. 
-func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// SetDeadline sets the read and write deadlines associated with the connection. -// A zero value for t means Read and Write will not time out. -// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error. -func (c *Conn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -// SetReadDeadline sets the read deadline on the underlying connection. -// A zero value for t means Read will not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetWriteDeadline sets the write deadline on the underlying conneciton. -// A zero value for t means Write will not time out. -// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error. -func (c *Conn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -// A halfConn represents one direction of the record layer -// connection, either sending or receiving. -type halfConn struct { - sync.Mutex - version uint16 // protocol version - cipher interface{} // cipher algorithm - mac macFunction - seq [8]byte // 64-bit sequence number - bfree *block // list of free blocks - - nextCipher interface{} // next encryption state - nextMac macFunction // next MAC algorithm - - // used to save allocating a new buffer for each MAC. - inDigestBuf, outDigestBuf []byte -} - -// prepareCipherSpec sets the encryption and MAC states -// that a subsequent changeCipherSpec will use. -func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) { - hc.version = version - hc.nextCipher = cipher - hc.nextMac = mac -} - -// changeCipherSpec changes the encryption and MAC states -// to the ones previously passed to prepareCipherSpec. -func (hc *halfConn) changeCipherSpec() error { - if hc.nextCipher == nil { - return alertInternalError - } - hc.cipher = hc.nextCipher - hc.mac = hc.nextMac - hc.nextCipher = nil - hc.nextMac = nil - for i := range hc.seq { - hc.seq[i] = 0 - } - return nil -} - -// incSeq increments the sequence number. -func (hc *halfConn) incSeq() { - for i := 7; i >= 0; i-- { - hc.seq[i]++ - if hc.seq[i] != 0 { - return - } - } - - // Not allowed to let sequence number wrap. - // Instead, must renegotiate before it does. - // Not likely enough to bother. - panic("TLS: sequence number wraparound") -} - -// resetSeq resets the sequence number to zero. -func (hc *halfConn) resetSeq() { - for i := range hc.seq { - hc.seq[i] = 0 - } -} - -// removePadding returns an unpadded slice, in constant time, which is a prefix -// of the input. It also returns a byte which is equal to 255 if the padding -// was valid and 0 otherwise. 
See RFC 2246, section 6.2.3.2 -func removePadding(payload []byte) ([]byte, byte) { - if len(payload) < 1 { - return payload, 0 - } - - paddingLen := payload[len(payload)-1] - t := uint(len(payload)-1) - uint(paddingLen) - // if len(payload) >= (paddingLen - 1) then the MSB of t is zero - good := byte(int32(^t) >> 31) - - toCheck := 255 // the maximum possible padding length - // The length of the padded data is public, so we can use an if here - if toCheck+1 > len(payload) { - toCheck = len(payload) - 1 - } - - for i := 0; i < toCheck; i++ { - t := uint(paddingLen) - uint(i) - // if i <= paddingLen then the MSB of t is zero - mask := byte(int32(^t) >> 31) - b := payload[len(payload)-1-i] - good &^= mask&paddingLen ^ mask&b - } - - // We AND together the bits of good and replicate the result across - // all the bits. - good &= good << 4 - good &= good << 2 - good &= good << 1 - good = uint8(int8(good) >> 7) - - toRemove := good&paddingLen + 1 - return payload[:len(payload)-int(toRemove)], good -} - -// removePaddingSSL30 is a replacement for removePadding in the case that the -// protocol version is SSLv3. In this version, the contents of the padding -// are random and cannot be checked. -func removePaddingSSL30(payload []byte) ([]byte, byte) { - if len(payload) < 1 { - return payload, 0 - } - - paddingLen := int(payload[len(payload)-1]) + 1 - if paddingLen > len(payload) { - return payload, 0 - } - - return payload[:len(payload)-paddingLen], 255 -} - -func roundUp(a, b int) int { - return a + (b-a%b)%b -} - -// cbcMode is an interface for block ciphers using cipher block chaining. -type cbcMode interface { - cipher.BlockMode - SetIV([]byte) -} - -// decrypt checks and strips the mac and decrypts the data in b. Returns a -// success boolean, the number of bytes to skip from the start of the record in -// order to get the application payload, and an optional alert value. -func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) { - // pull out payload - payload := b.data[recordHeaderLen:] - - macSize := 0 - if hc.mac != nil { - macSize = hc.mac.Size() - } - - paddingGood := byte(255) - explicitIVLen := 0 - - // decrypt - if hc.cipher != nil { - switch c := hc.cipher.(type) { - case cipher.Stream: - c.XORKeyStream(payload, payload) - case cipher.AEAD: - explicitIVLen = 8 - if len(payload) < explicitIVLen { - return false, 0, alertBadRecordMAC - } - nonce := payload[:8] - payload = payload[8:] - - var additionalData [13]byte - copy(additionalData[:], hc.seq[:]) - copy(additionalData[8:], b.data[:3]) - n := len(payload) - c.Overhead() - additionalData[11] = byte(n >> 8) - additionalData[12] = byte(n) - var err error - payload, err = c.Open(payload[:0], nonce, payload, additionalData[:]) - if err != nil { - return false, 0, alertBadRecordMAC - } - b.resize(recordHeaderLen + explicitIVLen + len(payload)) - case cbcMode: - blockSize := c.BlockSize() - if hc.version >= VersionTLS11 { - explicitIVLen = blockSize - } - - if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) { - return false, 0, alertBadRecordMAC - } - - if explicitIVLen > 0 { - c.SetIV(payload[:explicitIVLen]) - payload = payload[explicitIVLen:] - } - c.CryptBlocks(payload, payload) - if hc.version == VersionSSL30 { - payload, paddingGood = removePaddingSSL30(payload) - } else { - payload, paddingGood = removePadding(payload) - } - b.resize(recordHeaderLen + explicitIVLen + len(payload)) - - // note that we still have a timing side-channel in the - // MAC check, below. 
An attacker can align the record - // so that a correct padding will cause one less hash - // block to be calculated. Then they can iteratively - // decrypt a record by breaking each byte. See - // "Password Interception in a SSL/TLS Channel", Brice - // Canvel et al. - // - // However, our behavior matches OpenSSL, so we leak - // only as much as they do. - default: - panic("unknown cipher type") - } - } - - // check, strip mac - if hc.mac != nil { - if len(payload) < macSize { - return false, 0, alertBadRecordMAC - } - - // strip mac off payload, b.data - n := len(payload) - macSize - b.data[3] = byte(n >> 8) - b.data[4] = byte(n) - b.resize(recordHeaderLen + explicitIVLen + n) - remoteMAC := payload[n:] - localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n]) - - if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 { - return false, 0, alertBadRecordMAC - } - hc.inDigestBuf = localMAC - } - hc.incSeq() - - return true, recordHeaderLen + explicitIVLen, 0 -} - -// padToBlockSize calculates the needed padding block, if any, for a payload. -// On exit, prefix aliases payload and extends to the end of the last full -// block of payload. finalBlock is a fresh slice which contains the contents of -// any suffix of payload as well as the needed padding to make finalBlock a -// full block. -func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) { - overrun := len(payload) % blockSize - paddingLen := blockSize - overrun - prefix = payload[:len(payload)-overrun] - finalBlock = make([]byte, blockSize) - copy(finalBlock, payload[len(payload)-overrun:]) - for i := overrun; i < blockSize; i++ { - finalBlock[i] = byte(paddingLen - 1) - } - return -} - -// encrypt encrypts and macs the data in b. -func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) { - // mac - if hc.mac != nil { - mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:]) - - n := len(b.data) - b.resize(n + len(mac)) - copy(b.data[n:], mac) - hc.outDigestBuf = mac - } - - payload := b.data[recordHeaderLen:] - - // encrypt - if hc.cipher != nil { - switch c := hc.cipher.(type) { - case cipher.Stream: - c.XORKeyStream(payload, payload) - case cipher.AEAD: - payloadLen := len(b.data) - recordHeaderLen - explicitIVLen - b.resize(len(b.data) + c.Overhead()) - nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] - payload := b.data[recordHeaderLen+explicitIVLen:] - payload = payload[:payloadLen] - - var additionalData [13]byte - copy(additionalData[:], hc.seq[:]) - copy(additionalData[8:], b.data[:3]) - additionalData[11] = byte(payloadLen >> 8) - additionalData[12] = byte(payloadLen) - - c.Seal(payload[:0], nonce, payload, additionalData[:]) - case cbcMode: - blockSize := c.BlockSize() - if explicitIVLen > 0 { - c.SetIV(payload[:explicitIVLen]) - payload = payload[explicitIVLen:] - } - prefix, finalBlock := padToBlockSize(payload, blockSize) - b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock)) - c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix) - c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock) - default: - panic("unknown cipher type") - } - } - - // update length to include MAC and any block padding needed. - n := len(b.data) - recordHeaderLen - b.data[3] = byte(n >> 8) - b.data[4] = byte(n) - hc.incSeq() - - return true, 0 -} - -// A block is a simple data buffer. 
-type block struct { - data []byte - off int // index for Read - link *block -} - -// resize resizes block to be n bytes, growing if necessary. -func (b *block) resize(n int) { - if n > cap(b.data) { - b.reserve(n) - } - b.data = b.data[0:n] -} - -// reserve makes sure that block contains a capacity of at least n bytes. -func (b *block) reserve(n int) { - if cap(b.data) >= n { - return - } - m := cap(b.data) - if m == 0 { - m = 1024 - } - for m < n { - m *= 2 - } - data := make([]byte, len(b.data), m) - copy(data, b.data) - b.data = data -} - -// readFromUntil reads from r into b until b contains at least n bytes -// or else returns an error. -func (b *block) readFromUntil(r io.Reader, n int) error { - // quick case - if len(b.data) >= n { - return nil - } - - // read until have enough. - b.reserve(n) - for { - m, err := r.Read(b.data[len(b.data):cap(b.data)]) - b.data = b.data[0 : len(b.data)+m] - if len(b.data) >= n { - break - } - if err != nil { - return err - } - } - return nil -} - -func (b *block) Read(p []byte) (n int, err error) { - n = copy(p, b.data[b.off:]) - b.off += n - return -} - -// newBlock allocates a new block, from hc's free list if possible. -func (hc *halfConn) newBlock() *block { - b := hc.bfree - if b == nil { - return new(block) - } - hc.bfree = b.link - b.link = nil - b.resize(0) - return b -} - -// freeBlock returns a block to hc's free list. -// The protocol is such that each side only has a block or two on -// its free list at a time, so there's no need to worry about -// trimming the list, etc. -func (hc *halfConn) freeBlock(b *block) { - b.link = hc.bfree - hc.bfree = b -} - -// splitBlock splits a block after the first n bytes, -// returning a block with those n bytes and a -// block with the remainder. the latter may be nil. -func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) { - if len(b.data) <= n { - return b, nil - } - bb := hc.newBlock() - bb.resize(len(b.data) - n) - copy(bb.data, b.data[n:]) - b.data = b.data[0:n] - return b, bb -} - -// readRecord reads the next TLS record from the connection -// and updates the record layer state. -// c.in.Mutex <= L; c.input == nil. -func (c *Conn) readRecord(want recordType) error { - // Caller must be in sync with connection: - // handshake data if handshake not yet completed, - // else application data. (We don't support renegotiation.) - switch want { - default: - return c.sendAlert(alertInternalError) - case recordTypeHandshake, recordTypeChangeCipherSpec: - if c.handshakeComplete { - return c.sendAlert(alertInternalError) - } - case recordTypeApplicationData: - if !c.handshakeComplete { - return c.sendAlert(alertInternalError) - } - } - -Again: - if c.rawInput == nil { - c.rawInput = c.in.newBlock() - } - b := c.rawInput - - // Read header, payload. - if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil { - // RFC suggests that EOF without an alertCloseNotify is - // an error, but popular web sites seem to do this, - // so we can't make it an error. - // if err == io.EOF { - // err = io.ErrUnexpectedEOF - // } - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.setError(err) - } - return err - } - typ := recordType(b.data[0]) - - // No valid TLS record has a type of 0x80, however SSLv2 handshakes - // start with a uint16 length where the MSB is set and the first record - // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests - // an SSLv2 client. 
- if want == recordTypeHandshake && typ == 0x80 { - c.sendAlert(alertProtocolVersion) - return errors.New("tls: unsupported SSLv2 handshake received") - } - - vers := uint16(b.data[1])<<8 | uint16(b.data[2]) - n := int(b.data[3])<<8 | int(b.data[4]) - if c.haveVers && vers != c.vers { - return c.sendAlert(alertProtocolVersion) - } - if n > maxCiphertext { - return c.sendAlert(alertRecordOverflow) - } - if !c.haveVers { - // First message, be extra suspicious: - // this might not be a TLS client. - // Bail out before reading a full 'body', if possible. - // The current max version is 3.1. - // If the version is >= 16.0, it's probably not real. - // Similarly, a clientHello message encodes in - // well under a kilobyte. If the length is >= 12 kB, - // it's probably not real. - if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 || n >= 0x3000 { - return c.sendAlert(alertUnexpectedMessage) - } - } - if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.setError(err) - } - return err - } - - // Process message. - b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n) - ok, off, err := c.in.decrypt(b) - if !ok { - return c.sendAlert(err) - } - b.off = off - data := b.data[b.off:] - if len(data) > maxPlaintext { - c.sendAlert(alertRecordOverflow) - c.in.freeBlock(b) - return c.error() - } - - switch typ { - default: - c.sendAlert(alertUnexpectedMessage) - - case recordTypeAlert: - if len(data) != 2 { - c.sendAlert(alertUnexpectedMessage) - break - } - if alert(data[1]) == alertCloseNotify { - c.setError(io.EOF) - break - } - switch data[0] { - case alertLevelWarning: - // drop on the floor - c.in.freeBlock(b) - goto Again - case alertLevelError: - c.setError(&net.OpError{Op: "remote error", Err: alert(data[1])}) - default: - c.sendAlert(alertUnexpectedMessage) - } - - case recordTypeChangeCipherSpec: - if typ != want || len(data) != 1 || data[0] != 1 { - c.sendAlert(alertUnexpectedMessage) - break - } - err := c.in.changeCipherSpec() - if err != nil { - c.sendAlert(err.(alert)) - } - - case recordTypeApplicationData: - if typ != want { - c.sendAlert(alertUnexpectedMessage) - break - } - c.input = b - b = nil - - case recordTypeHandshake: - // TODO(rsc): Should at least pick off connection close. - if typ != want && !c.isClient { - return c.sendAlert(alertNoRenegotiation) - } - c.hand.Write(data) - } - - if b != nil { - c.in.freeBlock(b) - } - return c.error() -} - -// sendAlert sends a TLS alert message. -// c.out.Mutex <= L. -func (c *Conn) sendAlertLocked(err alert) error { - switch err { - case alertNoRenegotiation, alertCloseNotify: - c.tmp[0] = alertLevelWarning - default: - c.tmp[0] = alertLevelError - } - c.tmp[1] = byte(err) - c.writeRecord(recordTypeAlert, c.tmp[0:2]) - // closeNotify is a special case in that it isn't an error: - if err != alertCloseNotify { - return c.setError(&net.OpError{Op: "local error", Err: err}) - } - return nil -} - -// sendAlert sends a TLS alert message. -// L < c.out.Mutex. -func (c *Conn) sendAlert(err alert) error { - c.out.Lock() - defer c.out.Unlock() - return c.sendAlertLocked(err) -} - -// writeRecord writes a TLS record with the given type and payload -// to the connection and updates the record layer state. -// c.out.Mutex <= L. 
-func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) { - b := c.out.newBlock() - for len(data) > 0 { - m := len(data) - if m > maxPlaintext { - m = maxPlaintext - } - explicitIVLen := 0 - explicitIVIsSeq := false - - var cbc cbcMode - if c.out.version >= VersionTLS11 { - var ok bool - if cbc, ok = c.out.cipher.(cbcMode); ok { - explicitIVLen = cbc.BlockSize() - } - } - if explicitIVLen == 0 { - if _, ok := c.out.cipher.(cipher.AEAD); ok { - explicitIVLen = 8 - // The AES-GCM construction in TLS has an - // explicit nonce so that the nonce can be - // random. However, the nonce is only 8 bytes - // which is too small for a secure, random - // nonce. Therefore we use the sequence number - // as the nonce. - explicitIVIsSeq = true - } - } - b.resize(recordHeaderLen + explicitIVLen + m) - b.data[0] = byte(typ) - vers := c.vers - if vers == 0 { - // Some TLS servers fail if the record version is - // greater than TLS 1.0 for the initial ClientHello. - vers = VersionTLS10 - } - b.data[1] = byte(vers >> 8) - b.data[2] = byte(vers) - b.data[3] = byte(m >> 8) - b.data[4] = byte(m) - if explicitIVLen > 0 { - explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] - if explicitIVIsSeq { - copy(explicitIV, c.out.seq[:]) - } else { - if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil { - break - } - } - } - copy(b.data[recordHeaderLen+explicitIVLen:], data) - c.out.encrypt(b, explicitIVLen) - _, err = c.conn.Write(b.data) - if err != nil { - break - } - n += m - data = data[m:] - } - c.out.freeBlock(b) - - if typ == recordTypeChangeCipherSpec { - err = c.out.changeCipherSpec() - if err != nil { - // Cannot call sendAlert directly, - // because we already hold c.out.Mutex. - c.tmp[0] = alertLevelError - c.tmp[1] = byte(err.(alert)) - c.writeRecord(recordTypeAlert, c.tmp[0:2]) - return n, c.setError(&net.OpError{Op: "local error", Err: err}) - } - } - return -} - -// readHandshake reads the next handshake message from -// the record layer. -// c.in.Mutex < L; c.out.Mutex < L. 
-func (c *Conn) readHandshake() (interface{}, error) { - for c.hand.Len() < 4 { - if err := c.error(); err != nil { - return nil, err - } - if err := c.readRecord(recordTypeHandshake); err != nil { - return nil, err - } - } - - data := c.hand.Bytes() - n := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if n > maxHandshake { - c.sendAlert(alertInternalError) - return nil, c.error() - } - for c.hand.Len() < 4+n { - if err := c.error(); err != nil { - return nil, err - } - if err := c.readRecord(recordTypeHandshake); err != nil { - return nil, err - } - } - data = c.hand.Next(4 + n) - var m handshakeMessage - switch data[0] { - case typeHelloRequest: - m = new(helloRequestMsg) - case typeClientHello: - m = new(clientHelloMsg) - case typeServerHello: - m = new(serverHelloMsg) - case typeCertificate: - m = new(certificateMsg) - case typeCertificateRequest: - m = &certificateRequestMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, - } - case typeCertificateStatus: - m = new(certificateStatusMsg) - case typeServerKeyExchange: - m = new(serverKeyExchangeMsg) - case typeServerHelloDone: - m = new(serverHelloDoneMsg) - case typeClientKeyExchange: - m = new(clientKeyExchangeMsg) - case typeCertificateVerify: - m = &certificateVerifyMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, - } - case typeNextProtocol: - m = new(nextProtoMsg) - case typeFinished: - m = new(finishedMsg) - default: - c.sendAlert(alertUnexpectedMessage) - return nil, alertUnexpectedMessage - } - - // The handshake message unmarshallers - // expect to be able to keep references to data, - // so pass in a fresh copy that won't be overwritten. - data = append([]byte(nil), data...) - - if !m.unmarshal(data) { - c.sendAlert(alertUnexpectedMessage) - return nil, alertUnexpectedMessage - } - return m, nil -} - -// Write writes data to the connection. -func (c *Conn) Write(b []byte) (int, error) { - if err := c.error(); err != nil { - return 0, err - } - - if err := c.Handshake(); err != nil { - return 0, c.setError(err) - } - - c.out.Lock() - defer c.out.Unlock() - - if !c.handshakeComplete { - return 0, alertInternalError - } - - // SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext - // attack when using block mode ciphers due to predictable IVs. - // This can be prevented by splitting each Application Data - // record into two records, effectively randomizing the IV. - // - // http://www.openssl.org/~bodo/tls-cbc.txt - // https://bugzilla.mozilla.org/show_bug.cgi?id=665814 - // http://www.imperialviolet.org/2012/01/15/beastfollowup.html - - var m int - if len(b) > 1 && c.vers <= VersionTLS10 { - if _, ok := c.out.cipher.(cipher.BlockMode); ok { - n, err := c.writeRecord(recordTypeApplicationData, b[:1]) - if err != nil { - return n, c.setError(err) - } - m, b = 1, b[1:] - } - } - - n, err := c.writeRecord(recordTypeApplicationData, b) - return n + m, c.setError(err) -} - -func (c *Conn) handleRenegotiation() error { - c.handshakeComplete = false - if !c.isClient { - panic("renegotiation should only happen for a client") - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - _, ok := msg.(*helloRequestMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return alertUnexpectedMessage - } - - return c.Handshake() -} - -// Read can be made to time out and return a net.Error with Timeout() == true -// after a fixed time limit; see SetDeadline and SetReadDeadline. 
-func (c *Conn) Read(b []byte) (n int, err error) { - if err = c.Handshake(); err != nil { - return - } - - c.in.Lock() - defer c.in.Unlock() - - // Some OpenSSL servers send empty records in order to randomize the - // CBC IV. So this loop ignores a limited number of empty records. - const maxConsecutiveEmptyRecords = 100 - for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ { - for c.input == nil && c.error() == nil { - if err := c.readRecord(recordTypeApplicationData); err != nil { - // Soft error, like EAGAIN - return 0, err - } - if c.hand.Len() > 0 { - // We received handshake bytes, indicating the start of - // a renegotiation. - if err := c.handleRenegotiation(); err != nil { - return 0, err - } - continue - } - } - if err := c.error(); err != nil { - return 0, err - } - - n, err = c.input.Read(b) - if c.input.off >= len(c.input.data) { - c.in.freeBlock(c.input) - c.input = nil - } - - if n != 0 || err != nil { - return n, err - } - } - - return 0, io.ErrNoProgress -} - -// Close closes the connection. -func (c *Conn) Close() error { - var alertErr error - - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if c.handshakeComplete { - alertErr = c.sendAlert(alertCloseNotify) - } - - if err := c.conn.Close(); err != nil { - return err - } - return alertErr -} - -// Handshake runs the client or server handshake -// protocol if it has not yet been run. -// Most uses of this package need not call Handshake -// explicitly: the first Read or Write will call it automatically. -func (c *Conn) Handshake() error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if err := c.error(); err != nil { - return err - } - if c.handshakeComplete { - return nil - } - if c.isClient { - return c.clientHandshake() - } - return c.serverHandshake() -} - -// ConnectionState returns basic TLS details about the connection. -func (c *Conn) ConnectionState() ConnectionState { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - var state ConnectionState - state.HandshakeComplete = c.handshakeComplete - if c.handshakeComplete { - state.NegotiatedProtocol = c.clientProtocol - state.DidResume = c.didResume - state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback - state.CipherSuite = c.cipherSuite - state.PeerCertificates = c.peerCertificates - state.VerifiedChains = c.verifiedChains - state.ServerName = c.serverName - } - - return state -} - -// OCSPResponse returns the stapled OCSP response from the TLS server, if -// any. (Only valid for client connections.) -func (c *Conn) OCSPResponse() []byte { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - return c.ocspResponse -} - -// VerifyHostname checks that the peer certificate chain is valid for -// connecting to host. If so, it returns nil; if not, it returns an error -// describing the problem. 
-func (c *Conn) VerifyHostname(host string) error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if !c.isClient { - return errors.New("VerifyHostname called on TLS server connection") - } - if !c.handshakeComplete { - return errors.New("TLS handshake has not yet been performed") - } - return c.peerCertificates[0].VerifyHostname(host) -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/generate_cert.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/generate_cert.go deleted file mode 100644 index b417ea464..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/generate_cert.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate a self-signed X.509 certificate for a TLS server. Outputs to -// 'cert.pem' and 'key.pem' and will overwrite existing files. - -package main - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "flag" - "fmt" - "log" - "math/big" - "net" - "os" - "strings" - "time" -) - -var ( - host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") - validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") - validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") - isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") - rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate") -) - -func main() { - flag.Parse() - - if len(*host) == 0 { - log.Fatalf("Missing required --host parameter") - } - - priv, err := rsa.GenerateKey(rand.Reader, *rsaBits) - if err != nil { - log.Fatalf("failed to generate private key: %s", err) - return - } - - var notBefore time.Time - if len(*validFrom) == 0 { - notBefore = time.Now() - } else { - notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err) - os.Exit(1) - } - } - - notAfter := notBefore.Add(*validFor) - - // end of ASN.1 time - endOfTime := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) - if notAfter.After(endOfTime) { - notAfter = endOfTime - } - - template := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), - Subject: pkix.Name{ - Organization: []string{"Acme Co"}, - }, - NotBefore: notBefore, - NotAfter: notAfter, - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - hosts := strings.Split(*host, ",") - for _, h := range hosts { - if ip := net.ParseIP(h); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, h) - } - } - - if *isCA { - template.IsCA = true - template.KeyUsage |= x509.KeyUsageCertSign - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - log.Fatalf("Failed to create certificate: %s", err) - return - } - - certOut, err := os.Create("cert.pem") - if err != nil { - log.Fatalf("failed to open cert.pem for writing: %s", err) - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - log.Print("written cert.pem\n") - - keyOut, err := os.OpenFile("key.pem", 
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - log.Print("failed to open key.pem for writing:", err) - return - } - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - log.Print("written key.pem\n") -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_client.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_client.go deleted file mode 100644 index 85e4adefc..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_client.go +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "bytes" - "crypto/ecdsa" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "encoding/asn1" - "errors" - "io" - "strconv" -) - -func (c *Conn) clientHandshake() error { - if c.config == nil { - c.config = defaultConfig() - } - - hello := &clientHelloMsg{ - vers: c.config.maxVersion(), - compressionMethods: []uint8{compressionNone}, - random: make([]byte, 32), - ocspStapling: true, - serverName: c.config.ServerName, - supportedCurves: []uint16{curveP256, curveP384, curveP521}, - supportedPoints: []uint8{pointFormatUncompressed}, - nextProtoNeg: len(c.config.NextProtos) > 0, - } - - possibleCipherSuites := c.config.cipherSuites() - hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites)) - -NextCipherSuite: - for _, suiteId := range possibleCipherSuites { - for _, suite := range cipherSuites { - if suite.id != suiteId { - continue - } - // Don't advertise TLS 1.2-only cipher suites unless - // we're attempting TLS 1.2. - if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 { - continue - } - hello.cipherSuites = append(hello.cipherSuites, suiteId) - continue NextCipherSuite - } - } - - t := uint32(c.config.time().Unix()) - hello.random[0] = byte(t >> 24) - hello.random[1] = byte(t >> 16) - hello.random[2] = byte(t >> 8) - hello.random[3] = byte(t) - _, err := io.ReadFull(c.config.rand(), hello.random[4:]) - if err != nil { - c.sendAlert(alertInternalError) - return errors.New("short read from Rand") - } - - if hello.vers >= VersionTLS12 { - hello.signatureAndHashes = supportedSKXSignatureAlgorithms - } - - c.writeRecord(recordTypeHandshake, hello.marshal()) - - msg, err := c.readHandshake() - if err != nil { - return err - } - serverHello, ok := msg.(*serverHelloMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - vers, ok := c.config.mutualVersion(serverHello.vers) - if !ok || vers < VersionTLS10 { - // TLS 1.0 is the minimum version supported as a client. 
- return c.sendAlert(alertProtocolVersion) - } - c.vers = vers - c.haveVers = true - - finishedHash := newFinishedHash(c.vers) - finishedHash.Write(hello.marshal()) - finishedHash.Write(serverHello.marshal()) - - if serverHello.compressionMethod != compressionNone { - return c.sendAlert(alertUnexpectedMessage) - } - - if !hello.nextProtoNeg && serverHello.nextProtoNeg { - c.sendAlert(alertHandshakeFailure) - return errors.New("server advertised unrequested NPN") - } - - suite := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite) - if suite == nil { - return c.sendAlert(alertHandshakeFailure) - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - certMsg, ok := msg.(*certificateMsg) - if !ok || len(certMsg.certificates) == 0 { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(certMsg.marshal()) - - certs := make([]*x509.Certificate, len(certMsg.certificates)) - for i, asn1Data := range certMsg.certificates { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("failed to parse certificate from server: " + err.Error()) - } - certs[i] = cert - } - - if !c.config.InsecureSkipVerify { - opts := x509.VerifyOptions{ - Roots: c.config.RootCAs, - CurrentTime: c.config.time(), - DNSName: c.config.ServerName, - Intermediates: x509.NewCertPool(), - } - - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - c.verifiedChains, err = certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return err - } - } - - switch certs[0].PublicKey.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey: - break - default: - return c.sendAlert(alertUnsupportedCertificate) - } - - c.peerCertificates = certs - - if serverHello.ocspStapling { - msg, err = c.readHandshake() - if err != nil { - return err - } - cs, ok := msg.(*certificateStatusMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(cs.marshal()) - - if cs.statusType == statusTypeOCSP { - c.ocspResponse = cs.response - } - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - - keyAgreement := suite.ka(c.vers) - - skx, ok := msg.(*serverKeyExchangeMsg) - if ok { - finishedHash.Write(skx.marshal()) - err = keyAgreement.processServerKeyExchange(c.config, hello, serverHello, certs[0], skx) - if err != nil { - c.sendAlert(alertUnexpectedMessage) - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - var chainToSend *Certificate - var certRequested bool - certReq, ok := msg.(*certificateRequestMsg) - if ok { - certRequested = true - - // RFC 4346 on the certificateAuthorities field: - // A list of the distinguished names of acceptable certificate - // authorities. These distinguished names may specify a desired - // distinguished name for a root CA or for a subordinate CA; - // thus, this message can be used to describe both known roots - // and a desired authorization space. If the - // certificate_authorities list is empty then the client MAY - // send any certificate of the appropriate - // ClientCertificateType, unless there is some external - // arrangement to the contrary. 
- - finishedHash.Write(certReq.marshal()) - - var rsaAvail, ecdsaAvail bool - for _, certType := range certReq.certificateTypes { - switch certType { - case certTypeRSASign: - rsaAvail = true - case certTypeECDSASign: - ecdsaAvail = true - } - } - - // We need to search our list of client certs for one - // where SignatureAlgorithm is RSA and the Issuer is in - // certReq.certificateAuthorities - findCert: - for i, chain := range c.config.Certificates { - if !rsaAvail && !ecdsaAvail { - continue - } - - for j, cert := range chain.Certificate { - x509Cert := chain.Leaf - // parse the certificate if this isn't the leaf - // node, or if chain.Leaf was nil - if j != 0 || x509Cert == nil { - if x509Cert, err = x509.ParseCertificate(cert); err != nil { - c.sendAlert(alertInternalError) - return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error()) - } - } - - switch { - case rsaAvail && x509Cert.PublicKeyAlgorithm == x509.RSA: - case ecdsaAvail && x509Cert.PublicKeyAlgorithm == x509.ECDSA: - default: - continue findCert - } - - if len(certReq.certificateAuthorities) == 0 { - // they gave us an empty list, so just take the - // first RSA cert from c.config.Certificates - chainToSend = &chain - break findCert - } - - for _, ca := range certReq.certificateAuthorities { - if bytes.Equal(x509Cert.RawIssuer, ca) { - chainToSend = &chain - break findCert - } - } - } - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - shd, ok := msg.(*serverHelloDoneMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(shd.marshal()) - - // If the server requested a certificate then we have to send a - // Certificate message, even if it's empty because we don't have a - // certificate to send. 
- if certRequested { - certMsg = new(certificateMsg) - if chainToSend != nil { - certMsg.certificates = chainToSend.Certificate - } - finishedHash.Write(certMsg.marshal()) - c.writeRecord(recordTypeHandshake, certMsg.marshal()) - } - - preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hello, certs[0]) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - if ckx != nil { - finishedHash.Write(ckx.marshal()) - c.writeRecord(recordTypeHandshake, ckx.marshal()) - } - - if chainToSend != nil { - var signed []byte - certVerify := &certificateVerifyMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, - } - - switch key := c.config.Certificates[0].PrivateKey.(type) { - case *ecdsa.PrivateKey: - digest, _, hashId := finishedHash.hashForClientCertificate(signatureECDSA) - r, s, err := ecdsa.Sign(c.config.rand(), key, digest) - if err == nil { - signed, err = asn1.Marshal(ecdsaSignature{r, s}) - } - certVerify.signatureAndHash.signature = signatureECDSA - certVerify.signatureAndHash.hash = hashId - case *rsa.PrivateKey: - digest, hashFunc, hashId := finishedHash.hashForClientCertificate(signatureRSA) - signed, err = rsa.SignPKCS1v15(c.config.rand(), key, hashFunc, digest) - certVerify.signatureAndHash.signature = signatureRSA - certVerify.signatureAndHash.hash = hashId - default: - err = errors.New("unknown private key type") - } - if err != nil { - return c.sendAlert(alertInternalError) - } - certVerify.signature = signed - - finishedHash.Write(certVerify.marshal()) - c.writeRecord(recordTypeHandshake, certVerify.marshal()) - } - - masterSecret := masterFromPreMasterSecret(c.vers, preMasterSecret, hello.random, serverHello.random) - clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromMasterSecret(c.vers, masterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen) - - var clientCipher interface{} - var clientHash macFunction - if suite.cipher != nil { - clientCipher = suite.cipher(clientKey, clientIV, false /* not for reading */) - clientHash = suite.mac(c.vers, clientMAC) - } else { - clientCipher = suite.aead(clientKey, clientIV) - } - c.out.prepareCipherSpec(c.vers, clientCipher, clientHash) - c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - - if serverHello.nextProtoNeg { - nextProto := new(nextProtoMsg) - proto, fallback := mutualProtocol(c.config.NextProtos, serverHello.nextProtos) - nextProto.proto = proto - c.clientProtocol = proto - c.clientProtocolFallback = fallback - - finishedHash.Write(nextProto.marshal()) - c.writeRecord(recordTypeHandshake, nextProto.marshal()) - } - - finished := new(finishedMsg) - finished.verifyData = finishedHash.clientSum(masterSecret) - finishedHash.Write(finished.marshal()) - c.writeRecord(recordTypeHandshake, finished.marshal()) - - var serverCipher interface{} - var serverHash macFunction - if suite.cipher != nil { - serverCipher = suite.cipher(serverKey, serverIV, true /* for reading */) - serverHash = suite.mac(c.vers, serverMAC) - } else { - serverCipher = suite.aead(serverKey, serverIV) - } - c.in.prepareCipherSpec(c.vers, serverCipher, serverHash) - c.readRecord(recordTypeChangeCipherSpec) - if err := c.error(); err != nil { - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - serverFinished, ok := msg.(*finishedMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - verify := finishedHash.serverSum(masterSecret) - if len(verify) != len(serverFinished.verifyData) || - subtle.ConstantTimeCompare(verify, 
serverFinished.verifyData) != 1 { - return c.sendAlert(alertHandshakeFailure) - } - - c.handshakeComplete = true - c.cipherSuite = suite.id - return nil -} - -// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the -// set of client and server supported protocols. The set of client supported -// protocols must not be empty. It returns the resulting protocol and flag -// indicating if the fallback case was reached. -func mutualProtocol(clientProtos, serverProtos []string) (string, bool) { - for _, s := range serverProtos { - for _, c := range clientProtos { - if s == c { - return s, false - } - } - } - - return clientProtos[0], true -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_messages.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_messages.go deleted file mode 100644 index 8197391e3..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_messages.go +++ /dev/null @@ -1,1304 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import "bytes" - -type clientHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuites []uint16 - compressionMethods []uint8 - nextProtoNeg bool - serverName string - ocspStapling bool - supportedCurves []uint16 - supportedPoints []uint8 - ticketSupported bool - sessionTicket []uint8 - signatureAndHashes []signatureAndHash -} - -func (m *clientHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*clientHelloMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - eqUint16s(m.cipherSuites, m1.cipherSuites) && - bytes.Equal(m.compressionMethods, m1.compressionMethods) && - m.nextProtoNeg == m1.nextProtoNeg && - m.serverName == m1.serverName && - m.ocspStapling == m1.ocspStapling && - eqUint16s(m.supportedCurves, m1.supportedCurves) && - bytes.Equal(m.supportedPoints, m1.supportedPoints) && - m.ticketSupported == m1.ticketSupported && - bytes.Equal(m.sessionTicket, m1.sessionTicket) && - eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) -} - -func (m *clientHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - length := 2 + 32 + 1 + len(m.sessionId) + 2 + len(m.cipherSuites)*2 + 1 + len(m.compressionMethods) - numExtensions := 0 - extensionsLength := 0 - if m.nextProtoNeg { - numExtensions++ - } - if m.ocspStapling { - extensionsLength += 1 + 2 + 2 - numExtensions++ - } - if len(m.serverName) > 0 { - extensionsLength += 5 + len(m.serverName) - numExtensions++ - } - if len(m.supportedCurves) > 0 { - extensionsLength += 2 + 2*len(m.supportedCurves) - numExtensions++ - } - if len(m.supportedPoints) > 0 { - extensionsLength += 1 + len(m.supportedPoints) - numExtensions++ - } - if m.ticketSupported { - extensionsLength += len(m.sessionTicket) - numExtensions++ - } - if len(m.signatureAndHashes) > 0 { - extensionsLength += 2 + 2*len(m.signatureAndHashes) - numExtensions++ - } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } - - x := make([]byte, 4+length) - x[0] = typeClientHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) - 
copy(x[39:39+len(m.sessionId)], m.sessionId) - y := x[39+len(m.sessionId):] - y[0] = uint8(len(m.cipherSuites) >> 7) - y[1] = uint8(len(m.cipherSuites) << 1) - for i, suite := range m.cipherSuites { - y[2+i*2] = uint8(suite >> 8) - y[3+i*2] = uint8(suite) - } - z := y[2+len(m.cipherSuites)*2:] - z[0] = uint8(len(m.compressionMethods)) - copy(z[1:], m.compressionMethods) - - z = z[1+len(m.compressionMethods):] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg) - // The length is always 0 - z = z[4:] - } - if len(m.serverName) > 0 { - z[0] = byte(extensionServerName >> 8) - z[1] = byte(extensionServerName) - l := len(m.serverName) + 5 - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - - // RFC 3546, section 3.1 - // - // struct { - // NameType name_type; - // select (name_type) { - // case host_name: HostName; - // } name; - // } ServerName; - // - // enum { - // host_name(0), (255) - // } NameType; - // - // opaque HostName<1..2^16-1>; - // - // struct { - // ServerName server_name_list<1..2^16-1> - // } ServerNameList; - - z[0] = byte((len(m.serverName) + 3) >> 8) - z[1] = byte(len(m.serverName) + 3) - z[3] = byte(len(m.serverName) >> 8) - z[4] = byte(len(m.serverName)) - copy(z[5:], []byte(m.serverName)) - z = z[l:] - } - if m.ocspStapling { - // RFC 4366, section 3.6 - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z[2] = 0 - z[3] = 5 - z[4] = 1 // OCSP type - // Two zero valued uint16s for the two lengths. - z = z[9:] - } - if len(m.supportedCurves) > 0 { - // http://tools.ietf.org/html/rfc4492#section-5.5.1 - z[0] = byte(extensionSupportedCurves >> 8) - z[1] = byte(extensionSupportedCurves) - l := 2 + 2*len(m.supportedCurves) - z[2] = byte(l >> 8) - z[3] = byte(l) - l -= 2 - z[4] = byte(l >> 8) - z[5] = byte(l) - z = z[6:] - for _, curve := range m.supportedCurves { - z[0] = byte(curve >> 8) - z[1] = byte(curve) - z = z[2:] - } - } - if len(m.supportedPoints) > 0 { - // http://tools.ietf.org/html/rfc4492#section-5.5.2 - z[0] = byte(extensionSupportedPoints >> 8) - z[1] = byte(extensionSupportedPoints) - l := 1 + len(m.supportedPoints) - z[2] = byte(l >> 8) - z[3] = byte(l) - l-- - z[4] = byte(l) - z = z[5:] - for _, pointFormat := range m.supportedPoints { - z[0] = byte(pointFormat) - z = z[1:] - } - } - if m.ticketSupported { - // http://tools.ietf.org/html/rfc5077#section-3.2 - z[0] = byte(extensionSessionTicket >> 8) - z[1] = byte(extensionSessionTicket) - l := len(m.sessionTicket) - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - copy(z, m.sessionTicket) - z = z[len(m.sessionTicket):] - } - if len(m.signatureAndHashes) > 0 { - // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 - z[0] = byte(extensionSignatureAlgorithms >> 8) - z[1] = byte(extensionSignatureAlgorithms) - l := 2 + 2*len(m.signatureAndHashes) - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - - l -= 2 - z[0] = byte(l >> 8) - z[1] = byte(l) - z = z[2:] - for _, sigAndHash := range m.signatureAndHashes { - z[0] = sigAndHash.hash - z[1] = sigAndHash.signature - z = z[2:] - } - } - - m.raw = x - - return x -} - -func (m *clientHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { - return false - } - m.sessionId = 
data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 2 { - return false - } - // cipherSuiteLen is the number of bytes of cipher suite numbers. Since - // they are uint16s, the number must be even. - cipherSuiteLen := int(data[0])<<8 | int(data[1]) - if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { - return false - } - numCipherSuites := cipherSuiteLen / 2 - m.cipherSuites = make([]uint16, numCipherSuites) - for i := 0; i < numCipherSuites; i++ { - m.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) - } - data = data[2+cipherSuiteLen:] - if len(data) < 1 { - return false - } - compressionMethodsLen := int(data[0]) - if len(data) < 1+compressionMethodsLen { - return false - } - m.compressionMethods = data[1 : 1+compressionMethodsLen] - - data = data[1+compressionMethodsLen:] - - m.nextProtoNeg = false - m.serverName = "" - m.ocspStapling = false - m.ticketSupported = false - m.sessionTicket = nil - m.signatureAndHashes = nil - - if len(data) == 0 { - // ClientHello is optionally followed by extension data - return true - } - if len(data) < 2 { - return false - } - - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if extensionsLength != len(data) { - return false - } - - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { - return false - } - - switch extension { - case extensionServerName: - if length < 2 { - return false - } - numNames := int(data[0])<<8 | int(data[1]) - d := data[2:] - for i := 0; i < numNames; i++ { - if len(d) < 3 { - return false - } - nameType := d[0] - nameLen := int(d[1])<<8 | int(d[2]) - d = d[3:] - if len(d) < nameLen { - return false - } - if nameType == 0 { - m.serverName = string(d[0:nameLen]) - break - } - d = d[nameLen:] - } - case extensionNextProtoNeg: - if length > 0 { - return false - } - m.nextProtoNeg = true - case extensionStatusRequest: - m.ocspStapling = length > 0 && data[0] == statusTypeOCSP - case extensionSupportedCurves: - // http://tools.ietf.org/html/rfc4492#section-5.5.1 - if length < 2 { - return false - } - l := int(data[0])<<8 | int(data[1]) - if l%2 == 1 || length != l+2 { - return false - } - numCurves := l / 2 - m.supportedCurves = make([]uint16, numCurves) - d := data[2:] - for i := 0; i < numCurves; i++ { - m.supportedCurves[i] = uint16(d[0])<<8 | uint16(d[1]) - d = d[2:] - } - case extensionSupportedPoints: - // http://tools.ietf.org/html/rfc4492#section-5.5.2 - if length < 1 { - return false - } - l := int(data[0]) - if length != l+1 { - return false - } - m.supportedPoints = make([]uint8, l) - copy(m.supportedPoints, data[1:]) - case extensionSessionTicket: - // http://tools.ietf.org/html/rfc5077#section-3.2 - m.ticketSupported = true - m.sessionTicket = data[:length] - case extensionSignatureAlgorithms: - // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 - if length < 2 || length&1 != 0 { - return false - } - l := int(data[0])<<8 | int(data[1]) - if l != length-2 { - return false - } - n := l / 2 - d := data[2:] - m.signatureAndHashes = make([]signatureAndHash, n) - for i := range m.signatureAndHashes { - m.signatureAndHashes[i].hash = d[0] - m.signatureAndHashes[i].signature = d[1] - d = d[2:] - } - } - data = data[length:] - } - - return true -} - -type serverHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuite uint16 - compressionMethod uint8 - nextProtoNeg bool - 
nextProtos []string - ocspStapling bool - ticketSupported bool -} - -func (m *serverHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*serverHelloMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - m.cipherSuite == m1.cipherSuite && - m.compressionMethod == m1.compressionMethod && - m.nextProtoNeg == m1.nextProtoNeg && - eqStrings(m.nextProtos, m1.nextProtos) && - m.ocspStapling == m1.ocspStapling && - m.ticketSupported == m1.ticketSupported -} - -func (m *serverHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - length := 38 + len(m.sessionId) - numExtensions := 0 - extensionsLength := 0 - - nextProtoLen := 0 - if m.nextProtoNeg { - numExtensions++ - for _, v := range m.nextProtos { - nextProtoLen += len(v) - } - nextProtoLen += len(m.nextProtos) - extensionsLength += nextProtoLen - } - if m.ocspStapling { - numExtensions++ - } - if m.ticketSupported { - numExtensions++ - } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } - - x := make([]byte, 4+length) - x[0] = typeServerHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) - copy(x[39:39+len(m.sessionId)], m.sessionId) - z := x[39+len(m.sessionId):] - z[0] = uint8(m.cipherSuite >> 8) - z[1] = uint8(m.cipherSuite) - z[2] = uint8(m.compressionMethod) - - z = z[3:] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg) - z[2] = byte(nextProtoLen >> 8) - z[3] = byte(nextProtoLen) - z = z[4:] - - for _, v := range m.nextProtos { - l := len(v) - if l > 255 { - l = 255 - } - z[0] = byte(l) - copy(z[1:], []byte(v[0:l])) - z = z[1+l:] - } - } - if m.ocspStapling { - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z = z[4:] - } - if m.ticketSupported { - z[0] = byte(extensionSessionTicket >> 8) - z[1] = byte(extensionSessionTicket) - z = z[4:] - } - - m.raw = x - - return x -} - -func (m *serverHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { - return false - } - m.sessionId = data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 3 { - return false - } - m.cipherSuite = uint16(data[0])<<8 | uint16(data[1]) - m.compressionMethod = data[2] - data = data[3:] - - m.nextProtoNeg = false - m.nextProtos = nil - m.ocspStapling = false - m.ticketSupported = false - - if len(data) == 0 { - // ServerHello is optionally followed by extension data - return true - } - if len(data) < 2 { - return false - } - - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if len(data) != extensionsLength { - return false - } - - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { - return false - } - - switch extension { - case extensionNextProtoNeg: - m.nextProtoNeg = true - d := data[:length] - for len(d) > 0 { - l := int(d[0]) - d = d[1:] - if l == 0 
|| l > len(d) { - return false - } - m.nextProtos = append(m.nextProtos, string(d[:l])) - d = d[l:] - } - case extensionStatusRequest: - if length > 0 { - return false - } - m.ocspStapling = true - case extensionSessionTicket: - if length > 0 { - return false - } - m.ticketSupported = true - } - data = data[length:] - } - - return true -} - -type certificateMsg struct { - raw []byte - certificates [][]byte -} - -func (m *certificateMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - eqByteSlices(m.certificates, m1.certificates) -} - -func (m *certificateMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - var i int - for _, slice := range m.certificates { - i += len(slice) - } - - length := 3 + 3*len(m.certificates) + i - x = make([]byte, 4+length) - x[0] = typeCertificate - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - certificateOctets := length - 3 - x[4] = uint8(certificateOctets >> 16) - x[5] = uint8(certificateOctets >> 8) - x[6] = uint8(certificateOctets) - - y := x[7:] - for _, slice := range m.certificates { - y[0] = uint8(len(slice) >> 16) - y[1] = uint8(len(slice) >> 8) - y[2] = uint8(len(slice)) - copy(y[3:], slice) - y = y[3+len(slice):] - } - - m.raw = x - return -} - -func (m *certificateMsg) unmarshal(data []byte) bool { - if len(data) < 7 { - return false - } - - m.raw = data - certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6]) - if uint32(len(data)) != certsLen+7 { - return false - } - - numCerts := 0 - d := data[7:] - for certsLen > 0 { - if len(d) < 4 { - return false - } - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - if uint32(len(d)) < 3+certLen { - return false - } - d = d[3+certLen:] - certsLen -= 3 + certLen - numCerts++ - } - - m.certificates = make([][]byte, numCerts) - d = data[7:] - for i := 0; i < numCerts; i++ { - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - m.certificates[i] = d[3 : 3+certLen] - d = d[3+certLen:] - } - - return true -} - -type serverKeyExchangeMsg struct { - raw []byte - key []byte -} - -func (m *serverKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*serverKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.key, m1.key) -} - -func (m *serverKeyExchangeMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.key) - x := make([]byte, length+4) - x[0] = typeServerKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - copy(x[4:], m.key) - - m.raw = x - return x -} - -func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - m.key = data[4:] - return true -} - -type certificateStatusMsg struct { - raw []byte - statusType uint8 - response []byte -} - -func (m *certificateStatusMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateStatusMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.statusType == m1.statusType && - bytes.Equal(m.response, m1.response) -} - -func (m *certificateStatusMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var x []byte - if m.statusType == statusTypeOCSP { - x = make([]byte, 4+4+len(m.response)) - x[0] = typeCertificateStatus - l := len(m.response) + 4 - x[1] = byte(l >> 16) - x[2] = byte(l >> 8) - x[3] = byte(l) - x[4] = statusTypeOCSP - - l -= 4 - x[5] = byte(l >> 16) - x[6] = byte(l >> 8) - 
x[7] = byte(l) - copy(x[8:], m.response) - } else { - x = []byte{typeCertificateStatus, 0, 0, 1, m.statusType} - } - - m.raw = x - return x -} - -func (m *certificateStatusMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 5 { - return false - } - m.statusType = data[4] - - m.response = nil - if m.statusType == statusTypeOCSP { - if len(data) < 8 { - return false - } - respLen := uint32(data[5])<<16 | uint32(data[6])<<8 | uint32(data[7]) - if uint32(len(data)) != 4+4+respLen { - return false - } - m.response = data[8:] - } - return true -} - -type serverHelloDoneMsg struct{} - -func (m *serverHelloDoneMsg) equal(i interface{}) bool { - _, ok := i.(*serverHelloDoneMsg) - return ok -} - -func (m *serverHelloDoneMsg) marshal() []byte { - x := make([]byte, 4) - x[0] = typeServerHelloDone - return x -} - -func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -type clientKeyExchangeMsg struct { - raw []byte - ciphertext []byte -} - -func (m *clientKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*clientKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.ciphertext, m1.ciphertext) -} - -func (m *clientKeyExchangeMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.ciphertext) - x := make([]byte, length+4) - x[0] = typeClientKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - copy(x[4:], m.ciphertext) - - m.raw = x - return x -} - -func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - l := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if l != len(data)-4 { - return false - } - m.ciphertext = data[4:] - return true -} - -type finishedMsg struct { - raw []byte - verifyData []byte -} - -func (m *finishedMsg) equal(i interface{}) bool { - m1, ok := i.(*finishedMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.verifyData, m1.verifyData) -} - -func (m *finishedMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - x = make([]byte, 4+len(m.verifyData)) - x[0] = typeFinished - x[3] = byte(len(m.verifyData)) - copy(x[4:], m.verifyData) - m.raw = x - return -} - -func (m *finishedMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - m.verifyData = data[4:] - return true -} - -type nextProtoMsg struct { - raw []byte - proto string -} - -func (m *nextProtoMsg) equal(i interface{}) bool { - m1, ok := i.(*nextProtoMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.proto == m1.proto -} - -func (m *nextProtoMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - l := len(m.proto) - if l > 255 { - l = 255 - } - - padding := 32 - (l+2)%32 - length := l + padding + 2 - x := make([]byte, length+4) - x[0] = typeNextProtocol - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - y := x[4:] - y[0] = byte(l) - copy(y[1:], []byte(m.proto[0:l])) - y = y[1+l:] - y[0] = byte(padding) - - m.raw = x - - return x -} - -func (m *nextProtoMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - data = data[4:] - protoLen := int(data[0]) - data = data[1:] - if len(data) < protoLen { - return false - } - m.proto = string(data[0:protoLen]) - data = data[protoLen:] - - if len(data) < 1 { - return false - } - paddingLen := int(data[0]) - data = data[1:] - if len(data) != paddingLen { - return 
false - } - - return true -} - -type certificateRequestMsg struct { - raw []byte - // hasSignatureAndHash indicates whether this message includes a list - // of signature and hash functions. This change was introduced with TLS - // 1.2. - hasSignatureAndHash bool - - certificateTypes []byte - signatureAndHashes []signatureAndHash - certificateAuthorities [][]byte -} - -func (m *certificateRequestMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateRequestMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.certificateTypes, m1.certificateTypes) && - eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) && - eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) -} - -func (m *certificateRequestMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See http://tools.ietf.org/html/rfc4346#section-7.4.4 - length := 1 + len(m.certificateTypes) + 2 - casLength := 0 - for _, ca := range m.certificateAuthorities { - casLength += 2 + len(ca) - } - length += casLength - - if m.hasSignatureAndHash { - length += 2 + 2*len(m.signatureAndHashes) - } - - x = make([]byte, 4+length) - x[0] = typeCertificateRequest - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - x[4] = uint8(len(m.certificateTypes)) - - copy(x[5:], m.certificateTypes) - y := x[5+len(m.certificateTypes):] - - if m.hasSignatureAndHash { - n := len(m.signatureAndHashes) * 2 - y[0] = uint8(n >> 8) - y[1] = uint8(n) - y = y[2:] - for _, sigAndHash := range m.signatureAndHashes { - y[0] = sigAndHash.hash - y[1] = sigAndHash.signature - y = y[2:] - } - } - - y[0] = uint8(casLength >> 8) - y[1] = uint8(casLength) - y = y[2:] - for _, ca := range m.certificateAuthorities { - y[0] = uint8(len(ca) >> 8) - y[1] = uint8(len(ca)) - y = y[2:] - copy(y, ca) - y = y[len(ca):] - } - - m.raw = x - return -} - -func (m *certificateRequestMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - numCertTypes := int(data[4]) - data = data[5:] - if numCertTypes == 0 || len(data) <= numCertTypes { - return false - } - - m.certificateTypes = make([]byte, numCertTypes) - if copy(m.certificateTypes, data) != numCertTypes { - return false - } - - data = data[numCertTypes:] - - if m.hasSignatureAndHash { - if len(data) < 2 { - return false - } - sigAndHashLen := uint16(data[0])<<8 | uint16(data[1]) - data = data[2:] - if sigAndHashLen&1 != 0 { - return false - } - if len(data) < int(sigAndHashLen) { - return false - } - numSigAndHash := sigAndHashLen / 2 - m.signatureAndHashes = make([]signatureAndHash, numSigAndHash) - for i := range m.signatureAndHashes { - m.signatureAndHashes[i].hash = data[0] - m.signatureAndHashes[i].signature = data[1] - data = data[2:] - } - } - - if len(data) < 2 { - return false - } - casLength := uint16(data[0])<<8 | uint16(data[1]) - data = data[2:] - if len(data) < int(casLength) { - return false - } - cas := make([]byte, casLength) - copy(cas, data) - data = data[casLength:] - - m.certificateAuthorities = nil - for len(cas) > 0 { - if len(cas) < 2 { - return false - } - caLen := uint16(cas[0])<<8 | uint16(cas[1]) - cas = cas[2:] - - if len(cas) < int(caLen) { - return false - } - - m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen]) - cas = cas[caLen:] - } - if len(data) > 0 { - return false - } - - return true -} - -type 
certificateVerifyMsg struct { - raw []byte - hasSignatureAndHash bool - signatureAndHash signatureAndHash - signature []byte -} - -func (m *certificateVerifyMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateVerifyMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.hasSignatureAndHash == m1.hasSignatureAndHash && - m.signatureAndHash.hash == m1.signatureAndHash.hash && - m.signatureAndHash.signature == m1.signatureAndHash.signature && - bytes.Equal(m.signature, m1.signature) -} - -func (m *certificateVerifyMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See http://tools.ietf.org/html/rfc4346#section-7.4.8 - siglength := len(m.signature) - length := 2 + siglength - if m.hasSignatureAndHash { - length += 2 - } - x = make([]byte, 4+length) - x[0] = typeCertificateVerify - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - y := x[4:] - if m.hasSignatureAndHash { - y[0] = m.signatureAndHash.hash - y[1] = m.signatureAndHash.signature - y = y[2:] - } - y[0] = uint8(siglength >> 8) - y[1] = uint8(siglength) - copy(y[2:], m.signature) - - m.raw = x - - return -} - -func (m *certificateVerifyMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 6 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - data = data[4:] - if m.hasSignatureAndHash { - m.signatureAndHash.hash = data[0] - m.signatureAndHash.signature = data[1] - data = data[2:] - } - - if len(data) < 2 { - return false - } - siglength := int(data[0])<<8 + int(data[1]) - data = data[2:] - if len(data) != siglength { - return false - } - - m.signature = data - - return true -} - -type newSessionTicketMsg struct { - raw []byte - ticket []byte -} - -func (m *newSessionTicketMsg) equal(i interface{}) bool { - m1, ok := i.(*newSessionTicketMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.ticket, m1.ticket) -} - -func (m *newSessionTicketMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See http://tools.ietf.org/html/rfc5077#section-3.3 - ticketLen := len(m.ticket) - length := 2 + 4 + ticketLen - x = make([]byte, 4+length) - x[0] = typeNewSessionTicket - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[8] = uint8(ticketLen >> 8) - x[9] = uint8(ticketLen) - copy(x[10:], m.ticket) - - m.raw = x - - return -} - -func (m *newSessionTicketMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 10 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - ticketLen := int(data[8])<<8 + int(data[9]) - if len(data)-10 != ticketLen { - return false - } - - m.ticket = data[10:] - - return true -} - -type helloRequestMsg struct { -} - -func (*helloRequestMsg) marshal() []byte { - return []byte{typeHelloRequest, 0, 0, 0} -} - -func (*helloRequestMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -func eqUint16s(x, y []uint16) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqStrings(x, y []string) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqByteSlices(x, y [][]byte) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if 
!bytes.Equal(v, y[i]) { - return false - } - } - return true -} - -func eqSignatureAndHashes(x, y []signatureAndHash) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - v2 := y[i] - if v.hash != v2.hash || v.signature != v2.signature { - return false - } - } - return true -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_server.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_server.go deleted file mode 100644 index c9ccf675c..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/handshake_server.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "encoding/asn1" - "errors" - "io" -) - -// serverHandshakeState contains details of a server handshake in progress. -// It's discarded once the handshake has completed. -type serverHandshakeState struct { - c *Conn - clientHello *clientHelloMsg - hello *serverHelloMsg - suite *cipherSuite - ellipticOk bool - ecdsaOk bool - sessionState *sessionState - finishedHash finishedHash - masterSecret []byte - certsFromClient [][]byte - cert *Certificate -} - -// serverHandshake performs a TLS handshake as a server. -func (c *Conn) serverHandshake() error { - config := c.config - - // If this is the first server handshake, we generate a random key to - // encrypt the tickets with. - config.serverInitOnce.Do(config.serverInit) - - hs := serverHandshakeState{ - c: c, - } - isResume, err := hs.readClientHello() - if err != nil { - return err - } - - // For an overview of TLS handshaking, see https://tools.ietf.org/html/rfc5246#section-7.3 - if isResume { - // The client has included a session ticket and so we do an abbreviated handshake. - if err := hs.doResumeHandshake(); err != nil { - return err - } - if err := hs.establishKeys(); err != nil { - return err - } - if err := hs.sendFinished(); err != nil { - return err - } - if err := hs.readFinished(); err != nil { - return err - } - c.didResume = true - } else { - // The client didn't include a session ticket, or it wasn't - // valid so we do a full handshake. - if err := hs.doFullHandshake(); err != nil { - return err - } - if err := hs.establishKeys(); err != nil { - return err - } - if err := hs.readFinished(); err != nil { - return err - } - if err := hs.sendSessionTicket(); err != nil { - return err - } - if err := hs.sendFinished(); err != nil { - return err - } - } - c.handshakeComplete = true - - return nil -} - -// readClientHello reads a ClientHello message from the client and decides -// whether we will perform session resumption. 
-func (hs *serverHandshakeState) readClientHello() (isResume bool, err error) { - config := hs.c.config - c := hs.c - - msg, err := c.readHandshake() - if err != nil { - return false, err - } - var ok bool - hs.clientHello, ok = msg.(*clientHelloMsg) - if !ok { - return false, c.sendAlert(alertUnexpectedMessage) - } - c.vers, ok = config.mutualVersion(hs.clientHello.vers) - if !ok { - return false, c.sendAlert(alertProtocolVersion) - } - c.haveVers = true - - hs.finishedHash = newFinishedHash(c.vers) - hs.finishedHash.Write(hs.clientHello.marshal()) - - hs.hello = new(serverHelloMsg) - - supportedCurve := false -Curves: - for _, curve := range hs.clientHello.supportedCurves { - switch curve { - case curveP256, curveP384, curveP521: - supportedCurve = true - break Curves - } - } - - supportedPointFormat := false - for _, pointFormat := range hs.clientHello.supportedPoints { - if pointFormat == pointFormatUncompressed { - supportedPointFormat = true - break - } - } - hs.ellipticOk = supportedCurve && supportedPointFormat - - foundCompression := false - // We only support null compression, so check that the client offered it. - for _, compression := range hs.clientHello.compressionMethods { - if compression == compressionNone { - foundCompression = true - break - } - } - - if !foundCompression { - return false, c.sendAlert(alertHandshakeFailure) - } - - hs.hello.vers = c.vers - t := uint32(config.time().Unix()) - hs.hello.random = make([]byte, 32) - hs.hello.random[0] = byte(t >> 24) - hs.hello.random[1] = byte(t >> 16) - hs.hello.random[2] = byte(t >> 8) - hs.hello.random[3] = byte(t) - _, err = io.ReadFull(config.rand(), hs.hello.random[4:]) - if err != nil { - return false, c.sendAlert(alertInternalError) - } - hs.hello.compressionMethod = compressionNone - if len(hs.clientHello.serverName) > 0 { - c.serverName = hs.clientHello.serverName - } - // Although sending an empty NPN extension is reasonable, Firefox has - // had a bug around this. Best to send nothing at all if - // config.NextProtos is empty. See - // https://code.google.com/p/go/issues/detail?id=5445. - if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 { - hs.hello.nextProtoNeg = true - hs.hello.nextProtos = config.NextProtos - } - - if len(config.Certificates) == 0 { - return false, c.sendAlert(alertInternalError) - } - hs.cert = &config.Certificates[0] - if len(hs.clientHello.serverName) > 0 { - hs.cert = config.getCertificateForName(hs.clientHello.serverName) - } - - _, hs.ecdsaOk = hs.cert.PrivateKey.(*ecdsa.PrivateKey) - - if hs.checkForResumption() { - return true, nil - } - - var preferenceList, supportedList []uint16 - if c.config.PreferServerCipherSuites { - preferenceList = c.config.cipherSuites() - supportedList = hs.clientHello.cipherSuites - } else { - preferenceList = hs.clientHello.cipherSuites - supportedList = c.config.cipherSuites() - } - - for _, id := range preferenceList { - if hs.suite = c.tryCipherSuite(id, supportedList, c.vers, hs.ellipticOk, hs.ecdsaOk); hs.suite != nil { - break - } - } - - if hs.suite == nil { - return false, c.sendAlert(alertHandshakeFailure) - } - - return false, nil -} - -// checkForResumption returns true if we should perform resumption on this connection. 
-func (hs *serverHandshakeState) checkForResumption() bool { - c := hs.c - - var ok bool - if hs.sessionState, ok = c.decryptTicket(hs.clientHello.sessionTicket); !ok { - return false - } - - if hs.sessionState.vers > hs.clientHello.vers { - return false - } - if vers, ok := c.config.mutualVersion(hs.sessionState.vers); !ok || vers != hs.sessionState.vers { - return false - } - - cipherSuiteOk := false - // Check that the client is still offering the ciphersuite in the session. - for _, id := range hs.clientHello.cipherSuites { - if id == hs.sessionState.cipherSuite { - cipherSuiteOk = true - break - } - } - if !cipherSuiteOk { - return false - } - - // Check that we also support the ciphersuite from the session. - hs.suite = c.tryCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.sessionState.vers, hs.ellipticOk, hs.ecdsaOk) - if hs.suite == nil { - return false - } - - sessionHasClientCerts := len(hs.sessionState.certificates) != 0 - needClientCerts := c.config.ClientAuth == RequireAnyClientCert || c.config.ClientAuth == RequireAndVerifyClientCert - if needClientCerts && !sessionHasClientCerts { - return false - } - if sessionHasClientCerts && c.config.ClientAuth == NoClientCert { - return false - } - - return true -} - -func (hs *serverHandshakeState) doResumeHandshake() error { - c := hs.c - - hs.hello.cipherSuite = hs.suite.id - // We echo the client's session ID in the ServerHello to let it know - // that we're doing a resumption. - hs.hello.sessionId = hs.clientHello.sessionId - hs.finishedHash.Write(hs.hello.marshal()) - c.writeRecord(recordTypeHandshake, hs.hello.marshal()) - - if len(hs.sessionState.certificates) > 0 { - if _, err := hs.processCertsFromClient(hs.sessionState.certificates); err != nil { - return err - } - } - - hs.masterSecret = hs.sessionState.masterSecret - - return nil -} - -func (hs *serverHandshakeState) doFullHandshake() error { - config := hs.c.config - c := hs.c - - if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 { - hs.hello.ocspStapling = true - } - - hs.hello.ticketSupported = hs.clientHello.ticketSupported && !config.SessionTicketsDisabled - hs.hello.cipherSuite = hs.suite.id - hs.finishedHash.Write(hs.hello.marshal()) - c.writeRecord(recordTypeHandshake, hs.hello.marshal()) - - certMsg := new(certificateMsg) - certMsg.certificates = hs.cert.Certificate - hs.finishedHash.Write(certMsg.marshal()) - c.writeRecord(recordTypeHandshake, certMsg.marshal()) - - if hs.hello.ocspStapling { - certStatus := new(certificateStatusMsg) - certStatus.statusType = statusTypeOCSP - certStatus.response = hs.cert.OCSPStaple - hs.finishedHash.Write(certStatus.marshal()) - c.writeRecord(recordTypeHandshake, certStatus.marshal()) - } - - keyAgreement := hs.suite.ka(c.vers) - skx, err := keyAgreement.generateServerKeyExchange(config, hs.cert, hs.clientHello, hs.hello) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - if skx != nil { - hs.finishedHash.Write(skx.marshal()) - c.writeRecord(recordTypeHandshake, skx.marshal()) - } - - if config.ClientAuth >= RequestClientCert { - // Request a client certificate - certReq := new(certificateRequestMsg) - certReq.certificateTypes = []byte{ - byte(certTypeRSASign), - byte(certTypeECDSASign), - } - if c.vers >= VersionTLS12 { - certReq.hasSignatureAndHash = true - certReq.signatureAndHashes = supportedClientCertSignatureAlgorithms - } - - // An empty list of certificateAuthorities signals to - // the client that it may send any certificate in response - // to our request. 
When we know the CAs we trust, then - // we can send them down, so that the client can choose - // an appropriate certificate to give to us. - if config.ClientCAs != nil { - certReq.certificateAuthorities = config.ClientCAs.Subjects() - } - hs.finishedHash.Write(certReq.marshal()) - c.writeRecord(recordTypeHandshake, certReq.marshal()) - } - - helloDone := new(serverHelloDoneMsg) - hs.finishedHash.Write(helloDone.marshal()) - c.writeRecord(recordTypeHandshake, helloDone.marshal()) - - var pub crypto.PublicKey // public key for client auth, if any - - msg, err := c.readHandshake() - if err != nil { - return err - } - - var ok bool - // If we requested a client certificate, then the client must send a - // certificate message, even if it's empty. - if config.ClientAuth >= RequestClientCert { - if certMsg, ok = msg.(*certificateMsg); !ok { - return c.sendAlert(alertHandshakeFailure) - } - hs.finishedHash.Write(certMsg.marshal()) - - if len(certMsg.certificates) == 0 { - // The client didn't actually send a certificate - switch config.ClientAuth { - case RequireAnyClientCert, RequireAndVerifyClientCert: - c.sendAlert(alertBadCertificate) - return errors.New("tls: client didn't provide a certificate") - } - } - - pub, err = hs.processCertsFromClient(certMsg.certificates) - if err != nil { - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - // Get client key exchange - ckx, ok := msg.(*clientKeyExchangeMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - hs.finishedHash.Write(ckx.marshal()) - - // If we received a client cert in response to our certificate request message, - // the client will send us a certificateVerifyMsg immediately after the - // clientKeyExchangeMsg. This message is a digest of all preceding - // handshake-layer messages that is signed using the private key corresponding - // to the client's certificate. This allows us to verify that the client is in - // possession of the private key of the certificate. 
- if len(c.peerCertificates) > 0 { - msg, err = c.readHandshake() - if err != nil { - return err - } - certVerify, ok := msg.(*certificateVerifyMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - switch key := pub.(type) { - case *ecdsa.PublicKey: - ecdsaSig := new(ecdsaSignature) - if _, err = asn1.Unmarshal(certVerify.signature, ecdsaSig); err != nil { - break - } - if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { - err = errors.New("ECDSA signature contained zero or negative values") - break - } - digest, _, _ := hs.finishedHash.hashForClientCertificate(signatureECDSA) - if !ecdsa.Verify(key, digest, ecdsaSig.R, ecdsaSig.S) { - err = errors.New("ECDSA verification failure") - break - } - case *rsa.PublicKey: - digest, hashFunc, _ := hs.finishedHash.hashForClientCertificate(signatureRSA) - err = rsa.VerifyPKCS1v15(key, hashFunc, digest, certVerify.signature) - } - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("could not validate signature of connection nonces: " + err.Error()) - } - - hs.finishedHash.Write(certVerify.marshal()) - } - - preMasterSecret, err := keyAgreement.processClientKeyExchange(config, hs.cert, ckx, c.vers) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - hs.masterSecret = masterFromPreMasterSecret(c.vers, preMasterSecret, hs.clientHello.random, hs.hello.random) - - return nil -} - -func (hs *serverHandshakeState) establishKeys() error { - c := hs.c - - clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromMasterSecret(c.vers, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen) - - var clientCipher, serverCipher interface{} - var clientHash, serverHash macFunction - - if hs.suite.aead == nil { - clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */) - clientHash = hs.suite.mac(c.vers, clientMAC) - serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */) - serverHash = hs.suite.mac(c.vers, serverMAC) - } else { - clientCipher = hs.suite.aead(clientKey, clientIV) - serverCipher = hs.suite.aead(serverKey, serverIV) - } - - c.in.prepareCipherSpec(c.vers, clientCipher, clientHash) - c.out.prepareCipherSpec(c.vers, serverCipher, serverHash) - - return nil -} - -func (hs *serverHandshakeState) readFinished() error { - c := hs.c - - c.readRecord(recordTypeChangeCipherSpec) - if err := c.error(); err != nil { - return err - } - - if hs.hello.nextProtoNeg { - msg, err := c.readHandshake() - if err != nil { - return err - } - nextProto, ok := msg.(*nextProtoMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - hs.finishedHash.Write(nextProto.marshal()) - c.clientProtocol = nextProto.proto - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - clientFinished, ok := msg.(*finishedMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - verify := hs.finishedHash.clientSum(hs.masterSecret) - if len(verify) != len(clientFinished.verifyData) || - subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 { - return c.sendAlert(alertHandshakeFailure) - } - - hs.finishedHash.Write(clientFinished.marshal()) - return nil -} - -func (hs *serverHandshakeState) sendSessionTicket() error { - if !hs.hello.ticketSupported { - return nil - } - - c := hs.c - m := new(newSessionTicketMsg) - - var err error - state := sessionState{ - vers: c.vers, - cipherSuite: hs.suite.id, - masterSecret: hs.masterSecret, - certificates: hs.certsFromClient, - } - 
m.ticket, err = c.encryptTicket(&state) - if err != nil { - return err - } - - hs.finishedHash.Write(m.marshal()) - c.writeRecord(recordTypeHandshake, m.marshal()) - - return nil -} - -func (hs *serverHandshakeState) sendFinished() error { - c := hs.c - - c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - - finished := new(finishedMsg) - finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - c.writeRecord(recordTypeHandshake, finished.marshal()) - - c.cipherSuite = hs.suite.id - - return nil -} - -// processCertsFromClient takes a chain of client certificates either from a -// Certificates message or from a sessionState and verifies them. It returns -// the public key of the leaf certificate. -func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (crypto.PublicKey, error) { - c := hs.c - - hs.certsFromClient = certificates - certs := make([]*x509.Certificate, len(certificates)) - var err error - for i, asn1Data := range certificates { - if certs[i], err = x509.ParseCertificate(asn1Data); err != nil { - c.sendAlert(alertBadCertificate) - return nil, errors.New("tls: failed to parse client certificate: " + err.Error()) - } - } - - if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 { - opts := x509.VerifyOptions{ - Roots: c.config.ClientCAs, - CurrentTime: c.config.time(), - Intermediates: x509.NewCertPool(), - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - - for _, cert := range certs[1:] { - opts.Intermediates.AddCert(cert) - } - - chains, err := certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return nil, errors.New("tls: failed to verify client's certificate: " + err.Error()) - } - - ok := false - for _, ku := range certs[0].ExtKeyUsage { - if ku == x509.ExtKeyUsageClientAuth { - ok = true - break - } - } - if !ok { - c.sendAlert(alertHandshakeFailure) - return nil, errors.New("tls: client's certificate's extended key usage doesn't permit it to be used for client authentication") - } - - c.verifiedChains = chains - } - - if len(certs) > 0 { - var pub crypto.PublicKey - switch key := certs[0].PublicKey.(type) { - case *ecdsa.PublicKey, *rsa.PublicKey: - pub = key - default: - return nil, c.sendAlert(alertUnsupportedCertificate) - } - c.peerCertificates = certs - return pub, nil - } - - return nil, nil -} - -// tryCipherSuite returns a cipherSuite with the given id if that cipher suite -// is acceptable to use. -func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, version uint16, ellipticOk, ecdsaOk bool) *cipherSuite { - for _, supported := range supportedCipherSuites { - if id == supported { - var candidate *cipherSuite - - for _, s := range cipherSuites { - if s.id == id { - candidate = s - break - } - } - if candidate == nil { - continue - } - // Don't select a ciphersuite which we can't - // support for this client. - if (candidate.flags&suiteECDHE != 0) && !ellipticOk { - continue - } - if (candidate.flags&suiteECDSA != 0) != ecdsaOk { - continue - } - if version < VersionTLS12 && candidate.flags&suiteTLS12 != 0 { - continue - } - return candidate - } - } - - return nil -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/key_agreement.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/key_agreement.go deleted file mode 100644 index 7e820c1e7..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/key_agreement.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2010 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "errors" - "io" - "math/big" -) - -// rsaKeyAgreement implements the standard TLS key agreement where the client -// encrypts the pre-master secret to the server's public key. -type rsaKeyAgreement struct{} - -func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - return nil, nil -} - -func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - preMasterSecret := make([]byte, 48) - _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) - if err != nil { - return nil, err - } - - if len(ckx.ciphertext) < 2 { - return nil, errors.New("bad ClientKeyExchange") - } - - ciphertext := ckx.ciphertext - if version != VersionSSL30 { - ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1]) - if ciphertextLen != len(ckx.ciphertext)-2 { - return nil, errors.New("bad ClientKeyExchange") - } - ciphertext = ckx.ciphertext[2:] - } - - err = rsa.DecryptPKCS1v15SessionKey(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), ciphertext, preMasterSecret) - if err != nil { - return nil, err - } - // We don't check the version number in the premaster secret. For one, - // by checking it, we would leak information about the validity of the - // encrypted pre-master secret. Secondly, it provides only a small - // benefit against a downgrade attack and some implementations send the - // wrong version anyway. See the discussion at the end of section - // 7.4.7.1 of RFC 4346. - return preMasterSecret, nil -} - -func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - return errors.New("unexpected ServerKeyExchange") -} - -func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - preMasterSecret := make([]byte, 48) - preMasterSecret[0] = byte(clientHello.vers >> 8) - preMasterSecret[1] = byte(clientHello.vers) - _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) - if err != nil { - return nil, nil, err - } - - encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret) - if err != nil { - return nil, nil, err - } - ckx := new(clientKeyExchangeMsg) - ckx.ciphertext = make([]byte, len(encrypted)+2) - ckx.ciphertext[0] = byte(len(encrypted) >> 8) - ckx.ciphertext[1] = byte(len(encrypted)) - copy(ckx.ciphertext[2:], encrypted) - return preMasterSecret, ckx, nil -} - -// sha1Hash calculates a SHA1 hash over the given byte slices. -func sha1Hash(slices [][]byte) []byte { - hsha1 := sha1.New() - for _, slice := range slices { - hsha1.Write(slice) - } - return hsha1.Sum(nil) -} - -// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the -// concatenation of an MD5 and SHA1 hash. 
-func md5SHA1Hash(slices [][]byte) []byte { - md5sha1 := make([]byte, md5.Size+sha1.Size) - hmd5 := md5.New() - for _, slice := range slices { - hmd5.Write(slice) - } - copy(md5sha1, hmd5.Sum(nil)) - copy(md5sha1[md5.Size:], sha1Hash(slices)) - return md5sha1 -} - -// sha256Hash implements TLS 1.2's hash function. -func sha256Hash(slices [][]byte) []byte { - h := sha256.New() - for _, slice := range slices { - h.Write(slice) - } - return h.Sum(nil) -} - -// hashForServerKeyExchange hashes the given slices and returns their digest -// and the identifier of the hash function used. The hashFunc argument is only -// used for >= TLS 1.2 and precisely identifies the hash function to use. -func hashForServerKeyExchange(sigType, hashFunc uint8, version uint16, slices ...[]byte) ([]byte, crypto.Hash, error) { - if version >= VersionTLS12 { - switch hashFunc { - case hashSHA256: - return sha256Hash(slices), crypto.SHA256, nil - case hashSHA1: - return sha1Hash(slices), crypto.SHA1, nil - default: - return nil, crypto.Hash(0), errors.New("tls: unknown hash function used by peer") - } - } - if sigType == signatureECDSA { - return sha1Hash(slices), crypto.SHA1, nil - } - return md5SHA1Hash(slices), crypto.MD5SHA1, nil -} - -// pickTLS12HashForSignature returns a TLS 1.2 hash identifier for signing a -// ServerKeyExchange given the signature type being used and the client's -// advertized list of supported signature and hash combinations. -func pickTLS12HashForSignature(sigType uint8, clientSignatureAndHashes []signatureAndHash) (uint8, error) { - if len(clientSignatureAndHashes) == 0 { - // If the client didn't specify any signature_algorithms - // extension then we can assume that it supports SHA1. See - // http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 - return hashSHA1, nil - } - - for _, sigAndHash := range clientSignatureAndHashes { - if sigAndHash.signature != sigType { - continue - } - switch sigAndHash.hash { - case hashSHA1, hashSHA256: - return sigAndHash.hash, nil - } - } - - return 0, errors.New("tls: client doesn't support any common hash functions") -} - -// ecdheRSAKeyAgreement implements a TLS key agreement where the server -// generates a ephemeral EC public/private key pair and signs it. The -// pre-master secret is then calculated using ECDH. The signature may -// either be ECDSA or RSA. 
-type ecdheKeyAgreement struct { - version uint16 - sigType uint8 - privateKey []byte - curve elliptic.Curve - x, y *big.Int -} - -func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - var curveid uint16 - -Curve: - for _, c := range clientHello.supportedCurves { - switch c { - case curveP256: - ka.curve = elliptic.P256() - curveid = c - break Curve - case curveP384: - ka.curve = elliptic.P384() - curveid = c - break Curve - case curveP521: - ka.curve = elliptic.P521() - curveid = c - break Curve - } - } - - if curveid == 0 { - return nil, errors.New("tls: no supported elliptic curves offered") - } - - var x, y *big.Int - var err error - ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand()) - if err != nil { - return nil, err - } - ecdhePublic := elliptic.Marshal(ka.curve, x, y) - - // http://tools.ietf.org/html/rfc4492#section-5.4 - serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic)) - serverECDHParams[0] = 3 // named curve - serverECDHParams[1] = byte(curveid >> 8) - serverECDHParams[2] = byte(curveid) - serverECDHParams[3] = byte(len(ecdhePublic)) - copy(serverECDHParams[4:], ecdhePublic) - - var tls12HashId uint8 - if ka.version >= VersionTLS12 { - if tls12HashId, err = pickTLS12HashForSignature(ka.sigType, clientHello.signatureAndHashes); err != nil { - return nil, err - } - } - - digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, hello.random, serverECDHParams) - if err != nil { - return nil, err - } - var sig []byte - switch ka.sigType { - case signatureECDSA: - privKey, ok := cert.PrivateKey.(*ecdsa.PrivateKey) - if !ok { - return nil, errors.New("ECDHE ECDSA requires an ECDSA server private key") - } - r, s, err := ecdsa.Sign(config.rand(), privKey, digest) - if err != nil { - return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) - } - sig, err = asn1.Marshal(ecdsaSignature{r, s}) - case signatureRSA: - privKey, ok := cert.PrivateKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("ECDHE RSA requires a RSA server private key") - } - sig, err = rsa.SignPKCS1v15(config.rand(), privKey, hashFunc, digest) - if err != nil { - return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) - } - default: - return nil, errors.New("unknown ECDHE signature algorithm") - } - - skx := new(serverKeyExchangeMsg) - sigAndHashLen := 0 - if ka.version >= VersionTLS12 { - sigAndHashLen = 2 - } - skx.key = make([]byte, len(serverECDHParams)+sigAndHashLen+2+len(sig)) - copy(skx.key, serverECDHParams) - k := skx.key[len(serverECDHParams):] - if ka.version >= VersionTLS12 { - k[0] = tls12HashId - k[1] = ka.sigType - k = k[2:] - } - k[0] = byte(len(sig) >> 8) - k[1] = byte(len(sig)) - copy(k[2:], sig) - - return skx, nil -} - -func (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 { - return nil, errors.New("bad ClientKeyExchange") - } - x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:]) - if x == nil { - return nil, errors.New("bad ClientKeyExchange") - } - x, _ = ka.curve.ScalarMult(x, y, ka.privateKey) - preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) - xBytes := x.Bytes() - copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) - - return preMasterSecret, 
nil -} - -var errServerKeyExchange = errors.New("invalid ServerKeyExchange") - -func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - if len(skx.key) < 4 { - return errServerKeyExchange - } - if skx.key[0] != 3 { // named curve - return errors.New("server selected unsupported curve") - } - curveid := uint16(skx.key[1])<<8 | uint16(skx.key[2]) - - switch curveid { - case curveP256: - ka.curve = elliptic.P256() - case curveP384: - ka.curve = elliptic.P384() - case curveP521: - ka.curve = elliptic.P521() - default: - return errors.New("server selected unsupported curve") - } - - publicLen := int(skx.key[3]) - if publicLen+4 > len(skx.key) { - return errServerKeyExchange - } - ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen]) - if ka.x == nil { - return errServerKeyExchange - } - serverECDHParams := skx.key[:4+publicLen] - - sig := skx.key[4+publicLen:] - if len(sig) < 2 { - return errServerKeyExchange - } - - var tls12HashId uint8 - if ka.version >= VersionTLS12 { - // handle SignatureAndHashAlgorithm - var sigAndHash []uint8 - sigAndHash, sig = sig[:2], sig[2:] - if sigAndHash[1] != ka.sigType { - return errServerKeyExchange - } - tls12HashId = sigAndHash[0] - if len(sig) < 2 { - return errServerKeyExchange - } - } - sigLen := int(sig[0])<<8 | int(sig[1]) - if sigLen+2 != len(sig) { - return errServerKeyExchange - } - sig = sig[2:] - - digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, serverHello.random, serverECDHParams) - if err != nil { - return err - } - switch ka.sigType { - case signatureECDSA: - pubKey, ok := cert.PublicKey.(*ecdsa.PublicKey) - if !ok { - return errors.New("ECDHE ECDSA requires a ECDSA server public key") - } - ecdsaSig := new(ecdsaSignature) - if _, err := asn1.Unmarshal(sig, ecdsaSig); err != nil { - return err - } - if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { - return errors.New("ECDSA signature contained zero or negative values") - } - if !ecdsa.Verify(pubKey, digest, ecdsaSig.R, ecdsaSig.S) { - return errors.New("ECDSA verification failure") - } - case signatureRSA: - pubKey, ok := cert.PublicKey.(*rsa.PublicKey) - if !ok { - return errors.New("ECDHE RSA requires a RSA server public key") - } - if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, digest, sig); err != nil { - return err - } - default: - return errors.New("unknown ECDHE signature algorithm") - } - - return nil -} - -func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - if ka.curve == nil { - return nil, nil, errors.New("missing ServerKeyExchange message") - } - priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand()) - if err != nil { - return nil, nil, err - } - x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv) - preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) - xBytes := x.Bytes() - copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) - - serialized := elliptic.Marshal(ka.curve, mx, my) - - ckx := new(clientKeyExchangeMsg) - ckx.ciphertext = make([]byte, 1+len(serialized)) - ckx.ciphertext[0] = byte(len(serialized)) - copy(ckx.ciphertext[1:], serialized) - - return preMasterSecret, ckx, nil -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/prf.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/prf.go deleted file 
mode 100644 index fb8b3ab4d..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/prf.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -// Split a premaster secret in two as specified in RFC 4346, section 5. -func splitPreMasterSecret(secret []byte) (s1, s2 []byte) { - s1 = secret[0 : (len(secret)+1)/2] - s2 = secret[len(secret)/2:] - return -} - -// pHash implements the P_hash function, as defined in RFC 4346, section 5. -func pHash(result, secret, seed []byte, hash func() hash.Hash) { - h := hmac.New(hash, secret) - h.Write(seed) - a := h.Sum(nil) - - j := 0 - for j < len(result) { - h.Reset() - h.Write(a) - h.Write(seed) - b := h.Sum(nil) - todo := len(b) - if j+todo > len(result) { - todo = len(result) - j - } - copy(result[j:j+todo], b) - j += todo - - h.Reset() - h.Write(a) - a = h.Sum(nil) - } -} - -// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5. -func prf10(result, secret, label, seed []byte) { - hashSHA1 := sha1.New - hashMD5 := md5.New - - labelAndSeed := make([]byte, len(label)+len(seed)) - copy(labelAndSeed, label) - copy(labelAndSeed[len(label):], seed) - - s1, s2 := splitPreMasterSecret(secret) - pHash(result, s1, labelAndSeed, hashMD5) - result2 := make([]byte, len(result)) - pHash(result2, s2, labelAndSeed, hashSHA1) - - for i, b := range result2 { - result[i] ^= b - } -} - -// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, section 5. -func prf12(result, secret, label, seed []byte) { - labelAndSeed := make([]byte, len(label)+len(seed)) - copy(labelAndSeed, label) - copy(labelAndSeed[len(label):], seed) - - pHash(result, secret, labelAndSeed, sha256.New) -} - -// prf30 implements the SSL 3.0 pseudo-random function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6. -func prf30(result, secret, label, seed []byte) { - hashSHA1 := sha1.New() - hashMD5 := md5.New() - - done := 0 - i := 0 - // RFC5246 section 6.3 says that the largest PRF output needed is 128 - // bytes. Since no more ciphersuites will be added to SSLv3, this will - // remain true. Each iteration gives us 16 bytes so 10 iterations will - // be sufficient. - var b [11]byte - for done < len(result) { - for j := 0; j <= i; j++ { - b[j] = 'A' + byte(i) - } - - hashSHA1.Reset() - hashSHA1.Write(b[:i+1]) - hashSHA1.Write(secret) - hashSHA1.Write(seed) - digest := hashSHA1.Sum(nil) - - hashMD5.Reset() - hashMD5.Write(secret) - hashMD5.Write(digest) - - done += copy(result[done:], hashMD5.Sum(nil)) - i++ - } -} - -const ( - tlsRandomLength = 32 // Length of a random nonce in TLS 1.1. - masterSecretLength = 48 // Length of a master secret in TLS 1.1. - finishedVerifyLength = 12 // Length of verify_data in a Finished message. 
-) - -var masterSecretLabel = []byte("master secret") -var keyExpansionLabel = []byte("key expansion") -var clientFinishedLabel = []byte("client finished") -var serverFinishedLabel = []byte("server finished") - -func prfForVersion(version uint16) func(result, secret, label, seed []byte) { - switch version { - case VersionSSL30: - return prf30 - case VersionTLS10, VersionTLS11: - return prf10 - case VersionTLS12: - return prf12 - default: - panic("unknown version") - } -} - -// masterFromPreMasterSecret generates the master secret from the pre-master -// secret. See http://tools.ietf.org/html/rfc5246#section-8.1 -func masterFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, serverRandom []byte) []byte { - var seed [tlsRandomLength * 2]byte - copy(seed[0:len(clientRandom)], clientRandom) - copy(seed[len(clientRandom):], serverRandom) - masterSecret := make([]byte, masterSecretLength) - prfForVersion(version)(masterSecret, preMasterSecret, masterSecretLabel, seed[0:]) - return masterSecret -} - -// keysFromMasterSecret generates the connection keys from the master -// secret, given the lengths of the MAC key, cipher key and IV, as defined in -// RFC 2246, section 6.3. -func keysFromMasterSecret(version uint16, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) { - var seed [tlsRandomLength * 2]byte - copy(seed[0:len(clientRandom)], serverRandom) - copy(seed[len(serverRandom):], clientRandom) - - n := 2*macLen + 2*keyLen + 2*ivLen - keyMaterial := make([]byte, n) - prfForVersion(version)(keyMaterial, masterSecret, keyExpansionLabel, seed[0:]) - clientMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - serverMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - clientKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - serverKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - clientIV = keyMaterial[:ivLen] - keyMaterial = keyMaterial[ivLen:] - serverIV = keyMaterial[:ivLen] - return -} - -func newFinishedHash(version uint16) finishedHash { - if version >= VersionTLS12 { - return finishedHash{sha256.New(), sha256.New(), nil, nil, version} - } - return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), version} -} - -// A finishedHash calculates the hash of a set of handshake messages suitable -// for including in a Finished message. -type finishedHash struct { - client hash.Hash - server hash.Hash - - // Prior to TLS 1.2, an additional MD5 hash is required. - clientMD5 hash.Hash - serverMD5 hash.Hash - - version uint16 -} - -func (h finishedHash) Write(msg []byte) (n int, err error) { - h.client.Write(msg) - h.server.Write(msg) - - if h.version < VersionTLS12 { - h.clientMD5.Write(msg) - h.serverMD5.Write(msg) - } - return len(msg), nil -} - -// finishedSum30 calculates the contents of the verify_data member of a SSLv3 -// Finished message given the MD5 and SHA1 hashes of a set of handshake -// messages. 
-func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic [4]byte) []byte { - md5.Write(magic[:]) - md5.Write(masterSecret) - md5.Write(ssl30Pad1[:]) - md5Digest := md5.Sum(nil) - - md5.Reset() - md5.Write(masterSecret) - md5.Write(ssl30Pad2[:]) - md5.Write(md5Digest) - md5Digest = md5.Sum(nil) - - sha1.Write(magic[:]) - sha1.Write(masterSecret) - sha1.Write(ssl30Pad1[:40]) - sha1Digest := sha1.Sum(nil) - - sha1.Reset() - sha1.Write(masterSecret) - sha1.Write(ssl30Pad2[:40]) - sha1.Write(sha1Digest) - sha1Digest = sha1.Sum(nil) - - ret := make([]byte, len(md5Digest)+len(sha1Digest)) - copy(ret, md5Digest) - copy(ret[len(md5Digest):], sha1Digest) - return ret -} - -var ssl3ClientFinishedMagic = [4]byte{0x43, 0x4c, 0x4e, 0x54} -var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52} - -// clientSum returns the contents of the verify_data member of a client's -// Finished message. -func (h finishedHash) clientSum(masterSecret []byte) []byte { - if h.version == VersionSSL30 { - return finishedSum30(h.clientMD5, h.client, masterSecret, ssl3ClientFinishedMagic) - } - - out := make([]byte, finishedVerifyLength) - if h.version >= VersionTLS12 { - seed := h.client.Sum(nil) - prf12(out, masterSecret, clientFinishedLabel, seed) - } else { - seed := make([]byte, 0, md5.Size+sha1.Size) - seed = h.clientMD5.Sum(seed) - seed = h.client.Sum(seed) - prf10(out, masterSecret, clientFinishedLabel, seed) - } - return out -} - -// serverSum returns the contents of the verify_data member of a server's -// Finished message. -func (h finishedHash) serverSum(masterSecret []byte) []byte { - if h.version == VersionSSL30 { - return finishedSum30(h.serverMD5, h.server, masterSecret, ssl3ServerFinishedMagic) - } - - out := make([]byte, finishedVerifyLength) - if h.version >= VersionTLS12 { - seed := h.server.Sum(nil) - prf12(out, masterSecret, serverFinishedLabel, seed) - } else { - seed := make([]byte, 0, md5.Size+sha1.Size) - seed = h.serverMD5.Sum(seed) - seed = h.server.Sum(seed) - prf10(out, masterSecret, serverFinishedLabel, seed) - } - return out -} - -// hashForClientCertificate returns a digest, hash function, and TLS 1.2 hash -// id suitable for signing by a TLS client certificate. -func (h finishedHash) hashForClientCertificate(sigType uint8) ([]byte, crypto.Hash, uint8) { - if h.version >= VersionTLS12 { - digest := h.server.Sum(nil) - return digest, crypto.SHA256, hashSHA256 - } - if sigType == signatureECDSA { - digest := h.server.Sum(nil) - return digest, crypto.SHA1, hashSHA1 - } - - digest := make([]byte, 0, 36) - digest = h.serverMD5.Sum(digest) - digest = h.server.Sum(digest) - return digest, crypto.MD5SHA1, 0 /* not specified in TLS 1.2. */ -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/ticket.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/ticket.go deleted file mode 100644 index 4cfc5a53f..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/ticket.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/subtle" - "errors" - "io" -) - -// sessionState contains the information that is serialized into a session -// ticket in order to later resume a connection. 
-type sessionState struct { - vers uint16 - cipherSuite uint16 - masterSecret []byte - certificates [][]byte -} - -func (s *sessionState) equal(i interface{}) bool { - s1, ok := i.(*sessionState) - if !ok { - return false - } - - if s.vers != s1.vers || - s.cipherSuite != s1.cipherSuite || - !bytes.Equal(s.masterSecret, s1.masterSecret) { - return false - } - - if len(s.certificates) != len(s1.certificates) { - return false - } - - for i := range s.certificates { - if !bytes.Equal(s.certificates[i], s1.certificates[i]) { - return false - } - } - - return true -} - -func (s *sessionState) marshal() []byte { - length := 2 + 2 + 2 + len(s.masterSecret) + 2 - for _, cert := range s.certificates { - length += 4 + len(cert) - } - - ret := make([]byte, length) - x := ret - x[0] = byte(s.vers >> 8) - x[1] = byte(s.vers) - x[2] = byte(s.cipherSuite >> 8) - x[3] = byte(s.cipherSuite) - x[4] = byte(len(s.masterSecret) >> 8) - x[5] = byte(len(s.masterSecret)) - x = x[6:] - copy(x, s.masterSecret) - x = x[len(s.masterSecret):] - - x[0] = byte(len(s.certificates) >> 8) - x[1] = byte(len(s.certificates)) - x = x[2:] - - for _, cert := range s.certificates { - x[0] = byte(len(cert) >> 24) - x[1] = byte(len(cert) >> 16) - x[2] = byte(len(cert) >> 8) - x[3] = byte(len(cert)) - copy(x[4:], cert) - x = x[4+len(cert):] - } - - return ret -} - -func (s *sessionState) unmarshal(data []byte) bool { - if len(data) < 8 { - return false - } - - s.vers = uint16(data[0])<<8 | uint16(data[1]) - s.cipherSuite = uint16(data[2])<<8 | uint16(data[3]) - masterSecretLen := int(data[4])<<8 | int(data[5]) - data = data[6:] - if len(data) < masterSecretLen { - return false - } - - s.masterSecret = data[:masterSecretLen] - data = data[masterSecretLen:] - - if len(data) < 2 { - return false - } - - numCerts := int(data[0])<<8 | int(data[1]) - data = data[2:] - - s.certificates = make([][]byte, numCerts) - for i := range s.certificates { - if len(data) < 4 { - return false - } - certLen := int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - data = data[4:] - if certLen < 0 { - return false - } - if len(data) < certLen { - return false - } - s.certificates[i] = data[:certLen] - data = data[certLen:] - } - - if len(data) > 0 { - return false - } - - return true -} - -func (c *Conn) encryptTicket(state *sessionState) ([]byte, error) { - serialized := state.marshal() - encrypted := make([]byte, aes.BlockSize+len(serialized)+sha256.Size) - iv := encrypted[:aes.BlockSize] - macBytes := encrypted[len(encrypted)-sha256.Size:] - - if _, err := io.ReadFull(c.config.rand(), iv); err != nil { - return nil, err - } - block, err := aes.NewCipher(c.config.SessionTicketKey[:16]) - if err != nil { - return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error()) - } - cipher.NewCTR(block, iv).XORKeyStream(encrypted[aes.BlockSize:], serialized) - - mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) - mac.Write(encrypted[:len(encrypted)-sha256.Size]) - mac.Sum(macBytes[:0]) - - return encrypted, nil -} - -func (c *Conn) decryptTicket(encrypted []byte) (*sessionState, bool) { - if len(encrypted) < aes.BlockSize+sha256.Size { - return nil, false - } - - iv := encrypted[:aes.BlockSize] - macBytes := encrypted[len(encrypted)-sha256.Size:] - - mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) - mac.Write(encrypted[:len(encrypted)-sha256.Size]) - expected := mac.Sum(nil) - - if subtle.ConstantTimeCompare(macBytes, expected) != 1 { - return nil, false - } - - block, err := 
aes.NewCipher(c.config.SessionTicketKey[:16]) - if err != nil { - return nil, false - } - ciphertext := encrypted[aes.BlockSize : len(encrypted)-sha256.Size] - plaintext := ciphertext - cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext) - - state := new(sessionState) - ok := state.unmarshal(plaintext) - return state, ok -} diff --git a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/tls.go b/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/tls.go deleted file mode 100644 index 6c67506fc..000000000 --- a/vendor/github.com/masterzen/azure-sdk-for-go/core/tls/tls.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tls partially implements TLS 1.2, as specified in RFC 5246. -package tls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "io/ioutil" - "net" - "strings" -) - -// Server returns a new TLS server side connection -// using conn as the underlying transport. -// The configuration config must be non-nil and must have -// at least one certificate. -func Server(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config} -} - -// Client returns a new TLS client side connection -// using conn as the underlying transport. -// Client interprets a nil configuration as equivalent to -// the zero configuration; see the documentation of Config -// for the defaults. -func Client(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config, isClient: true} -} - -// A listener implements a network listener (net.Listener) for TLS connections. -type listener struct { - net.Listener - config *Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *listener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - c = Server(c, l.config) - return -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func NewListener(inner net.Listener, config *Config) net.Listener { - l := new(listener) - l.Listener = inner - l.config = config - return l -} - -// Listen creates a TLS listener accepting connections on the -// given network address using net.Listen. -// The configuration config must be non-nil and must have -// at least one certificate. -func Listen(network, laddr string, config *Config) (net.Listener, error) { - if config == nil || len(config.Certificates) == 0 { - return nil, errors.New("tls.Listen: no certificates in configuration") - } - l, err := net.Listen(network, laddr) - if err != nil { - return nil, err - } - return NewListener(l, config), nil -} - -// Dial connects to the given network address using net.Dial -// and then initiates a TLS handshake, returning the resulting -// TLS connection. -// Dial interprets a nil configuration as equivalent to -// the zero configuration; see the documentation of Config -// for the defaults. 
-func Dial(network, addr string, config *Config) (*Conn, error) { - raddr := addr - c, err := net.Dial(network, raddr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(raddr, ":") - if colonPos == -1 { - colonPos = len(raddr) - } - hostname := raddr[:colonPos] - - if config == nil { - config = defaultConfig() - } - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - conn := Client(c, config) - if err = conn.Handshake(); err != nil { - c.Close() - return nil, err - } - return conn, nil -} - -// LoadX509KeyPair reads and parses a public/private key pair from a pair of -// files. The files must contain PEM encoded data. -func LoadX509KeyPair(certFile, keyFile string) (cert Certificate, err error) { - certPEMBlock, err := ioutil.ReadFile(certFile) - if err != nil { - return - } - keyPEMBlock, err := ioutil.ReadFile(keyFile) - if err != nil { - return - } - return X509KeyPair(certPEMBlock, keyPEMBlock) -} - -// X509KeyPair parses a public/private key pair from a pair of -// PEM encoded data. -func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) { - var certDERBlock *pem.Block - for { - certDERBlock, certPEMBlock = pem.Decode(certPEMBlock) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - cert.Certificate = append(cert.Certificate, certDERBlock.Bytes) - } - } - - if len(cert.Certificate) == 0 { - err = errors.New("crypto/tls: failed to parse certificate PEM data") - return - } - - var keyDERBlock *pem.Block - for { - keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock) - if keyDERBlock == nil { - err = errors.New("crypto/tls: failed to parse key PEM data") - return - } - if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") { - break - } - } - - cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes) - if err != nil { - return - } - - // We don't need to parse the public key for TLS, but we so do anyway - // to check that it looks sane and matches the private key. - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return - } - - switch pub := x509Cert.PublicKey.(type) { - case *rsa.PublicKey: - priv, ok := cert.PrivateKey.(*rsa.PrivateKey) - if !ok { - err = errors.New("crypto/tls: private key type does not match public key type") - return - } - if pub.N.Cmp(priv.N) != 0 { - err = errors.New("crypto/tls: private key does not match public key") - return - } - case *ecdsa.PublicKey: - priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey) - if !ok { - err = errors.New("crypto/tls: private key type does not match public key type") - return - - } - if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 { - err = errors.New("crypto/tls: private key does not match public key") - return - } - default: - err = errors.New("crypto/tls: unknown public key algorithm") - return - } - - return -} - -// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates -// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. -// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. 
-func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { - if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { - return key, nil - } - if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey: - return key, nil - default: - return nil, errors.New("crypto/tls: found unknown private key type in PKCS#8 wrapping") - } - } - if key, err := x509.ParseECPrivateKey(der); err == nil { - return key, nil - } - - return nil, errors.New("crypto/tls: failed to parse private key") -} diff --git a/vendor/github.com/masterzen/winrm/.travis.yml b/vendor/github.com/masterzen/winrm/.travis.yml index 7da672a34..18c04d4c1 100644 --- a/vendor/github.com/masterzen/winrm/.travis.yml +++ b/vendor/github.com/masterzen/winrm/.travis.yml @@ -1,8 +1,6 @@ sudo: false language: go go: -- 1.5 -- 1.6 - 1.7 - 1.8 - 1.9 diff --git a/vendor/github.com/masterzen/winrm/README.md b/vendor/github.com/masterzen/winrm/README.md index 2f1d0ad5c..ac4aff970 100644 --- a/vendor/github.com/masterzen/winrm/README.md +++ b/vendor/github.com/masterzen/winrm/README.md @@ -18,6 +18,8 @@ _Note_: this library doesn't support domain users (it doesn't support GSSAPI nor ## Getting Started WinRM is available on Windows Server 2008 and up. This project natively supports basic authentication for local accounts, see the steps in the next section on how to prepare the remote Windows machine for this scenario. The authentication model is pluggable, see below for an example on using Negotiate/NTLM authentication (e.g. for connecting to vanilla Azure VMs). +_Note_: This library only supports Golang 1.7+ + ### Preparing the remote Windows machine for Basic authentication This project supports only basic authentication for local accounts (domain users are not supported). The remote windows system must be prepared for winrm: @@ -132,6 +134,44 @@ if err != nil { ``` +By passing a Dial in the Parameters struct it is possible to use different dialer (e.g. 
tunnel through SSH) + +```go +package main + + import ( + "github.com/masterzen/winrm" + "golang.org/x/crypto/ssh" + "os" + ) + + func main() { + + sshClient, err := ssh.Dial("tcp","localhost:22", &ssh.ClientConfig{ + User:"ubuntu", + Auth: []ssh.AuthMethod{ssh.Password("ubuntu")}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }) + + endpoint := winrm.NewEndpoint("other-host", 5985, false, false, nil, nil, nil, 0) + + params := winrm.DefaultParameters + params.Dial = sshClient.Dial + + client, err := winrm.NewClientWithParameters(endpoint, "test", "test", params) + if err != nil { + panic(err) + } + + _, err = client.RunWithInput("ipconfig", os.Stdout, os.Stderr, os.Stdin) + if err != nil { + panic(err) + } + } + +``` + + For a more complex example, it is possible to call the various functions directly: ```go diff --git a/vendor/github.com/masterzen/winrm/auth.go b/vendor/github.com/masterzen/winrm/auth.go index 2e7c3757f..0ae5d9972 100644 --- a/vendor/github.com/masterzen/winrm/auth.go +++ b/vendor/github.com/masterzen/winrm/auth.go @@ -1,20 +1,20 @@ package winrm import ( + "crypto/tls" "fmt" "io/ioutil" "net" + "net/http" "strings" "time" - "github.com/masterzen/azure-sdk-for-go/core/http" - "github.com/masterzen/azure-sdk-for-go/core/tls" - "github.com/masterzen/winrm/soap" ) type ClientAuthRequest struct { transport http.RoundTripper + dial func(network, addr string) (net.Conn, error) } func (c *ClientAuthRequest) Transport(endpoint *Endpoint) error { @@ -23,16 +23,22 @@ func (c *ClientAuthRequest) Transport(endpoint *Endpoint) error { return err } + dial := (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial + + if c.dial != nil { + dial = c.dial + } + transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{ InsecureSkipVerify: endpoint.Insecure, Certificates: []tls.Certificate{cert}, }, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, + Dial: dial, ResponseHeaderTimeout: endpoint.Timeout, } @@ -104,3 +110,9 @@ func (c ClientAuthRequest) Post(client *Client, request *soap.SoapMessage) (stri return body, err } + +func NewClientAuthRequestWithDial(dial func(network, addr string) (net.Conn, error)) *ClientAuthRequest { + return &ClientAuthRequest{ + dial:dial, + } +} \ No newline at end of file diff --git a/vendor/github.com/masterzen/winrm/client.go b/vendor/github.com/masterzen/winrm/client.go index c19515194..fb68c3cac 100644 --- a/vendor/github.com/masterzen/winrm/client.go +++ b/vendor/github.com/masterzen/winrm/client.go @@ -46,7 +46,7 @@ func NewClientWithParameters(endpoint *Endpoint, user, password string, params * url: endpoint.url(), useHTTPS: endpoint.HTTPS, // default transport - http: &clientRequest{}, + http: &clientRequest{ dial:params.Dial }, } // switch to other transport if provided diff --git a/vendor/github.com/masterzen/winrm/go.mod b/vendor/github.com/masterzen/winrm/go.mod new file mode 100644 index 000000000..8c674377f --- /dev/null +++ b/vendor/github.com/masterzen/winrm/go.mod @@ -0,0 +1,13 @@ +module github.com/masterzen/winrm + +require ( + github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 + github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 + github.com/kr/pretty v0.1.0 // indirect + github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 + github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d + golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f // indirect + golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +) diff --git a/vendor/github.com/masterzen/winrm/go.sum b/vendor/github.com/masterzen/winrm/go.sum new file mode 100644 index 000000000..7e343bd10 --- /dev/null +++ b/vendor/github.com/masterzen/winrm/go.sum @@ -0,0 +1,21 @@ +github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA= +github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f h1:qWFY9ZxP3tfI37wYIs/MnIAqK0vlXp1xnYEa5HxFSSY= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/masterzen/winrm/http.go b/vendor/github.com/masterzen/winrm/http.go index 929ea82c8..9767c2145 100644 --- a/vendor/github.com/masterzen/winrm/http.go +++ b/vendor/github.com/masterzen/winrm/http.go @@ -39,19 +39,27 @@ func body(response *http.Response) (string, error) { type clientRequest struct { transport http.RoundTripper + dial func(network, addr string) (net.Conn, error) } func (c *clientRequest) Transport(endpoint *Endpoint) error { + + dial := (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial + + if c.dial != nil { + dial = c.dial + } + transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{ InsecureSkipVerify: endpoint.Insecure, ServerName: endpoint.TLSServerName, }, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, + Dial: dial, ResponseHeaderTimeout: endpoint.Timeout, } @@ -99,3 +107,10 @@ func (c clientRequest) Post(client *Client, request *soap.SoapMessage) (string, return body, err } + + 
+func NewClientWithDial(dial func(network, addr string) (net.Conn, error)) *clientRequest {
+	return &clientRequest{
+		dial:dial,
+	}
+}
\ No newline at end of file
diff --git a/vendor/github.com/masterzen/winrm/ntlm.go b/vendor/github.com/masterzen/winrm/ntlm.go
index 6a0ee1060..08de8a522 100644
--- a/vendor/github.com/masterzen/winrm/ntlm.go
+++ b/vendor/github.com/masterzen/winrm/ntlm.go
@@ -3,6 +3,7 @@ package winrm
 import (
 	"github.com/Azure/go-ntlmssp"
 	"github.com/masterzen/winrm/soap"
+	"net"
 )
 
 // ClientNTLM provides a transport via NTLMv2
@@ -21,3 +22,12 @@ func (c *ClientNTLM) Transport(endpoint *Endpoint) error {
 func (c ClientNTLM) Post(client *Client, request *soap.SoapMessage) (string, error) {
 	return c.clientRequest.Post(client, request)
 }
+
+
+func NewClientNTLMWithDial(dial func(network, addr string) (net.Conn, error)) *ClientNTLM {
+	return &ClientNTLM{
+		clientRequest{
+			dial:dial,
+		},
+	}
+}
\ No newline at end of file
diff --git a/vendor/github.com/masterzen/winrm/parameters.go b/vendor/github.com/masterzen/winrm/parameters.go
index eb3771be1..08adcc99f 100644
--- a/vendor/github.com/masterzen/winrm/parameters.go
+++ b/vendor/github.com/masterzen/winrm/parameters.go
@@ -1,5 +1,7 @@
 package winrm
 
+import "net"
+
 // Parameters struct defines
 // metadata information and http transport config
 type Parameters struct {
@@ -7,6 +9,7 @@ type Parameters struct {
 	Locale             string
 	EnvelopeSize       int
 	TransportDecorator func() Transporter
+	Dial               func(network, addr string) (net.Conn, error)
 }
 
 // DefaultParameters return constant config
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
index 2641dadd6..213bf204a 100644
--- a/vendor/golang.org/x/crypto/blowfish/cipher.go
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -3,6 +3,14 @@
 // license that can be found in the LICENSE file.
 
 // Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+//
+// Blowfish is a legacy cipher and its short block size makes it vulnerable to
+// birthday bound attacks (see https://sweet32.info). It should only be used
+// where compatibility with legacy systems, not security, is the goal.
+//
+// Deprecated: any new system should use AES (from crypto/aes, if necessary in
+// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
+// golang.org/x/crypto/chacha20poly1305).
 package blowfish // import "golang.org/x/crypto/blowfish"
 
 // The code is a port of Bruce Schneier's C implementation.
diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
index 0b4af37bd..ddcbeb6f2 100644
--- a/vendor/golang.org/x/crypto/cast5/cast5.go
+++ b/vendor/golang.org/x/crypto/cast5/cast5.go
@@ -2,8 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
-// OpenPGP cipher.
+// Package cast5 implements CAST5, as defined in RFC 2144.
+//
+// CAST5 is a legacy cipher and its short block size makes it vulnerable to
+// birthday bound attacks (see https://sweet32.info). It should only be used
+// where compatibility with legacy systems, not security, is the goal.
+//
+// Deprecated: any new system should use AES (from crypto/aes, if necessary in
+// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
+// golang.org/x/crypto/chacha20poly1305).
 package cast5 // import "golang.org/x/crypto/cast5"
 
 import "errors"
diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go
index 6d9ba9e5f..59d348069 100644
--- a/vendor/golang.org/x/crypto/md4/md4.go
+++ b/vendor/golang.org/x/crypto/md4/md4.go
@@ -3,6 +3,10 @@
 // license that can be found in the LICENSE file.
 
 // Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
+//
+// Deprecated: MD4 is cryptographically broken and should only be used
+// where compatibility with legacy systems, not security, is the goal. Instead,
+// use a secure hash like SHA-256 (from crypto/sha256).
 package md4 // import "golang.org/x/crypto/md4"
 
 import (
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3808504cc..fb1848771 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -23,22 +23,22 @@ github.com/Azure/go-autorest/autorest/date
 github.com/Azure/go-autorest/autorest/azure/cli
 github.com/Azure/go-autorest/autorest/to
 github.com/Azure/go-autorest/autorest/validation
-# github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006
+# github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4
 github.com/Azure/go-ntlmssp
-# github.com/ChrisTrenkamp/goxpath v0.0.0-20170625215350-4fe035839290
+# github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022
 github.com/ChrisTrenkamp/goxpath
 github.com/ChrisTrenkamp/goxpath/tree
 github.com/ChrisTrenkamp/goxpath/tree/xmltree
 github.com/ChrisTrenkamp/goxpath/internal/execxp
-github.com/ChrisTrenkamp/goxpath/internal/parser
+github.com/ChrisTrenkamp/goxpath/parser
 github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder
 github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele
-github.com/ChrisTrenkamp/goxpath/internal/lexer
-github.com/ChrisTrenkamp/goxpath/internal/parser/findutil
-github.com/ChrisTrenkamp/goxpath/internal/parser/intfns
-github.com/ChrisTrenkamp/goxpath/internal/parser/pathexpr
-github.com/ChrisTrenkamp/goxpath/internal/xconst
+github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil
+github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns
 github.com/ChrisTrenkamp/goxpath/internal/xsort
+github.com/ChrisTrenkamp/goxpath/lexer
+github.com/ChrisTrenkamp/goxpath/parser/pathexpr
+github.com/ChrisTrenkamp/goxpath/xconst
 github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode
 # github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292
 github.com/Unknwon/com
@@ -360,12 +360,9 @@ github.com/keybase/go-crypto/openpgp/elgamal
 github.com/lusis/go-artifactory/src/artifactory.v401
 # github.com/marstr/guid v1.1.0
 github.com/marstr/guid
-# github.com/masterzen/azure-sdk-for-go v0.0.0-20161014135628-ee4f0065d00c
-github.com/masterzen/azure-sdk-for-go/core/http
-github.com/masterzen/azure-sdk-for-go/core/tls
 # github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9
 github.com/masterzen/simplexml/dom
-# github.com/masterzen/winrm v0.0.0-20180224160350-7e40f93ae939
+# github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b
 github.com/masterzen/winrm
 github.com/masterzen/winrm/soap
 # github.com/mattn/go-colorable v0.1.1
@@ -458,7 +455,7 @@ go.opencensus.io/trace/propagation
 go.opencensus.io
 go.opencensus.io/stats/internal
 go.opencensus.io/internal/tagencoding
-# golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2
+# golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/knownhosts
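
Note on the vendored winrm changes above: the bump adds a pluggable dialer to github.com/masterzen/winrm. clientRequest gains a dial field, ClientNTLM gains NewClientNTLMWithDial, and Parameters gains a Dial hook, so a caller can route WinRM traffic through any func(network, addr string) (net.Conn, error) (for example an SSH tunnel's Dial) instead of the default net.Dialer. The following Go snippet is a minimal sketch of how a consumer might wire this up; it is not part of this change set. The Endpoint struct and NewClientWithParameters come from the library's existing public API (assumed, not shown in this diff), and the host, credentials, and parameter values are placeholders.

package main

import (
	"log"
	"net"
	"time"

	"github.com/masterzen/winrm"
)

func main() {
	// Any func(network, addr string) (net.Conn, error) can be injected here;
	// a plain net.Dialer stands in for e.g. an SSH tunnel's Dial method.
	dial := (&net.Dialer{Timeout: 10 * time.Second}).Dial

	endpoint := &winrm.Endpoint{
		Host:     "winrm-host.example.com", // placeholder
		Port:     5986,
		HTTPS:    true,
		Insecure: false,
		Timeout:  30 * time.Second,
	}

	params := &winrm.Parameters{
		Timeout:      "PT60S", // WS-Man operation timeout; value assumed from the library defaults
		Locale:       "en-US",
		EnvelopeSize: 153600,
		Dial:         dial, // new field added in this winrm bump
		// For NTLM authentication the dialer is injected via the new constructor:
		TransportDecorator: func() winrm.Transporter {
			return winrm.NewClientNTLMWithDial(dial)
		},
	}

	client, err := winrm.NewClientWithParameters(endpoint, "Administrator", "s3cr3t", params)
	if err != nil {
		log.Fatalf("creating winrm client: %v", err)
	}
	_ = client // ready to run commands against the remote host
}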