diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore deleted file mode 100644 index 0f1d00e11..000000000 --- a/vendor/github.com/lib/pq/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.db -*.test -*~ -*.swp diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml deleted file mode 100644 index c94a8718e..000000000 --- a/vendor/github.com/lib/pq/.travis.yml +++ /dev/null @@ -1,71 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - tip - -before_install: - - psql --version - - sudo /etc/init.d/postgresql stop - - sudo apt-get -y --purge remove postgresql libpq-dev libpq5 postgresql-client-common postgresql-common - - sudo rm -rf /var/lib/postgresql - - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list" - - sudo apt-get update -qq - - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION - - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgossltest 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgosslcert 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgossltest 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgosslcert 127.0.0.1/32 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "host all all 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgossltest ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgosslcert ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgossltest ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgosslcert ::1/128 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "host all all ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ certs/server.key certs/server.crt certs/root.crt - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_cert_file = 'server.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_key_file = 'server.key'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_ca_file = 'root.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo sh -c "echo 127.0.0.1 postgres >> /etc/hosts" - - sudo ls -l /var/lib/postgresql/$PGVERSION/main/ - - sudo cat /etc/postgresql/$PGVERSION/main/postgresql.conf - - sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key - - sudo /etc/init.d/postgresql restart - -env: - global: - - PGUSER=postgres - - PQGOSSLTESTS=1 - - PQSSLCERTTEST_PATH=$PWD/certs - - PGHOST=127.0.0.1 - matrix: - - PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.1 
PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.5 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=no - -script: - - go test -v ./... - -before_script: - - psql -c 'create database pqgotest' -U postgres - - psql -c 'create user pqgossltest' -U postgres - - psql -c 'create user pqgosslcert' -U postgres diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md deleted file mode 100644 index b4e3f45cb..000000000 --- a/vendor/github.com/lib/pq/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# pq - A pure Go postgres driver for Go's database/sql package - -[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq) - -## Install - - go get github.com/lib/pq - -## Docs - -For detailed documentation and basic usage examples, please see the package -documentation at . - -## Tests - -`go test` is used for testing. A running PostgreSQL server is -required, with the ability to log in. The default database to connect -to test with is "pqgotest," but it can be overridden using environment -variables. - -Example: - - PGHOST=/var/run/postgresql go test github.com/lib/pq - -Optionally, a benchmark suite can be run as part of the tests: - - PGHOST=/var/run/postgresql go test -bench . - -## Features - -* SSL -* Handles bad connections for `database/sql` -* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) -* Scan binary blobs correctly (i.e. `bytea`) -* Package for `hstore` support -* COPY FROM support -* pq.ParseURL for converting urls to connection strings for sql.Open. -* Many libpq compatible environment variables -* Unix socket support -* Notifications: `LISTEN`/`NOTIFY` -* pgpass support - -## Future / Things you can help with - -* Better COPY FROM / COPY TO (see discussion in #181) - -## Thank you (alphabetical) - -Some of these contributors are from the original library `bmizerany/pq.go` whose -code still exists in here. 
- -* Andy Balholm (andybalholm) -* Ben Berkert (benburkert) -* Benjamin Heatwole (bheatwole) -* Bill Mill (llimllib) -* Bjørn Madsen (aeons) -* Blake Gentry (bgentry) -* Brad Fitzpatrick (bradfitz) -* Charlie Melbye (cmelbye) -* Chris Bandy (cbandy) -* Chris Gilling (cgilling) -* Chris Walsh (cwds) -* Dan Sosedoff (sosedoff) -* Daniel Farina (fdr) -* Eric Chlebek (echlebek) -* Eric Garrido (minusnine) -* Eric Urban (hydrogen18) -* Everyone at The Go Team -* Evan Shaw (edsrzf) -* Ewan Chou (coocood) -* Fazal Majid (fazalmajid) -* Federico Romero (federomero) -* Fumin (fumin) -* Gary Burd (garyburd) -* Heroku (heroku) -* James Pozdena (jpoz) -* Jason McVetta (jmcvetta) -* Jeremy Jay (pbnjay) -* Joakim Sernbrant (serbaut) -* John Gallagher (jgallagher) -* Jonathan Rudenberg (titanous) -* Joël Stemmer (jstemmer) -* Kamil Kisiel (kisielk) -* Kelly Dunn (kellydunn) -* Keith Rarick (kr) -* Kir Shatrov (kirs) -* Lann Martin (lann) -* Maciek Sakrejda (deafbybeheading) -* Marc Brinkmann (mbr) -* Marko Tiikkaja (johto) -* Matt Newberry (MattNewberry) -* Matt Robenolt (mattrobenolt) -* Martin Olsen (martinolsen) -* Mike Lewis (mikelikespie) -* Nicolas Patry (Narsil) -* Oliver Tonnhofer (olt) -* Patrick Hayes (phayes) -* Paul Hammond (paulhammond) -* Ryan Smith (ryandotsmith) -* Samuel Stauffer (samuel) -* Timothée Peignier (cyberdelia) -* Travis Cline (tmc) -* TruongSinh Tran-Nguyen (truongsinh) -* Yaismel Miranda (ympons) -* notedit (notedit) diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 000000000..27eb07a9e --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,727 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSqlScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []string: + return (*StringArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]string: + return (*StringArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. 
+func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. 
+func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSqlScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. 
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + if k := rv.Kind(); k != reflect.Array && k != reflect.Slice { + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. +func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. 
+func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. +func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. 
+// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del string = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: 
multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 394e659df..8e1aee9f0 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -164,7 +164,7 @@ func (c *conn) handlePgpass(o values) { return } mode := fileinfo.Mode() - if mode & (0x77) != 0 { + if mode&(0x77) != 0 { // XXX should warn about incorrect .pgpass permissions as psql does return } @@ -180,7 +180,7 @@ func (c *conn) handlePgpass(o values) { db := o.Get("dbname") username := o.Get("user") // From: https://github.com/tg/pgpass/blob/master/reader.go - getFields := func (s string) []string { + getFields := func(s string) []string { fs := make([]string, 0, 5) f := make([]rune, 0, len(s)) @@ -200,7 +200,7 @@ func (c *conn) handlePgpass(o values) { } } return append(fs, string(f)) - } + } for scanner.Scan() { line := scanner.Text() if len(line) == 0 || line[0] == '#' { @@ -210,7 +210,7 @@ func (c *conn) handlePgpass(o values) { if len(split) != 5 { continue } - if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { o["password"] = split[4] return } @@ -362,7 +362,7 @@ func network(o values) (string, string) { return "unix", sockPath } - return "tcp", host + ":" + o.Get("port") + return "tcp", net.JoinHostPort(host, o.Get("port")) } type values map[string]string @@ -614,8 +614,6 @@ func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err func (cn *conn) simpleQuery(q string) (res *rows, err error) { defer cn.errRecover(&err) - st := &stmt{cn: cn, name: ""} - b := cn.writeBuf('Q') b.string(q) cn.send(b) @@ -634,10 +632,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { } if res == nil { res = &rows{ - cn: cn, - colNames: st.colNames, - colTyps: st.colTyps, - colFmts: st.colFmts, + cn: cn, } } res.done = true @@ -973,8 +968,23 @@ func (cn *conn) ssl(o values) { verifyCaOnly := false tlsConf := tls.Config{} switch mode := o.Get("sslmode"); mode { - case "require", "": + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // Note: For backwards compatibility with earlier versions of PostgreSQL, if a + // root CA file exists, the behavior of sslmode=require will be the same as + // that of verify-ca, meaning the server certificate is validated against the + // CA. Relying on this behavior is discouraged, and applications that need + // certificate validation should always use verify-ca or verify-full. 
+ if _, err := os.Stat(o.Get("sslrootcert")); err == nil { + verifyCaOnly = true + } else { + o.Set("sslrootcert", "") + } case "verify-ca": // We must skip TLS's own verification since it requires full // verification since Go 1.3. @@ -985,7 +995,7 @@ func (cn *conn) ssl(o values) { case "disable": return default: - errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode) + errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) } cn.setupSSLClientCertificates(&tlsConf, o) diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 88422eb3d..29e8f6ff7 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -5,6 +5,7 @@ import ( "database/sql/driver" "encoding/binary" "encoding/hex" + "errors" "fmt" "math" "strconv" @@ -22,7 +23,6 @@ func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { default: return encode(parameterStatus, x, oid.T_unknown) } - panic("not reached") } func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { @@ -56,10 +56,13 @@ func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) [ } func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { - if f == formatBinary { + switch f { + case formatBinary: return binaryDecode(parameterStatus, s, typ) - } else { + case formatText: return textDecode(parameterStatus, s, typ) + default: + panic("not reached") } } @@ -75,7 +78,7 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter return int64(int16(binary.BigEndian.Uint16(s))) default: - errorf("don't know how to decode binary parameter of type %u", uint32(typ)) + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) } panic("not reached") @@ -83,8 +86,14 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) case oid.T_bytea: - return parseBytea(s) + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b case oid.T_timestamptz: return parseTs(parameterStatus.currentLocation, string(s)) case oid.T_timestamp, oid.T_date: @@ -195,16 +204,39 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time { return t } -func expect(str, char string, pos int) { - if c := str[pos : pos+1]; c != char { - errorf("expected '%v' at position %v; got '%v'", char, pos, c) +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) } } -func mustAtoi(str string) int { - result, err := strconv.Atoi(str) +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) if err != nil { - errorf("expected number; got '%v'", str) + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 } return 
result } @@ -219,7 +251,7 @@ type locationCache struct { // about 5% speed could be gained by putting the cache in the connection and // losing the mutex, at the cost of a small amount of memory and a somewhat // significant increase in code complexity. -var globalLocationCache *locationCache = newLocationCache() +var globalLocationCache = newLocationCache() func newLocationCache() *locationCache { return &locationCache{cache: make(map[int]*time.Location)} @@ -249,26 +281,26 @@ const ( infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" ) -/* - * If EnableInfinityTs is not called, "-infinity" and "infinity" will return - * []byte("-infinity") and []byte("infinity") respectively, and potentially - * cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time", - * when scanning into a time.Time value. - * - * Once EnableInfinityTs has been called, all connections created using this - * driver will decode Postgres' "-infinity" and "infinity" for "timestamp", - * "timestamp with time zone" and "date" types to the predefined minimum and - * maximum times, respectively. When encoding time.Time values, any time which - * equals or preceeds the predefined minimum time will be encoded to - * "-infinity". Any values at or past the maximum time will similarly be - * encoded to "infinity". - * - * - * If EnableInfinityTs is called with negative >= positive, it will panic. - * Calling EnableInfinityTs after a connection has been established results in - * undefined behavior. If EnableInfinityTs is called more than once, it will - * panic. - */ +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. func EnableInfinityTs(negative time.Time, positive time.Time) { if infinityTsEnabled { panic(infinityTsEnabledAlready) @@ -305,28 +337,41 @@ func parseTs(currentLocation *time.Location, str string) interface{} { } return []byte(str) } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. 
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} monSep := strings.IndexRune(str, '-') // this is Gregorian year, not ISO Year // In Gregorian system, the year 1 BC is followed by AD 1 - year := mustAtoi(str[:monSep]) + year := p.mustAtoi(str, 0, monSep) daySep := monSep + 3 - month := mustAtoi(str[monSep+1 : daySep]) - expect(str, "-", daySep) + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) timeSep := daySep + 3 - day := mustAtoi(str[daySep+1 : timeSep]) + day := p.mustAtoi(str, daySep+1, timeSep) var hour, minute, second int if len(str) > monSep+len("01-01")+1 { - expect(str, " ", timeSep) + p.expect(str, ' ', timeSep) minSep := timeSep + 3 - expect(str, ":", minSep) - hour = mustAtoi(str[timeSep+1 : minSep]) + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) secSep := minSep + 3 - expect(str, ":", secSep) - minute = mustAtoi(str[minSep+1 : secSep]) + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) secEnd := secSep + 3 - second = mustAtoi(str[secSep+1 : secEnd]) + second = p.mustAtoi(str, secSep+1, secEnd) } remainderIdx := monSep + len("01-01 00:00:00") + 1 // Three optional (but ordered) sections follow: the @@ -337,49 +382,50 @@ func parseTs(currentLocation *time.Location, str string) interface{} { nanoSec := 0 tzOff := 0 - if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." { + if remainderIdx < len(str) && str[remainderIdx] == '.' { fracStart := remainderIdx + 1 fracOff := strings.IndexAny(str[fracStart:], "-+ ") if fracOff < 0 { fracOff = len(str) - fracStart } - fracSec := mustAtoi(str[fracStart : fracStart+fracOff]) + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) remainderIdx += fracOff + 1 } - if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") { + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { // time zone separator is always '-' or '+' (UTC is +00) var tzSign int - if c := str[tzStart : tzStart+1]; c == "-" { + switch c := str[tzStart]; c { + case '-': tzSign = -1 - } else if c == "+" { + case '+': tzSign = +1 - } else { - errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) } - tzHours := mustAtoi(str[tzStart+1 : tzStart+3]) + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) remainderIdx += 3 var tzMin, tzSec int - if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" { - tzMin = mustAtoi(str[tzStart+4 : tzStart+6]) + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } - if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" { - tzSec = mustAtoi(str[tzStart+7 : tzStart+9]) + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) } var isoYear int - if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" { isoYear = 1 - year remainderIdx += 3 } else { isoYear = year } if remainderIdx < len(str) { - errorf("expected end of input, got %v", str[remainderIdx:]) + return time.Time{}, fmt.Errorf("expected 
end of input, got %v", str[remainderIdx:]) } t := time.Date(isoYear, time.Month(month), day, hour, minute, second, nanoSec, @@ -396,11 +442,11 @@ func parseTs(currentLocation *time.Location, str string) interface{} { } } - return t + return t, p.err } // formatTs formats t into a format postgres understands. -func formatTs(t time.Time) (b []byte) { +func formatTs(t time.Time) []byte { if infinityTsEnabled { // t <= -infinity : ! (t > -infinity) if !t.After(infinityTsNegative) { @@ -411,6 +457,11 @@ func formatTs(t time.Time) (b []byte) { return []byte("infinity") } } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { // Need to send dates before 0001 A.D. with " BC" suffix, instead of the // minus sign preferred by Go. // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on @@ -420,7 +471,7 @@ func formatTs(t time.Time) (b []byte) { t = t.AddDate((-t.Year())*2+1, 0, 0) bc = true } - b = []byte(t.Format(time.RFC3339Nano)) + b := []byte(t.Format(time.RFC3339Nano)) _, offset := t.Zone() offset = offset % 60 @@ -445,14 +496,14 @@ func formatTs(t time.Time) (b []byte) { // Parse a bytea value received from the server. Both "hex" and the legacy // "escape" format are supported. -func parseBytea(s []byte) (result []byte) { +func parseBytea(s []byte) (result []byte, err error) { if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { // bytea_output = hex s = s[2:] // trim off leading "\\x" result = make([]byte, hex.DecodedLen(len(s))) _, err := hex.Decode(result, s) if err != nil { - errorf("%s", err) + return nil, err } } else { // bytea_output = escape @@ -467,11 +518,11 @@ func parseBytea(s []byte) (result []byte) { // '\\' followed by an octal number if len(s) < 4 { - errorf("invalid bytea sequence %v", s) + return nil, fmt.Errorf("invalid bytea sequence %v", s) } r, err := strconv.ParseInt(string(s[1:4]), 8, 9) if err != nil { - errorf("could not parse bytea value: %s", err.Error()) + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) } result = append(result, byte(r)) s = s[4:] @@ -489,7 +540,7 @@ func parseBytea(s []byte) (result []byte) { } } - return result + return result, nil } func encodeBytea(serverVersion int, v []byte) (result []byte) { diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 8cad57815..09f94244b 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -62,14 +62,18 @@ type ListenerConn struct { // Creates a new ListenerConn. Use NewListener instead. 
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { - cn, err := Open(name) + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) if err != nil { return nil, err } l := &ListenerConn{ cn: cn.(*conn), - notificationChan: notificationChan, + notificationChan: c, connState: connStateIdle, replyChan: make(chan message, 2), } @@ -391,6 +395,7 @@ type Listener struct { name string minReconnectInterval time.Duration maxReconnectInterval time.Duration + dialer Dialer eventCallback EventCallbackType lock sync.Mutex @@ -421,10 +426,21 @@ func NewListener(name string, minReconnectInterval time.Duration, maxReconnectInterval time.Duration, eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + l := &Listener{ name: name, minReconnectInterval: minReconnectInterval, maxReconnectInterval: maxReconnectInterval, + dialer: d, eventCallback: eventCallback, channels: make(map[string]struct{}), @@ -660,7 +676,7 @@ func (l *Listener) closed() bool { func (l *Listener) connect() error { notificationChan := make(chan *Notification, 32) - cn, err := NewListenerConn(l.name, notificationChan) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) if err != nil { return err } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go index 9bac95c48..f4d8a7c20 100644 --- a/vendor/github.com/lib/pq/url.go +++ b/vendor/github.com/lib/pq/url.go @@ -2,6 +2,7 @@ package pq import ( "fmt" + "net" nurl "net/url" "sort" "strings" @@ -54,12 +55,11 @@ func ParseURL(url string) (string, error) { accrue("password", v) } - i := strings.Index(u.Host, ":") - if i < 0 { + if host, port, err := net.SplitHostPort(u.Host); err != nil { accrue("host", u.Host) } else { - accrue("host", u.Host[:i]) - accrue("port", u.Host[i+1:]) + accrue("host", host) + accrue("port", port) } if u.Path != "" { diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go index e937d7d08..bf982524f 100644 --- a/vendor/github.com/lib/pq/user_posix.go +++ b/vendor/github.com/lib/pq/user_posix.go @@ -1,6 +1,6 @@ // Package pq is a pure Go Postgres driver for the database/sql package. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun package pq diff --git a/vendor/vendor.json b/vendor/vendor.json index 4be4a8c62..7978b5342 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1982,9 +1982,11 @@ "revision": "0826b98aaa29c0766956cb40d45cf7482a597671" }, { + "checksumSHA1": "dNYxHiBLalTqluak2/Z8c3RsSEM=", "comment": "go1.0-cutoff-74-g8ad2b29", "path": "github.com/lib/pq", - "revision": "8ad2b298cadd691a77015666a5372eae5dbfac8f" + "revision": "50761b0867bd1d9d069276790bcd4a3bccf2324a", + "revisionTime": "2016-08-31T22:25:20Z" }, { "comment": "go1.0-cutoff-74-g8ad2b29",
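Note on the new pq.Array helper added in array.go above: it wraps a Go slice (or a pointer to one) in a value that implements both driver.Valuer and sql.Scanner, so one-dimensional PostgreSQL arrays can be passed as query parameters and scanned back without manual parsing. A minimal sketch follows, based on the package doc example in the diff; the DSN, table and column names are placeholders and a reachable PostgreSQL server is assumed.

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // importing the package also registers the "postgres" driver
)

func main() {
	// Placeholder DSN; adjust host, database and sslmode for your setup.
	db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pass a Go slice as a PostgreSQL array parameter (table "t" is illustrative).
	rows, err := db.Query(`SELECT name FROM t WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		log.Fatal(err)
	}
	rows.Close()

	// Scan a PostgreSQL array back into a Go slice.
	var tags []string
	if err := db.QueryRow(`SELECT ARRAY['a', 'b']::text[]`).Scan(pq.Array(&tags)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags) // [a b]
}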
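The encode.go hunks above also export ParseTimestamp and FormatTimestamp, the routines the driver itself uses for Postgres' text timestamp format, with parsing errors now returned instead of causing panics. A small sketch of the exported pair; the input string is just an example value.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Parse a timestamptz rendered in Postgres' text format (example value).
	t, err := pq.ParseTimestamp(time.UTC, "2016-08-31 22:25:20.123456+00")
	if err != nil {
		log.Fatal(err)
	}

	// FormatTimestamp renders a time.Time back into the text form the driver sends.
	fmt.Printf("%s -> %s\n", t.Format(time.RFC3339Nano), pq.FormatTimestamp(t))
}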
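Two behavioral notes from the conn.go and url.go hunks: sslmode=require now falls back to verify-ca semantics when the file named by sslrootcert exists (matching the libpq documentation quoted in the comment), and host/port handling now uses net.SplitHostPort / net.JoinHostPort, which also copes with IPv6 literals. A hedged sketch of ParseURL; the credentials and host are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Placeholder URL-style DSN; ParseURL converts it to a key=value connection string.
	dsn, err := pq.ParseURL("postgres://user:secret@db.example.com:5432/pqgotest?sslmode=require")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dsn)
	// Expected to print something like:
	//   dbname=pqgotest host=db.example.com password=secret port=5432 sslmode=require user=user
}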
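The notify.go hunk adds NewDialListener, so LISTEN/NOTIFY connections can be established through a caller-supplied pq.Dialer. The sketch below assumes the pq.Dialer interface defined elsewhere in the package (Dial and DialTimeout methods, not shown in this diff); the DSN and channel name are placeholders.

package main

import (
	"fmt"
	"log"
	"net"
	"time"

	"github.com/lib/pq"
)

// loggingDialer is a hypothetical dialer that logs each connection attempt.
type loggingDialer struct{}

func (loggingDialer) Dial(network, address string) (net.Conn, error) {
	log.Printf("dialing %s %s", network, address)
	return net.Dial(network, address)
}

func (loggingDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
	log.Printf("dialing %s %s (timeout %s)", network, address, timeout)
	return net.DialTimeout(network, address, timeout)
}

func main() {
	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Println(err)
		}
	}

	// Placeholder DSN and channel name.
	l := pq.NewDialListener(loggingDialer{}, "dbname=pqgotest sslmode=disable",
		10*time.Second, time.Minute, reportProblem)
	if err := l.Listen("events"); err != nil {
		log.Fatal(err)
	}

	for n := range l.Notify {
		if n != nil {
			fmt.Printf("NOTIFY on %q: %s\n", n.Channel, n.Extra)
		}
	}
}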