vendor
This commit is contained in:
1
vendor/github.com/lib/pq/.gitattributes
generated
vendored
Normal file
1
vendor/github.com/lib/pq/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.sh text eol=lf
|
||||
6
vendor/github.com/lib/pq/.gitignore
generated
vendored
Normal file
6
vendor/github.com/lib/pq/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
.db
|
||||
*.test
|
||||
*~
|
||||
*.swp
|
||||
.idea
|
||||
.vscode
|
||||
157
vendor/github.com/lib/pq/CHANGELOG.md
generated
vendored
Normal file
157
vendor/github.com/lib/pq/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
v1.11.2 (2025-02-10)
|
||||
--------------------
|
||||
This fixes two regressions:
|
||||
|
||||
- Don't send startup parameters if there is no value, improving compatibility
|
||||
with Supavisor ([#1260]).
|
||||
|
||||
- Don't send `dbname` as a startup parameter if `database=[..]` is used in the
|
||||
connection string. It's recommended to use dbname=, as database= is not a
|
||||
libpq option, and only worked by accident previously. ([#1261])
|
||||
|
||||
[#1260]: https://github.com/lib/pq/pull/1260
|
||||
[#1261]: https://github.com/lib/pq/pull/1261
|
||||
|
||||
v1.11.1 (2025-01-29)
|
||||
--------------------
|
||||
This fixes two regressions present in the v1.11.0 release:
|
||||
|
||||
- Fix build on 32bit systems, Windows, and Plan 9 ([#1253]).
|
||||
|
||||
- Named []byte types and pointers to []byte (e.g. `*[]byte`, `json.RawMessage`)
|
||||
would be treated as an array instead of bytea ([#1252]).
|
||||
|
||||
[#1252]: https://github.com/lib/pq/pull/1252
|
||||
[#1253]: https://github.com/lib/pq/pull/1253
|
||||
|
||||
v1.11.0 (2025-01-28)
|
||||
--------------------
|
||||
This version of pq requires Go 1.21 or newer.
|
||||
|
||||
pq now supports only maintained PostgreSQL releases, which is PostgreSQL 14 and
|
||||
newer. Previously PostgreSQL 8.4 and newer were supported.
|
||||
|
||||
### Features
|
||||
|
||||
- The `pq.Error.Error()` text includes the position of the error (if reported
|
||||
by PostgreSQL) and SQLSTATE code ([#1219], [#1224]):
|
||||
|
||||
pq: column "columndoesntexist" does not exist at column 8 (42703)
|
||||
pq: syntax error at or near ")" at position 2:71 (42601)
|
||||
|
||||
- The `pq.Error.ErrorWithDetail()` method prints a more detailed multiline
|
||||
message, with the Detail, Hint, and error position (if any) ([#1219]):
|
||||
|
||||
ERROR: syntax error at or near ")" (42601)
|
||||
CONTEXT: line 12, column 1:
|
||||
|
||||
10 | name varchar,
|
||||
11 | version varchar,
|
||||
12 | );
|
||||
^
|
||||
|
||||
- Add `Config`, `NewConfig()`, and `NewConnectorConfig()` to supply connection
|
||||
details in a more structured way ([#1240]).
|
||||
|
||||
- Support `hostaddr` and `$PGHOSTADDR` ([#1243]).
|
||||
|
||||
- Support multiple values in `host`, `port`, and `hostaddr`, which are each
|
||||
tried in order, or randomly if `load_balance_hosts=random` is set ([#1246]).
|
||||
|
||||
- Support `target_session_attrs` connection parameter ([#1246]).
|
||||
|
||||
- Support [`sslnegotiation`] to use SSL without negotiation ([#1180]).
|
||||
|
||||
- Allow using a custom `tls.Config`, for example for encrypted keys ([#1228]).
|
||||
|
||||
- Add `PQGO_DEBUG=1` print the communication with PostgreSQL to stderr, to aid
|
||||
in debugging, testing, and bug reports ([#1223]).
|
||||
|
||||
- Add support for NamedValueChecker interface ([#1125], [#1238]).
|
||||
|
||||
|
||||
### Fixes
|
||||
|
||||
- Match HOME directory lookup logic with libpq: prefer $HOME over /etc/passwd,
|
||||
ignore ENOTDIR errors, and use APPDATA on Windows ([#1214]).
|
||||
|
||||
- Fix `sslmode=verify-ca` verifying the hostname anyway when connecting to a DNS
|
||||
name (rather than IP) ([#1226]).
|
||||
|
||||
- Correctly detect pre-protocol errors such as the server not being able to fork
|
||||
or running out of memory ([#1248]).
|
||||
|
||||
- Fix build with wasm ([#1184]), appengine ([#745]), and Plan 9 ([#1133]).
|
||||
|
||||
- Deprecate and type alias `pq.NullTime` to `sql.NullTime` ([#1211]).
|
||||
|
||||
- Enforce integer limits of the Postgres wire protocol ([#1161]).
|
||||
|
||||
- Accept the `passfile` connection parameter to override `PGPASSFILE` ([#1129]).
|
||||
|
||||
- Fix connecting to socket on Windows systems ([#1179]).
|
||||
|
||||
- Don't perform a permission check on the .pgpass file on Windows ([#595]).
|
||||
|
||||
- Warn about incorrect .pgpass permissions ([#595]).
|
||||
|
||||
- Don't set extra_float_digits ([#1212]).
|
||||
|
||||
- Decode bpchar into a string ([#949]).
|
||||
|
||||
- Fix panic in Ping() by not requiring CommandComplete or EmptyQueryResponse in
|
||||
simpleQuery() ([#1234])
|
||||
|
||||
- Recognize bit/varbit ([#743]) and float types ([#1166]) in ColumnTypeScanType().
|
||||
|
||||
- Accept `PGGSSLIB` and `PGKRBSRVNAME` environment variables ([#1143]).
|
||||
|
||||
- Handle ErrorResponse in readReadyForQuery and return proper error ([#1136]).
|
||||
|
||||
- CopyIn() and CopyInSchema() now work if the list of columns is empty, in which
|
||||
case it will copy all columns ([#1239]).
|
||||
|
||||
- Treat nil []byte in query parameters as nil/NULL rather than `""` ([#838]).
|
||||
|
||||
- Accept multiple authentication methods before checking AuthOk, which improves
|
||||
compatibility with PgPool-II ([#1188]).
|
||||
|
||||
[`sslnegotiation`]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT-SSLNEGOTIATION
|
||||
[#595]: https://github.com/lib/pq/pull/595
|
||||
[#745]: https://github.com/lib/pq/pull/745
|
||||
[#743]: https://github.com/lib/pq/pull/743
|
||||
[#838]: https://github.com/lib/pq/pull/838
|
||||
[#949]: https://github.com/lib/pq/pull/949
|
||||
[#1125]: https://github.com/lib/pq/pull/1125
|
||||
[#1129]: https://github.com/lib/pq/pull/1129
|
||||
[#1133]: https://github.com/lib/pq/pull/1133
|
||||
[#1136]: https://github.com/lib/pq/pull/1136
|
||||
[#1143]: https://github.com/lib/pq/pull/1143
|
||||
[#1161]: https://github.com/lib/pq/pull/1161
|
||||
[#1166]: https://github.com/lib/pq/pull/1166
|
||||
[#1179]: https://github.com/lib/pq/pull/1179
|
||||
[#1180]: https://github.com/lib/pq/pull/1180
|
||||
[#1184]: https://github.com/lib/pq/pull/1184
|
||||
[#1188]: https://github.com/lib/pq/pull/1188
|
||||
[#1211]: https://github.com/lib/pq/pull/1211
|
||||
[#1212]: https://github.com/lib/pq/pull/1212
|
||||
[#1214]: https://github.com/lib/pq/pull/1214
|
||||
[#1219]: https://github.com/lib/pq/pull/1219
|
||||
[#1223]: https://github.com/lib/pq/pull/1223
|
||||
[#1224]: https://github.com/lib/pq/pull/1224
|
||||
[#1226]: https://github.com/lib/pq/pull/1226
|
||||
[#1228]: https://github.com/lib/pq/pull/1228
|
||||
[#1234]: https://github.com/lib/pq/pull/1234
|
||||
[#1238]: https://github.com/lib/pq/pull/1238
|
||||
[#1239]: https://github.com/lib/pq/pull/1239
|
||||
[#1240]: https://github.com/lib/pq/pull/1240
|
||||
[#1243]: https://github.com/lib/pq/pull/1243
|
||||
[#1246]: https://github.com/lib/pq/pull/1246
|
||||
[#1248]: https://github.com/lib/pq/pull/1248
|
||||
|
||||
|
||||
v1.10.9 (2023-04-26)
|
||||
--------------------
|
||||
- Fixes backwards incompat bug with 1.13.
|
||||
|
||||
- Fixes pgpass issue
|
||||
21
vendor/github.com/lib/pq/LICENSE
generated
vendored
Normal file
21
vendor/github.com/lib/pq/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2011-2013, 'pq' Contributors. Portions Copyright (c) 2011 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
92
vendor/github.com/lib/pq/README.md
generated
vendored
Normal file
92
vendor/github.com/lib/pq/README.md
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
pq is a Go PostgreSQL driver for database/sql.
|
||||
|
||||
All [maintained versions of PostgreSQL] are supported. Older versions may work,
|
||||
but this is not tested.
|
||||
|
||||
API docs: https://pkg.go.dev/github.com/lib/pq
|
||||
|
||||
Install with:
|
||||
|
||||
go get github.com/lib/pq@latest
|
||||
|
||||
[maintained versions of PostgreSQL]: https://www.postgresql.org/support/versioning
|
||||
|
||||
Features
|
||||
--------
|
||||
* SSL
|
||||
* Handles bad connections for `database/sql`
|
||||
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
|
||||
* Scan binary blobs correctly (i.e. `bytea`)
|
||||
* Package for `hstore` support
|
||||
* COPY FROM support
|
||||
* pq.ParseURL for converting urls to connection strings for sql.Open.
|
||||
* Many libpq compatible environment variables
|
||||
* Unix socket support
|
||||
* Notifications: `LISTEN`/`NOTIFY`
|
||||
* pgpass support
|
||||
* GSS (Kerberos) auth
|
||||
|
||||
Running Tests
|
||||
-------------
|
||||
Tests need to be run against a PostgreSQL database; you can use Docker compose
|
||||
to start one:
|
||||
|
||||
docker compose up -d
|
||||
|
||||
This starts the latest PostgreSQL; use `docker compose up -d pg«v»` to start a
|
||||
different version.
|
||||
|
||||
In addition, your `/etc/hosts` currently needs an entry:
|
||||
|
||||
127.0.0.1 postgres postgres-invalid
|
||||
|
||||
Or you can use any other PostgreSQL instance; see
|
||||
`testdata/init/docker-entrypoint-initdb.d` for the required setup. You can use
|
||||
the standard `PG*` environment variables to control the connection details; it
|
||||
uses the following defaults:
|
||||
|
||||
PGHOST=localhost
|
||||
PGDATABASE=pqgo
|
||||
PGUSER=pqgo
|
||||
PGSSLMODE=disable
|
||||
PGCONNECT_TIMEOUT=20
|
||||
|
||||
`PQTEST_BINARY_PARAMETERS` can be used to add `binary_parameters=yes` to all
|
||||
connection strings:
|
||||
|
||||
PQTEST_BINARY_PARAMETERS=1 go test
|
||||
|
||||
Tests can be run against pgbouncer with:
|
||||
|
||||
docker compose up -d pgbouncer pg18
|
||||
PGPORT=6432 go test ./...
|
||||
|
||||
and pgpool with:
|
||||
|
||||
docker compose up -d pgpool pg18
|
||||
PGPORT=7432 go test ./...
|
||||
|
||||
You can use PQGO_DEBUG=1 to make the driver print the communication with
|
||||
PostgreSQL to stderr; this works anywhere (test or applications) and can be
|
||||
useful to debug protocol problems.
|
||||
|
||||
For example:
|
||||
|
||||
% PQGO_DEBUG=1 go test -run TestSimpleQuery
|
||||
CLIENT → Startup 69 "\x00\x03\x00\x00database\x00pqgo\x00user [..]"
|
||||
SERVER ← (R) AuthRequest 4 "\x00\x00\x00\x00"
|
||||
SERVER ← (S) ParamStatus 19 "in_hot_standby\x00off\x00"
|
||||
[..]
|
||||
SERVER ← (Z) ReadyForQuery 1 "I"
|
||||
START conn.query
|
||||
START conn.simpleQuery
|
||||
CLIENT → (Q) Query 9 "select 1\x00"
|
||||
SERVER ← (T) RowDescription 29 "\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00\x04\xff\xff\xff\xff\x00\x00"
|
||||
SERVER ← (D) DataRow 7 "\x00\x01\x00\x00\x00\x011"
|
||||
END conn.simpleQuery
|
||||
END conn.query
|
||||
SERVER ← (C) CommandComplete 9 "SELECT 1\x00"
|
||||
SERVER ← (Z) ReadyForQuery 1 "I"
|
||||
CLIENT → (X) Terminate 0 ""
|
||||
PASS
|
||||
ok github.com/lib/pq 0.010s
|
||||
903
vendor/github.com/lib/pq/array.go
generated
vendored
Normal file
903
vendor/github.com/lib/pq/array.go
generated
vendored
Normal file
@@ -0,0 +1,903 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var typeByteSlice = reflect.TypeOf([]byte{})
|
||||
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
||||
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
|
||||
|
||||
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
|
||||
// slice of any dimension.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
|
||||
//
|
||||
// var x []sql.NullInt64
|
||||
// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
|
||||
//
|
||||
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
|
||||
// bound is not one (such as `[0:0]={1}') are not supported.
|
||||
func Array(a any) interface {
|
||||
driver.Valuer
|
||||
sql.Scanner
|
||||
} {
|
||||
switch a := a.(type) {
|
||||
case []bool:
|
||||
return (*BoolArray)(&a)
|
||||
case []float64:
|
||||
return (*Float64Array)(&a)
|
||||
case []float32:
|
||||
return (*Float32Array)(&a)
|
||||
case []int64:
|
||||
return (*Int64Array)(&a)
|
||||
case []int32:
|
||||
return (*Int32Array)(&a)
|
||||
case []string:
|
||||
return (*StringArray)(&a)
|
||||
case [][]byte:
|
||||
return (*ByteaArray)(&a)
|
||||
|
||||
case *[]bool:
|
||||
return (*BoolArray)(a)
|
||||
case *[]float64:
|
||||
return (*Float64Array)(a)
|
||||
case *[]float32:
|
||||
return (*Float32Array)(a)
|
||||
case *[]int64:
|
||||
return (*Int64Array)(a)
|
||||
case *[]int32:
|
||||
return (*Int32Array)(a)
|
||||
case *[]string:
|
||||
return (*StringArray)(a)
|
||||
case *[][]byte:
|
||||
return (*ByteaArray)(a)
|
||||
}
|
||||
|
||||
return GenericArray{a}
|
||||
}
|
||||
|
||||
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
|
||||
// to override the array delimiter used by GenericArray.
|
||||
type ArrayDelimiter interface {
|
||||
// ArrayDelimiter returns the delimiter character(s) for this element's type.
|
||||
ArrayDelimiter() string
|
||||
}
|
||||
|
||||
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
|
||||
type BoolArray []bool
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *BoolArray) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
|
||||
}
|
||||
|
||||
func (a *BoolArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(BoolArray, len(elems))
|
||||
for i, v := range elems {
|
||||
if len(v) != 1 {
|
||||
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
|
||||
}
|
||||
switch v[0] {
|
||||
case 't':
|
||||
b[i] = true
|
||||
case 'f':
|
||||
b[i] = false
|
||||
default:
|
||||
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a BoolArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be exactly two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1+2*n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
b[2*i] = ','
|
||||
if a[i] {
|
||||
b[1+2*i] = 't'
|
||||
} else {
|
||||
b[1+2*i] = 'f'
|
||||
}
|
||||
}
|
||||
|
||||
b[0] = '{'
|
||||
b[2*n] = '}'
|
||||
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
|
||||
type ByteaArray [][]byte
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *ByteaArray) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
|
||||
}
|
||||
|
||||
func (a *ByteaArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(ByteaArray, len(elems))
|
||||
for i, v := range elems {
|
||||
b[i], err = parseBytea(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse bytea array index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface. It uses the "hex" format which
|
||||
// is only supported on PostgreSQL 9.0 or newer.
|
||||
func (a ByteaArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, 2*N bytes of quotes,
|
||||
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
|
||||
size := 1 + 6*n
|
||||
for _, x := range a {
|
||||
size += hex.EncodedLen(len(x))
|
||||
}
|
||||
|
||||
b := make([]byte, size)
|
||||
|
||||
for i, s := 0, b; i < n; i++ {
|
||||
o := copy(s, `,"\\x`)
|
||||
o += hex.Encode(s[o:], a[i])
|
||||
s[o] = '"'
|
||||
s = s[o+1:]
|
||||
}
|
||||
|
||||
b[0] = '{'
|
||||
b[size-1] = '}'
|
||||
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Float64Array represents a one-dimensional array of the PostgreSQL double
|
||||
// precision type.
|
||||
type Float64Array []float64
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Float64Array) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
|
||||
}
|
||||
|
||||
func (a *Float64Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Float64Array, len(elems))
|
||||
for i, v := range elems {
|
||||
b[i], err = strconv.ParseFloat(string(v), 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Float64Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Float32Array represents a one-dimensional array of the PostgreSQL double
|
||||
// precision type.
|
||||
type Float32Array []float32
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Float32Array) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Float32Array", src)
|
||||
}
|
||||
|
||||
func (a *Float32Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Float32Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Float32Array, len(elems))
|
||||
for i, v := range elems {
|
||||
x, err := strconv.ParseFloat(string(v), 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %w", i, err)
|
||||
}
|
||||
b[i] = float32(x)
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Float32Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
|
||||
// an array or slice of any dimension.
|
||||
type GenericArray struct{ A any }
|
||||
|
||||
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
|
||||
var assign func([]byte, reflect.Value) error
|
||||
var del = ","
|
||||
|
||||
// TODO calculate the assign function for other types
|
||||
// TODO repeat this section on the element type of arrays or slices (multidimensional)
|
||||
{
|
||||
if reflect.PointerTo(rt).Implements(typeSQLScanner) {
|
||||
// dest is always addressable because it is an element of a slice.
|
||||
assign = func(src []byte, dest reflect.Value) (err error) {
|
||||
ss := dest.Addr().Interface().(sql.Scanner)
|
||||
if src == nil {
|
||||
err = ss.Scan(nil)
|
||||
} else {
|
||||
err = ss.Scan(src)
|
||||
}
|
||||
return
|
||||
}
|
||||
goto FoundType
|
||||
}
|
||||
|
||||
assign = func([]byte, reflect.Value) error {
|
||||
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
|
||||
}
|
||||
}
|
||||
|
||||
FoundType:
|
||||
|
||||
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
|
||||
del = ad.ArrayDelimiter()
|
||||
}
|
||||
|
||||
return rt, assign, del
|
||||
}
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a GenericArray) Scan(src any) error {
|
||||
dpv := reflect.ValueOf(a.A)
|
||||
switch {
|
||||
case dpv.Kind() != reflect.Ptr:
|
||||
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
|
||||
case dpv.IsNil():
|
||||
return fmt.Errorf("pq: destination %T is nil", a.A)
|
||||
}
|
||||
|
||||
dv := dpv.Elem()
|
||||
switch dv.Kind() {
|
||||
case reflect.Slice:
|
||||
case reflect.Array:
|
||||
default:
|
||||
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
|
||||
}
|
||||
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src, dv)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src), dv)
|
||||
case nil:
|
||||
if dv.Kind() == reflect.Slice {
|
||||
dv.Set(reflect.Zero(dv.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
|
||||
}
|
||||
|
||||
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
|
||||
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
|
||||
dims, elems, err := parseArray(src, []byte(del))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO allow multidimensional
|
||||
|
||||
if len(dims) > 1 {
|
||||
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
|
||||
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
|
||||
}
|
||||
|
||||
// Treat a zero-dimensional array like an array with a single dimension of zero.
|
||||
if len(dims) == 0 {
|
||||
dims = append(dims, 0)
|
||||
}
|
||||
|
||||
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
|
||||
switch rt.Kind() {
|
||||
case reflect.Slice:
|
||||
case reflect.Array:
|
||||
if rt.Len() != dims[i] {
|
||||
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
|
||||
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
|
||||
}
|
||||
default:
|
||||
// TODO handle multidimensional
|
||||
}
|
||||
}
|
||||
|
||||
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
|
||||
for i, e := range elems {
|
||||
err := assign(e, values.Index(i))
|
||||
if err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO handle multidimensional
|
||||
|
||||
switch dv.Kind() {
|
||||
case reflect.Slice:
|
||||
dv.Set(values.Slice(0, dims[0]))
|
||||
case reflect.Array:
|
||||
for i := 0; i < dims[0]; i++ {
|
||||
dv.Index(i).Set(values.Index(i))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a GenericArray) Value() (driver.Value, error) {
|
||||
if a.A == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(a.A)
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
case reflect.Array:
|
||||
default:
|
||||
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
|
||||
}
|
||||
|
||||
if n := rv.Len(); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 0, 1+2*n)
|
||||
|
||||
b, _, err := appendArray(b, rv, n)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
|
||||
type Int64Array []int64
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Int64Array) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
|
||||
}
|
||||
|
||||
func (a *Int64Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Int64Array, len(elems))
|
||||
for i, v := range elems {
|
||||
b[i], err = strconv.ParseInt(string(v), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Int64Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendInt(b, a[0], 10)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendInt(b, a[i], 10)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// Int32Array represents a one-dimensional array of the PostgreSQL integer types.
|
||||
type Int32Array []int32
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *Int32Array) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to Int32Array", src)
|
||||
}
|
||||
|
||||
func (a *Int32Array) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "Int32Array")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(Int32Array, len(elems))
|
||||
for i, v := range elems {
|
||||
x, err := strconv.ParseInt(string(v), 10, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: %w", i, err)
|
||||
}
|
||||
b[i] = int32(x)
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a Int32Array) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, N bytes of values,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+2*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = strconv.AppendInt(b, int64(a[0]), 10)
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = strconv.AppendInt(b, int64(a[i]), 10)
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// StringArray represents a one-dimensional array of the PostgreSQL character types.
|
||||
type StringArray []string
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (a *StringArray) Scan(src any) error {
|
||||
switch src := src.(type) {
|
||||
case []byte:
|
||||
return a.scanBytes(src)
|
||||
case string:
|
||||
return a.scanBytes([]byte(src))
|
||||
case nil:
|
||||
*a = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
|
||||
}
|
||||
|
||||
func (a *StringArray) scanBytes(src []byte) error {
|
||||
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *a != nil && len(elems) == 0 {
|
||||
*a = (*a)[:0]
|
||||
} else {
|
||||
b := make(StringArray, len(elems))
|
||||
for i, v := range elems {
|
||||
if b[i] = string(v); v == nil {
|
||||
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
|
||||
}
|
||||
}
|
||||
*a = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (a StringArray) Value() (driver.Value, error) {
|
||||
if a == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n := len(a); n > 0 {
|
||||
// There will be at least two curly brackets, 2*N bytes of quotes,
|
||||
// and N-1 bytes of delimiters.
|
||||
b := make([]byte, 1, 1+3*n)
|
||||
b[0] = '{'
|
||||
|
||||
b = appendArrayQuotedBytes(b, []byte(a[0]))
|
||||
for i := 1; i < n; i++ {
|
||||
b = append(b, ',')
|
||||
b = appendArrayQuotedBytes(b, []byte(a[i]))
|
||||
}
|
||||
|
||||
return string(append(b, '}')), nil
|
||||
}
|
||||
|
||||
return "{}", nil
|
||||
}
|
||||
|
||||
// appendArray appends rv to the buffer, returning the extended buffer and the
// delimiter used between elements.
//
// Returns an error when n <= 0 or rv is not a reflect.Array or reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
	var del string
	var err error

	b = append(b, '{')

	// The first element establishes the delimiter used for the rest.
	if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
		return b, del, err
	}

	for i := 1; i < n; i++ {
		b = append(b, del...)
		if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
			return b, del, err
		}
	}

	return append(b, '}'), del, nil
}
|
||||
|
||||
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
	// Nested arrays/slices recurse, except []byte and driver.Valuer
	// implementations, which are encoded as scalar values below.
	if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
		if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
			if n := rv.Len(); n > 0 {
				return appendArray(b, rv, n)
			}

			// Empty nested array: append nothing, no delimiter.
			return b, "", nil
		}
	}

	var del = ","
	var err error
	var iv any = rv.Interface()

	// The element type may override the delimiter via ArrayDelimiter.
	if ad, ok := iv.(ArrayDelimiter); ok {
		del = ad.ArrayDelimiter()
	}

	if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
		return b, del, err
	}

	switch v := iv.(type) {
	case nil:
		return append(b, "NULL"...), del, nil
	case []byte:
		return appendArrayQuotedBytes(b, v), del, nil
	case string:
		return appendArrayQuotedBytes(b, []byte(v)), del, nil
	}

	// Any other driver.Value falls back to the generic encoder.
	b, err = appendValue(b, iv)
	return b, del, err
}
|
||||
|
||||
// appendArrayQuotedBytes appends v to b as a double-quoted array element,
// backslash-escaping any embedded double quotes and backslashes.
func appendArrayQuotedBytes(b, v []byte) []byte {
	b = append(b, '"')
	for {
		i := bytes.IndexAny(v, `"\`)
		if i == -1 {
			// No more characters need escaping; copy the rest and close.
			b = append(b, v...)
			return append(b, '"')
		}
		b = append(b, v[:i]...)
		b = append(b, '\\', v[i])
		v = v[i+1:]
	}
}
|
||||
|
||||
// appendValue encodes v with the package's generic encoder and appends the
// result to b.
// NOTE(review): encode is called with a zero type OID here — presumably the
// default text encoding; confirm against encode's definition.
func appendValue(b []byte, v driver.Value) ([]byte, error) {
	enc, err := encode(v, 0)
	if err != nil {
		return nil, err
	}
	return append(b, enc...), nil
}
|
||||
|
||||
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// dims holds the per-dimension element counts; elems holds every leaf element
// in order, with nil meaning SQL NULL.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
	var depth, i int

	if len(src) < 1 || src[0] != '{' {
		return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
	}

Open:
	// Consume the leading run of '{' to discover the number of dimensions.
	for i < len(src) {
		switch src[i] {
		case '{':
			depth++
			i++
		case '}':
			// Immediately-closed bracket: an empty array with no elements.
			elems = make([][]byte, 0)
			goto Close
		default:
			break Open
		}
	}
	// i now equals the nesting depth of the first element.
	dims = make([]int, i)

Element:
	for i < len(src) {
		switch src[i] {
		case '{':
			if depth == len(dims) {
				break Element
			}
			depth++
			dims[depth-1] = 0
			i++
		case '"':
			// Quoted element: read until the closing quote, honoring
			// backslash escapes.
			var elem = []byte{}
			var escape bool
			for i++; i < len(src); i++ {
				if escape {
					elem = append(elem, src[i])
					escape = false
				} else {
					switch src[i] {
					default:
						elem = append(elem, src[i])
					case '\\':
						escape = true
					case '"':
						elems = append(elems, elem)
						i++
						break Element
					}
				}
			}
		default:
			// Unquoted element: read until the delimiter or closing bracket.
			for start := i; i < len(src); i++ {
				if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
					elem := src[start:i]
					if len(elem) == 0 {
						return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
					}
					if bytes.Equal(elem, []byte("NULL")) {
						// A bare NULL is SQL NULL, represented as nil.
						elem = nil
					}
					elems = append(elems, elem)
					break Element
				}
			}
		}
	}

	// After each element: a delimiter continues the current dimension, a
	// closing bracket ends it.
	for i < len(src) {
		if bytes.HasPrefix(src[i:], del) && depth > 0 {
			dims[depth-1]++
			i += len(del)
			goto Element
		} else if src[i] == '}' && depth > 0 {
			dims[depth-1]++
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}

Close:
	// Consume the remaining closing brackets of the outer dimensions.
	for i < len(src) {
		if src[i] == '}' && depth > 0 {
			depth--
			i++
		} else {
			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
		}
	}
	if depth > 0 {
		err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
	}
	if err == nil {
		// Sanity check: every dimension size must evenly divide the total
		// element count.
		for _, d := range dims {
			if (len(elems) % d) != 0 {
				err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
			}
		}
	}
	return
}
|
||||
|
||||
// scanLinearArray parses src with parseArray and rejects anything that is not
// one-dimensional, naming typ in the error message.
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
	dims, elems, err := parseArray(src, del)
	if err != nil {
		return nil, err
	}
	if len(dims) > 1 {
		// Render dims as ARRAY[x][y]... in the error message.
		return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
	}
	return elems, err
}
|
||||
100
vendor/github.com/lib/pq/buf.go
generated
vendored
Normal file
100
vendor/github.com/lib/pq/buf.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
// readBuf is a cursor over a raw backend message: each accessor decodes a
// value from the front of the buffer and advances past it. Out-of-bounds
// reads panic (slicing past the end), as do malformed strings.
type readBuf []byte

// int32 reads a big-endian signed 32-bit integer.
func (b *readBuf) int32() (n int) {
	n = int(int32(binary.BigEndian.Uint32(*b)))
	*b = (*b)[4:]
	return
}

// oid reads a 32-bit PostgreSQL object ID.
func (b *readBuf) oid() (n oid.Oid) {
	n = oid.Oid(binary.BigEndian.Uint32(*b))
	*b = (*b)[4:]
	return
}

// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
	n = int(binary.BigEndian.Uint16(*b))
	*b = (*b)[2:]
	return
}

// string reads a NUL-terminated string, panicking when no terminator is
// present in the remaining buffer.
func (b *readBuf) string() string {
	i := bytes.IndexByte(*b, 0)
	if i < 0 {
		panic(errors.New("pq: invalid message format; expected string terminator"))
	}
	s := (*b)[:i]
	*b = (*b)[i+1:]
	return string(s)
}

// next returns the next n raw bytes and advances past them. The returned
// slice aliases the buffer; it is not a copy.
func (b *readBuf) next(n int) (v []byte) {
	v = (*b)[:n]
	*b = (*b)[n:]
	return
}

// byte reads a single byte.
func (b *readBuf) byte() byte {
	return b.next(1)[0]
}
|
||||
|
||||
// writeBuf accumulates one or more frontend messages. pos marks the offset of
// the current message's 4-byte length placeholder so wrap/next can patch the
// real length in once the message is complete.
type writeBuf struct {
	buf []byte
	pos int
}

// int32 appends n as a big-endian 32-bit integer.
func (b *writeBuf) int32(n int) {
	x := make([]byte, 4)
	binary.BigEndian.PutUint32(x, uint32(n))
	b.buf = append(b.buf, x...)
}

// int16 appends n as a big-endian 16-bit integer.
func (b *writeBuf) int16(n int) {
	x := make([]byte, 2)
	binary.BigEndian.PutUint16(x, uint16(n))
	b.buf = append(b.buf, x...)
}

// string appends s followed by the protocol's NUL terminator.
func (b *writeBuf) string(s string) {
	b.buf = append(append(b.buf, s...), '\000')
}

// byte appends a single message-type byte.
func (b *writeBuf) byte(c proto.RequestCode) {
	b.buf = append(b.buf, byte(c))
}

// bytes appends v verbatim.
func (b *writeBuf) bytes(v []byte) {
	b.buf = append(b.buf, v...)
}

// wrap patches the current message's length field (which counts everything
// from the length field itself to the end of the buffer) and returns the
// complete buffer, ready to be written to the wire.
func (b *writeBuf) wrap() []byte {
	p := b.buf[b.pos:]
	if len(p) > proto.MaxUint32 {
		panic(fmt.Errorf("pq: message too large (%d > math.MaxUint32)", len(p)))
	}
	binary.BigEndian.PutUint32(p, uint32(len(p)))
	return b.buf
}

// next finalizes the current message's length field and starts a new message
// of type c, appending the type byte plus a zeroed 4-byte length placeholder;
// pos is left pointing at the new placeholder (just after the type byte).
func (b *writeBuf) next(c proto.RequestCode) {
	p := b.buf[b.pos:]
	if len(p) > proto.MaxUint32 {
		panic(fmt.Errorf("pq: message too large (%d > math.MaxUint32)", len(p)))
	}
	binary.BigEndian.PutUint32(p, uint32(len(p)))
	b.pos = len(b.buf) + 1
	b.buf = append(b.buf, byte(c), 0, 0, 0, 0)
}
|
||||
77
vendor/github.com/lib/pq/compose.yaml
generated
vendored
Normal file
77
vendor/github.com/lib/pq/compose.yaml
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
name: 'pqgo'
|
||||
|
||||
services:
|
||||
pgbouncer:
|
||||
profiles: ['pgbouncer']
|
||||
image: 'cleanstart/pgbouncer:1.24'
|
||||
ports: ['127.0.0.1:6432:6432']
|
||||
command: ['/init/pgbouncer.ini']
|
||||
volumes: ['./testdata/init:/init']
|
||||
environment:
|
||||
'PGBOUNCER_DATABASE': 'pqgo'
|
||||
|
||||
pgpool:
|
||||
profiles: ['pgpool']
|
||||
image: 'pgpool/pgpool:4.4.3'
|
||||
ports: ['127.0.0.1:7432:7432']
|
||||
volumes: ['./testdata/init:/init']
|
||||
entrypoint: '/init/entry-pgpool.sh'
|
||||
environment:
|
||||
'PGPOOL_PARAMS_PORT': '7432'
|
||||
'PGPOOL_PARAMS_BACKEND_HOSTNAME0': 'pg18'
|
||||
|
||||
pg18:
|
||||
image: 'postgres:18'
|
||||
ports: ['127.0.0.1:5432:5432']
|
||||
entrypoint: '/init/entry.sh'
|
||||
volumes: ['./testdata/init:/init']
|
||||
shm_size: '128mb'
|
||||
environment:
|
||||
'POSTGRES_DATABASE': 'pqgo'
|
||||
'POSTGRES_USER': 'pqgo'
|
||||
'POSTGRES_PASSWORD': 'unused'
|
||||
pg17:
|
||||
profiles: ['pg17']
|
||||
image: 'postgres:17'
|
||||
ports: ['127.0.0.1:5432:5432']
|
||||
entrypoint: '/init/entry.sh'
|
||||
volumes: ['./testdata/init:/init']
|
||||
shm_size: '128mb'
|
||||
user: 'root'
|
||||
environment:
|
||||
'POSTGRES_DATABASE': 'pqgo'
|
||||
'POSTGRES_USER': 'pqgo'
|
||||
'POSTGRES_PASSWORD': 'unused'
|
||||
pg16:
|
||||
profiles: ['pg16']
|
||||
image: 'postgres:16'
|
||||
ports: ['127.0.0.1:5432:5432']
|
||||
entrypoint: '/init/entry.sh'
|
||||
volumes: ['./testdata/init:/init']
|
||||
shm_size: '128mb'
|
||||
environment:
|
||||
'POSTGRES_DATABASE': 'pqgo'
|
||||
'POSTGRES_USER': 'pqgo'
|
||||
'POSTGRES_PASSWORD': 'unused'
|
||||
pg15:
|
||||
profiles: ['pg15']
|
||||
image: 'postgres:15'
|
||||
ports: ['127.0.0.1:5432:5432']
|
||||
entrypoint: '/init/entry.sh'
|
||||
volumes: ['./testdata/init:/init']
|
||||
shm_size: '128mb'
|
||||
environment:
|
||||
'POSTGRES_DATABASE': 'pqgo'
|
||||
'POSTGRES_USER': 'pqgo'
|
||||
'POSTGRES_PASSWORD': 'unused'
|
||||
pg14:
|
||||
profiles: ['pg14']
|
||||
image: 'postgres:14'
|
||||
ports: ['127.0.0.1:5432:5432']
|
||||
entrypoint: '/init/entry.sh'
|
||||
volumes: ['./testdata/init:/init']
|
||||
shm_size: '128mb'
|
||||
environment:
|
||||
'POSTGRES_DATABASE': 'pqgo'
|
||||
'POSTGRES_USER': 'pqgo'
|
||||
'POSTGRES_PASSWORD': 'unused'
|
||||
1775
vendor/github.com/lib/pq/conn.go
generated
vendored
Normal file
1775
vendor/github.com/lib/pq/conn.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
226
vendor/github.com/lib/pq/conn_go18.go
generated
vendored
Normal file
226
vendor/github.com/lib/pq/conn_go18.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
)
|
||||
|
||||
// watchCancelDialContextTimeout bounds the dial performed to deliver a cancel
// request once the caller's own context has already been canceled (see
// conn.watchCancel / stmt.watchCancel).
const watchCancelDialContextTimeout = 10 * time.Second
|
||||
// QueryContext implements the driver.QueryerContext interface.
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
	finish := cn.watchCancel(ctx)
	r, err := cn.query(query, args)
	if err != nil {
		// Stop the cancel watcher before surfacing the error.
		if finish != nil {
			finish()
		}
		return nil, err
	}
	// Hand the watcher to the rows; it is invoked when they finish.
	r.finish = finish
	return r, nil
}
|
||||
|
||||
// ExecContext implements the driver.ExecerContext interface.
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	// Only the values are used; any names on the arguments are discarded.
	list := make([]driver.Value, len(args))
	for i, nv := range args {
		list[i] = nv.Value
	}

	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}

	return cn.Exec(query, list)
}
|
||||
|
||||
// PrepareContext implements the driver.ConnPrepareContext interface. The
// context covers only the preparation itself, not later executions of the
// returned statement.
func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}
	return cn.Prepare(query)
}
|
||||
|
||||
// BeginTx implements the driver.ConnBeginTx interface, mapping the standard
// library's isolation levels onto BEGIN ... ISOLATION LEVEL clauses and
// appending READ ONLY/READ WRITE per opts.ReadOnly.
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	var mode string

	switch sql.IsolationLevel(opts.Isolation) {
	case sql.LevelDefault:
		// Don't touch mode: use the server's default
	case sql.LevelReadUncommitted:
		mode = " ISOLATION LEVEL READ UNCOMMITTED"
	case sql.LevelReadCommitted:
		mode = " ISOLATION LEVEL READ COMMITTED"
	case sql.LevelRepeatableRead:
		mode = " ISOLATION LEVEL REPEATABLE READ"
	case sql.LevelSerializable:
		mode = " ISOLATION LEVEL SERIALIZABLE"
	default:
		return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
	}

	if opts.ReadOnly {
		mode += " READ ONLY"
	} else {
		mode += " READ WRITE"
	}

	tx, err := cn.begin(mode)
	if err != nil {
		return nil, err
	}
	// Watch for cancellation for the lifetime of the transaction.
	cn.txnFinish = cn.watchCancel(ctx)
	return tx, nil
}
|
||||
|
||||
// Ping implements the driver.Pinger interface by running an empty statement;
// any failure is reported as driver.ErrBadConn so database/sql retires the
// connection.
func (cn *conn) Ping(ctx context.Context) error {
	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}
	rows, err := cn.simpleQuery(";")
	if err != nil {
		return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
	}
	_ = rows.Close()
	return nil
}
|
||||
|
||||
// watchCancel starts a goroutine that sends a server-side cancel request if
// ctx is canceled before the returned finish func runs. It returns nil when
// ctx can never be canceled. The 1-buffered finished channel arbitrates the
// race: whichever side sends first "wins" and performs cleanup.
func (cn *conn) watchCancel(ctx context.Context) func() {
	if done := ctx.Done(); done != nil {
		finished := make(chan struct{}, 1)
		go func() {
			select {
			case <-done:
				select {
				case finished <- struct{}{}:
				default:
					// We raced with the finish func, let the next query handle this with the
					// context.
					return
				}

				// Set the connection state to bad so it does not get reused.
				cn.err.set(ctx.Err())

				// At this point the function level context is canceled,
				// so it must not be used for the additional network
				// request to cancel the query.
				// Create a new context to pass into the dial.
				ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout)
				defer cancel()

				_ = cn.cancel(ctxCancel)
			case <-finished:
			}
		}()
		return func() {
			select {
			case <-finished:
				// The watcher fired first: record the error and close the
				// connection so it cannot be reused.
				cn.err.set(ctx.Err())
				_ = cn.Close()
			case finished <- struct{}{}:
			}
		}
	}
	return nil
}
|
||||
|
||||
// cancel opens a brand-new connection to the server and sends a CancelRequest
// for this connection's backend, identified by its process ID and secret key.
//
// A copy of the config is used since a new connection is created here; cancel
// is called from a goroutine in watchCancel.
func (cn *conn) cancel(ctx context.Context) error {
	cfg := cn.cfg.Clone()

	c, err := dial(ctx, cn.dialer, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = c.Close() }()

	cn2 := conn{c: c}
	err = cn2.ssl(cfg)
	if err != nil {
		return err
	}

	// Build and send the CancelRequest startup packet.
	w := cn2.writeBuf(0)
	w.int32(proto.CancelRequestCode)
	w.int32(cn.processID)
	w.int32(cn.secretKey)
	if err := cn2.sendStartupPacket(w); err != nil {
		return err
	}

	// Read until EOF to ensure that the server received the cancel.
	_, err = io.Copy(io.Discard, c)
	return err
}
|
||||
|
||||
// QueryContext implements the driver.StmtQueryContext interface.
func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	finish := st.watchCancel(ctx)
	r, err := st.query(args)
	if err != nil {
		// Stop the cancel watcher before surfacing the error.
		if finish != nil {
			finish()
		}
		return nil, err
	}
	// Hand the watcher to the rows; it is invoked when they finish.
	r.finish = finish
	return r, nil
}
|
||||
|
||||
// ExecContext implements the driver.StmtExecContext interface.
func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	if finish := st.watchCancel(ctx); finish != nil {
		defer finish()
	}
	// Refuse to use a connection already recorded as broken.
	if err := st.cn.err.get(); err != nil {
		return nil, err
	}

	err := st.exec(args)
	if err != nil {
		return nil, st.cn.handleError(err)
	}
	res, _, err := st.cn.readExecuteResponse("simple query")
	return res, st.cn.handleError(err)
}
|
||||
|
||||
// watchCancel is implemented on stmt in order to not mark the parent conn as bad.
// Unlike conn.watchCancel, the finished channel is unbuffered: the cancel
// goroutine and the finish func rendezvous, so finish does not return until
// any in-flight cancel request has completed.
func (st *stmt) watchCancel(ctx context.Context) func() {
	if done := ctx.Done(); done != nil {
		finished := make(chan struct{})
		go func() {
			select {
			case <-done:
				// At this point the function level context is canceled, so it
				// must not be used for the additional network request to cancel
				// the query. Create a new context to pass into the dial.
				ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout)
				defer cancel()

				_ = st.cancel(ctxCancel)
				finished <- struct{}{}
			case <-finished:
			}
		}()
		return func() {
			select {
			case <-finished:
			case finished <- struct{}{}:
			}
		}
	}
	return nil
}
|
||||
|
||||
// cancel delegates to the owning connection's cancel.
func (st *stmt) cancel(ctx context.Context) error {
	return st.cn.cancel(ctx)
}
|
||||
949
vendor/github.com/lib/pq/connector.go
generated
vendored
Normal file
949
vendor/github.com/lib/pq/connector.go
generated
vendored
Normal file
@@ -0,0 +1,949 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/netip"
|
||||
neturl "net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/lib/pq/internal/pqutil"
|
||||
)
|
||||
|
||||
type (
|
||||
// SSLMode is a sslmode setting.
|
||||
SSLMode string
|
||||
|
||||
// SSLNegotiation is a sslnegotiation setting.
|
||||
SSLNegotiation string
|
||||
|
||||
// TargetSessionAttrs is a target_session_attrs setting.
|
||||
TargetSessionAttrs string
|
||||
|
||||
// LoadBalanceHosts is a load_balance_hosts setting.
|
||||
LoadBalanceHosts string
|
||||
)
|
||||
|
||||
// Values for [SSLMode] that pq supports.
|
||||
const (
|
||||
// disable: No SSL
|
||||
SSLModeDisable = SSLMode("disable")
|
||||
|
||||
// require: require SSL, but skip verification.
|
||||
SSLModeRequire = SSLMode("require")
|
||||
|
||||
// verify-ca: require SSL and verify that the certificate was signed by a
|
||||
// trusted CA.
|
||||
SSLModeVerifyCA = SSLMode("verify-ca")
|
||||
|
||||
// verify-full: require SSL and verify that the certificate was signed by a
|
||||
// trusted CA and the server host name matches the one in the certificate.
|
||||
SSLModeVerifyFull = SSLMode("verify-full")
|
||||
)
|
||||
|
||||
var sslModes = []SSLMode{SSLModeDisable, SSLModeRequire, SSLModeVerifyFull, SSLModeVerifyCA}
|
||||
|
||||
// Values for [SSLNegotiation] that pq supports.
|
||||
const (
|
||||
// Negotiate whether SSL should be used. This is the default.
|
||||
SSLNegotiationPostgres = SSLNegotiation("postgres")
|
||||
|
||||
// Always use SSL, don't try to negotiate.
|
||||
SSLNegotiationDirect = SSLNegotiation("direct")
|
||||
)
|
||||
|
||||
var sslNegotiations = []SSLNegotiation{SSLNegotiationPostgres, SSLNegotiationDirect}
|
||||
|
||||
// Values for [TargetSessionAttrs] that pq supports.
|
||||
const (
|
||||
// Any successful connection is acceptable. This is the default.
|
||||
TargetSessionAttrsAny = TargetSessionAttrs("any")
|
||||
|
||||
// Session must accept read-write transactions by default: the server must
|
||||
// not be in hot standby mode and default_transaction_read_only must be
|
||||
// off.
|
||||
TargetSessionAttrsReadWrite = TargetSessionAttrs("read-write")
|
||||
|
||||
// Session must not accept read-write transactions by default.
|
||||
TargetSessionAttrsReadOnly = TargetSessionAttrs("read-only")
|
||||
|
||||
// Server must not be in hot standby mode.
|
||||
TargetSessionAttrsPrimary = TargetSessionAttrs("primary")
|
||||
|
||||
// Server must be in hot standby mode.
|
||||
TargetSessionAttrsStandby = TargetSessionAttrs("standby")
|
||||
|
||||
// First try to find a standby server, but if none of the listed hosts is a
|
||||
// standby server, try again in any mode.
|
||||
TargetSessionAttrsPreferStandby = TargetSessionAttrs("prefer-standby")
|
||||
)
|
||||
|
||||
var targetSessionAttrs = []TargetSessionAttrs{TargetSessionAttrsAny,
|
||||
TargetSessionAttrsReadWrite, TargetSessionAttrsReadOnly, TargetSessionAttrsPrimary,
|
||||
TargetSessionAttrsStandby, TargetSessionAttrsPreferStandby}
|
||||
|
||||
// Values for [LoadBalanceHosts] that pq supports.
|
||||
const (
|
||||
// Don't load balance; try hosts in the order in which they're provided.
|
||||
// This is the default.
|
||||
LoadBalanceHostsDisable = LoadBalanceHosts("disable")
|
||||
|
||||
// Hosts are tried in random order to balance connections across multiple
|
||||
// PostgreSQL servers.
|
||||
//
|
||||
// When using this value it's recommended to also configure a reasonable
|
||||
// value for connect_timeout. Because then, if one of the nodes that are
|
||||
// used for load balancing is not responding, a new node will be tried.
|
||||
LoadBalanceHostsRandom = LoadBalanceHosts("random")
|
||||
)
|
||||
|
||||
var loadBalanceHosts = []LoadBalanceHosts{LoadBalanceHostsDisable, LoadBalanceHostsRandom}
|
||||
|
||||
// Connector represents a fixed configuration for the pq driver with a given
// dsn. Connector satisfies the [database/sql/driver.Connector] interface and
// can be used to create any number of DB Conn's via [sql.OpenDB].
type Connector struct {
	cfg    Config // parsed connection configuration
	dialer Dialer // opens the network connection; replaceable via Dialer()
}
|
||||
|
||||
// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given dsn. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// [sql.OpenDB].
func NewConnector(dsn string) (*Connector, error) {
	cfg, err := NewConfig(dsn)
	if err != nil {
		return nil, err
	}
	return NewConnectorConfig(cfg)
}

// NewConnectorConfig returns a connector for the pq driver in a fixed
// configuration with the given [Config]. The returned connector can be used to
// create any number of equivalent Conn's. The returned connector is intended to
// be used with [sql.OpenDB].
func NewConnectorConfig(cfg Config) (*Connector, error) {
	return &Connector{cfg: cfg, dialer: defaultDialer{}}, nil
}
|
||||
|
||||
// Connect returns a connection to the database using the fixed configuration of
// this Connector. Context is not used.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { return c.open(ctx) }

// Dialer allows changing the dialer used to open connections.
func (c *Connector) Dialer(dialer Dialer) { c.dialer = dialer }

// Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver { return &Driver{} }
|
||||
|
||||
// Config holds options pq supports when connecting to PostgreSQL.
|
||||
//
|
||||
// The postgres struct tag is used for the value from the DSN (e.g.
|
||||
// "dbname=abc"), and the env struct tag is used for the environment variable
|
||||
// (e.g. "PGDATABASE=abc")
|
||||
type Config struct {
|
||||
// The host to connect to. Absolute paths and values that start with @ are
|
||||
// for unix domain sockets. Defaults to localhost.
|
||||
//
|
||||
// A comma-separated list of host names is also accepted, in which case each
|
||||
// host name in the list is tried in order or randomly if load_balance_hosts
|
||||
// is set. An empty item selects the default of localhost. The
|
||||
// target_session_attrs option controls properties the host must have to be
|
||||
// considered acceptable.
|
||||
Host string `postgres:"host" env:"PGHOST"`
|
||||
|
||||
// IPv4 or IPv6 address to connect to. Using hostaddr allows the application
|
||||
// to avoid a host name lookup, which might be important in applications
|
||||
// with time constraints. A hostname is required for sslmode=verify-full and
|
||||
// the GSSAPI or SSPI authentication methods.
|
||||
//
|
||||
// The following rules are used:
|
||||
//
|
||||
// - If host is given without hostaddr, a host name lookup occurs.
|
||||
//
|
||||
// - If hostaddr is given without host, the value for hostaddr gives the
|
||||
// server network address. The connection attempt will fail if the
|
||||
// authentication method requires a host name.
|
||||
//
|
||||
// - If both host and hostaddr are given, the value for hostaddr gives the
|
||||
// server network address. The value for host is ignored unless the
|
||||
// authentication method requires it, in which case it will be used as the
|
||||
// host name.
|
||||
//
|
||||
// A comma-separated list of hostaddr values is also accepted, in which case
|
||||
// each host in the list is tried in order or randomly if load_balance_hosts
|
||||
// is set. An empty item causes the corresponding host name to be used, or
|
||||
// the default host name if that is empty as well. The target_session_attrs
|
||||
// option controls properties the host must have to be considered
|
||||
// acceptable.
|
||||
Hostaddr netip.Addr `postgres:"hostaddr" env:"PGHOSTADDR"`
|
||||
|
||||
// The port to connect to. Defaults to 5432.
|
||||
//
|
||||
// If multiple hosts were given in the host or hostaddr parameters, this
|
||||
// parameter may specify a comma-separated list of ports of the same length
|
||||
// as the host list, or it may specify a single port number to be used for
|
||||
// all hosts. An empty string, or an empty item in a comma-separated list,
|
||||
// specifies the default of 5432.
|
||||
Port uint16 `postgres:"port" env:"PGPORT"`
|
||||
|
||||
// The name of the database to connect to.
|
||||
Database string `postgres:"dbname" env:"PGDATABASE"`
|
||||
|
||||
// The user to sign in as. Defaults to the current user.
|
||||
User string `postgres:"user" env:"PGUSER"`
|
||||
|
||||
// The user's password.
|
||||
Password string `postgres:"password" env:"PGPASSWORD"`
|
||||
|
||||
// Path to [pgpass] file to store passwords; overrides Password.
|
||||
//
|
||||
// [pgpass]: http://www.postgresql.org/docs/current/static/libpq-pgpass.html
|
||||
Passfile string `postgres:"passfile" env:"PGPASSFILE"`
|
||||
|
||||
// Commandline options to send to the server at connection start.
|
||||
Options string `postgres:"options" env:"PGOPTIONS"`
|
||||
|
||||
// Application name, displayed in pg_stat_activity and log entries.
|
||||
ApplicationName string `postgres:"application_name" env:"PGAPPNAME"`
|
||||
|
||||
// Used if application_name is not given. Specifying a fallback name is
|
||||
// useful in generic utility programs that wish to set a default application
|
||||
// name but allow it to be overridden by the user.
|
||||
FallbackApplicationName string `postgres:"fallback_application_name" env:"-"`
|
||||
|
||||
// Whether to use SSL. Defaults to "require" (different from libpq's default
|
||||
// of "prefer").
|
||||
//
|
||||
// [RegisterTLSConfig] can be used to register a custom [tls.Config], which
|
||||
// can be used by setting sslmode=pqgo-«key» in the connection string.
|
||||
SSLMode SSLMode `postgres:"sslmode" env:"PGSSLMODE"`
|
||||
|
||||
// When set to "direct" it will use SSL without negotiation (PostgreSQL ≥17 only).
|
||||
SSLNegotiation SSLNegotiation `postgres:"sslnegotiation" env:"PGSSLNEGOTIATION"`
|
||||
|
||||
// Cert file location. The file must contain PEM encoded data.
|
||||
SSLCert string `postgres:"sslcert" env:"PGSSLCERT"`
|
||||
|
||||
// Key file location. The file must contain PEM encoded data.
|
||||
SSLKey string `postgres:"sslkey" env:"PGSSLKEY"`
|
||||
|
||||
// The location of the root certificate file. The file must contain PEM encoded data.
|
||||
SSLRootCert string `postgres:"sslrootcert" env:"PGSSLROOTCERT"`
|
||||
|
||||
// By default SNI is on, any value which is not starting with "1" disables
|
||||
// SNI.
|
||||
SSLSNI bool `postgres:"sslsni" env:"PGSSLSNI"`
|
||||
|
||||
// Interpret sslcert and sslkey as PEM encoded data, rather than a path to a
|
||||
// PEM file. This is a pq extension, not supported in libpq.
|
||||
SSLInline bool `postgres:"sslinline" env:"-"`
|
||||
|
||||
// GSS (Kerberos) service name when constructing the SPN (default is
|
||||
// postgres). This will be combined with the host to form the full SPN:
|
||||
// krbsrvname/host.
|
||||
KrbSrvname string `postgres:"krbsrvname" env:"PGKRBSRVNAME"`
|
||||
|
||||
// GSS (Kerberos) SPN. This takes priority over krbsrvname if present. This
|
||||
// is a pq extension, not supported in libpq.
|
||||
KrbSpn string `postgres:"krbspn" env:"-"`
|
||||
|
||||
// Maximum time to wait while connecting, in seconds. Zero, negative, or not
|
||||
// specified means wait indefinitely
|
||||
ConnectTimeout time.Duration `postgres:"connect_timeout" env:"PGCONNECT_TIMEOUT"`
|
||||
|
||||
// Whether to always send []byte parameters over as binary. Enables single
|
||||
// round-trip mode for non-prepared Query calls. This is a pq extension, not
|
||||
// supported in libpq.
|
||||
BinaryParameters bool `postgres:"binary_parameters" env:"-"`
|
||||
|
||||
// This connection should never use the binary format when receiving query
|
||||
// results from prepared statements. Only provided for debugging. This is a
|
||||
// pq extension, not supported in libpq.
|
||||
DisablePreparedBinaryResult bool `postgres:"disable_prepared_binary_result" env:"-"`
|
||||
|
||||
// Client encoding; pq only supports UTF8 and this must be blank or "UTF8".
|
||||
ClientEncoding string `postgres:"client_encoding" env:"PGCLIENTENCODING"`
|
||||
|
||||
// Date/time representation to use; pq only supports "ISO, MDY" and this
|
||||
// must be blank or "ISO, MDY".
|
||||
Datestyle string `postgres:"datestyle" env:"PGDATESTYLE"`
|
||||
|
||||
// Default time zone.
|
||||
TZ string `postgres:"tz" env:"PGTZ"`
|
||||
|
||||
// Default mode for the genetic query optimizer.
|
||||
Geqo string `postgres:"geqo" env:"PGGEQO"`
|
||||
|
||||
// Determine whether the session must have certain properties to be
|
||||
// acceptable. It's typically used in combination with multiple host names
|
||||
// to select the first acceptable alternative among several hosts.
|
||||
TargetSessionAttrs TargetSessionAttrs `postgres:"target_session_attrs" env:"PGTARGETSESSIONATTRS"`
|
||||
|
||||
// Controls the order in which the client tries to connect to the available
|
||||
// hosts. Once a connection attempt is successful no other hosts will be
|
||||
// tried. This parameter is typically used in combination with multiple host
|
||||
// names.
|
||||
//
|
||||
// This parameter can be used in combination with target_session_attrs to,
|
||||
// for example, load balance over standby servers only. Once successfully
|
||||
// connected, subsequent queries on the returned connection will all be sent
|
||||
// to the same server.
|
||||
LoadBalanceHosts LoadBalanceHosts `postgres:"load_balance_hosts" env:"PGLOADBALANCEHOSTS"`
|
||||
|
||||
// Runtime parameters: any unrecognized parameter in the DSN will be added
|
||||
// to this and sent to PostgreSQL during startup.
|
||||
Runtime map[string]string `postgres:"-" env:"-"`
|
||||
|
||||
// Multi contains additional connection details. The first value is
|
||||
// available in [Config.Host], [Config.Hostaddr], and [Config.Port], and
|
||||
// additional ones (if any) are available here.
|
||||
Multi []ConfigMultihost
|
||||
|
||||
// Record which parameters were given, so we can distinguish between an
|
||||
// empty string "not given at all".
|
||||
//
|
||||
// The alternative is to use pointers or sql.Null[..], but that's more
|
||||
// awkward to use.
|
||||
set []string `env:"set"`
|
||||
|
||||
multiHost []string
|
||||
multiHostaddr []netip.Addr
|
||||
multiPort []uint16
|
||||
}
|
||||
|
||||
// ConfigMultihost specifies an additional server to try to connect to.
type ConfigMultihost struct {
	Host     string     // host name; may be empty when only hostaddr was given
	Hostaddr netip.Addr // numeric address; zero value when not given
	Port     uint16     // port number for this host
}
|
||||
|
||||
// NewConfig creates a new [Config] from the current environment and given DSN.
//
// A subset of the connection parameters supported by PostgreSQL are supported
// by pq; see the [Config] struct fields for supported parameters. pq also lets
// you specify any [run-time parameter] (such as search_path or work_mem)
// directly in the connection string. This is different from libpq, which does
// not allow run-time parameters in the connection string, instead requiring you
// to supply them in the options parameter.
//
// # key=value connection strings
//
// For key=value strings, use single quotes for values that contain whitespace
// or empty values. A backslash will escape the next character:
//
//	"user=pqgo password='with spaces'"
//	"user=''"
//	"user=space\ man password='it\'s valid'"
//
// # URL connection strings
//
// pq supports URL-style postgres:// or postgresql:// connection strings in the
// form:
//
//	postgres[ql]://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
//
// Go's [net/url.Parse] is more strict than PostgreSQL's URL parser and will
// (correctly) reject %2F in the host part. This means that unix-socket URLs:
//
//	postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
//	postgres://%2Ftmp%2Fpostgres/db
//
// will not work. You will need to use "host=/tmp/postgres dbname=db".
//
// Similarly, multiple ports also won't work, but ?port= will:
//
//	postgres://host1,host2:5432,6543/dbname        Doesn't work
//	postgres://host1,host2/dbname?port=5432,6543   Works
//
// # Environment
//
// Most [PostgreSQL environment variables] are supported by pq. Environment
// variables have a lower precedence than explicitly provided connection
// parameters. pq will return an error if environment variables it does not
// support are set.
//
// [run-time parameter]: http://www.postgresql.org/docs/current/static/runtime-config.html
// [PostgreSQL environment variables]: http://www.postgresql.org/docs/current/static/libpq-envars.html
func NewConfig(dsn string) (Config, error) {
	return newConfig(dsn, os.Environ())
}
|
||||
|
||||
// Clone returns a copy of the [Config].
|
||||
func (cfg Config) Clone() Config {
|
||||
rt := make(map[string]string)
|
||||
for k, v := range cfg.Runtime {
|
||||
rt[k] = v
|
||||
}
|
||||
c := cfg
|
||||
c.Runtime = rt
|
||||
c.set = append([]string{}, cfg.set...)
|
||||
return c
|
||||
}
|
||||
|
||||
// hosts returns a slice of copies of this config, one for each host.
|
||||
func (cfg Config) hosts() []Config {
|
||||
cfgs := make([]Config, 1, len(cfg.Multi)+1)
|
||||
cfgs[0] = cfg.Clone()
|
||||
for _, m := range cfg.Multi {
|
||||
c := cfg.Clone()
|
||||
c.Host, c.Hostaddr, c.Port = m.Host, m.Hostaddr, m.Port
|
||||
cfgs = append(cfgs, c)
|
||||
}
|
||||
|
||||
if cfg.LoadBalanceHosts == LoadBalanceHostsRandom {
|
||||
rand.Shuffle(len(cfgs), func(i, j int) { cfgs[i], cfgs[j] = cfgs[j], cfgs[i] })
|
||||
}
|
||||
|
||||
return cfgs
|
||||
}
|
||||
|
||||
// newConfig builds a Config from the given DSN and environment. Environment
// variables are applied first, so explicit DSN parameters override them.
// After parsing it validates the multi-host settings, fills in defaults, and
// normalizes client_encoding and datestyle.
func newConfig(dsn string, env []string) (Config, error) {
	// Defaults per libpq: localhost:5432, SNI enabled.
	cfg := Config{Host: "localhost", Port: 5432, SSLSNI: true}
	if err := cfg.fromEnv(env); err != nil {
		return Config{}, err
	}
	if err := cfg.fromDSN(dsn); err != nil {
		return Config{}, err
	}

	// Need to have exactly the same number of host and hostaddr, or only specify one.
	if cfg.isset("host") && cfg.Host != "" && cfg.Hostaddr != (netip.Addr{}) && len(cfg.multiHost) != len(cfg.multiHostaddr) {
		return Config{}, fmt.Errorf("pq: could not match %d host names to %d hostaddr values",
			len(cfg.multiHost)+1, len(cfg.multiHostaddr)+1)
	}
	// Need one port that applies to all or exactly the same number of ports as hosts.
	l, ll := max(len(cfg.multiHost), len(cfg.multiHostaddr)), len(cfg.multiPort)
	if l > 0 && ll > 0 && l != ll {
		return Config{}, fmt.Errorf("pq: could not match %d port numbers to %d hosts", ll+1, l+1)
	}

	// Populate Multi
	if len(cfg.multiHostaddr) > len(cfg.multiHost) {
		// Only hostaddrs were given; pad the host list to match.
		cfg.multiHost = make([]string, len(cfg.multiHostaddr))
	}
	for i, h := range cfg.multiHost {
		p := cfg.Port
		if len(cfg.multiPort) > 0 {
			p = cfg.multiPort[i]
		}
		var addr netip.Addr
		if len(cfg.multiHostaddr) > 0 {
			addr = cfg.multiHostaddr[i]
		}
		cfg.Multi = append(cfg.Multi, ConfigMultihost{
			Host:     h,
			Port:     p,
			Hostaddr: addr,
		})
	}

	// Use the "fallback" application name if necessary
	if cfg.isset("fallback_application_name") && !cfg.isset("application_name") {
		cfg.ApplicationName = cfg.FallbackApplicationName
	}

	// We can't work with any client_encoding other than UTF-8 currently.
	// However, we have historically allowed the user to set it to UTF-8
	// explicitly, and there's no reason to break such programs, so allow that.
	// Note that the "options" setting could also set client_encoding, but
	// parsing its value is not worth it. Instead, we always explicitly send
	// client_encoding as a separate run-time parameter, which should override
	// anything set in options.
	if cfg.isset("client_encoding") && !isUTF8(cfg.ClientEncoding) {
		return Config{}, fmt.Errorf(`pq: unsupported client_encoding %q: must be absent or "UTF8"`, cfg.ClientEncoding)
	}
	// DateStyle needs a similar treatment.
	if cfg.isset("datestyle") && cfg.Datestyle != "ISO, MDY" {
		return Config{}, fmt.Errorf(`pq: unsupported datestyle %q: must be absent or "ISO, MDY"`, cfg.Datestyle)
	}
	cfg.ClientEncoding, cfg.Datestyle = "UTF8", "ISO, MDY"

	// Set default user if not explicitly provided.
	if !cfg.isset("user") {
		u, err := pqutil.User()
		if err != nil {
			return Config{}, err
		}
		cfg.User = u
	}

	// SSL is not necessary or supported over UNIX domain sockets.
	if nw, _ := cfg.network(); nw == "unix" {
		cfg.SSLMode = SSLModeDisable
	}

	return cfg, nil
}
|
||||
|
||||
// network returns the network type ("tcp" or "unix") and the address to dial
// for this configuration.
func (cfg Config) network() (string, string) {
	// An explicit hostaddr always means TCP.
	if cfg.Hostaddr != (netip.Addr{}) {
		return "tcp", net.JoinHostPort(cfg.Hostaddr.String(), strconv.Itoa(int(cfg.Port)))
	}
	// UNIX domain sockets are either represented by an (absolute) file system
	// path or they live in the abstract name space (starting with an @).
	if filepath.IsAbs(cfg.Host) || strings.HasPrefix(cfg.Host, "@") {
		// PostgreSQL's socket file naming convention inside the directory.
		sockPath := filepath.Join(cfg.Host, ".s.PGSQL."+strconv.Itoa(int(cfg.Port)))
		return "unix", sockPath
	}
	return "tcp", net.JoinHostPort(cfg.Host, strconv.Itoa(int(cfg.Port)))
}
|
||||
|
||||
// fromEnv fills cfg from PG* environment variables in env (as "KEY=VALUE"
// strings). Variables that libpq supports but pq does not result in an error
// rather than being silently ignored.
func (cfg *Config) fromEnv(env []string) error {
	e := make(map[string]string)
	for _, v := range env {
		k, v, ok := strings.Cut(v, "=")
		if !ok {
			// Not a KEY=VALUE entry; skip.
			continue
		}
		switch k {
		// libpq variables pq has no support for: fail loudly.
		case "PGREQUIREAUTH", "PGCHANNELBINDING", "PGSERVICE", "PGSERVICEFILE", "PGREALM",
			"PGSSLCERTMODE", "PGSSLCOMPRESSION", "PGREQUIRESSL", "PGSSLCRL", "PGREQUIREPEER",
			"PGSYSCONFDIR", "PGLOCALEDIR", "PGSSLCRLDIR", "PGSSLMINPROTOCOLVERSION", "PGSSLMAXPROTOCOLVERSION",
			"PGGSSENCMODE", "PGGSSDELEGATION", "PGMINPROTOCOLVERSION", "PGMAXPROTOCOLVERSION", "PGGSSLIB":
			return fmt.Errorf("pq: environment variable $%s is not supported", k)
		case "PGKRBSRVNAME":
			// Kerberos support is only compiled in when newGss is set.
			if newGss == nil {
				return fmt.Errorf("pq: environment variable $%s is not supported as Kerberos is not enabled", k)
			}
		}
		e[k] = v
	}
	return cfg.setFromTag(e, "env")
}
|
||||
|
||||
// parseOpts parses the options from name and adds them to the values.
|
||||
//
|
||||
// The parsing code is based on conninfo_parse from libpq's fe-connect.c
|
||||
func (cfg *Config) fromDSN(dsn string) error {
|
||||
if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
|
||||
var err error
|
||||
dsn, err = convertURL(dsn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
opt = make(map[string]string)
|
||||
s = []rune(dsn)
|
||||
i int
|
||||
next = func() (rune, bool) {
|
||||
if i >= len(s) {
|
||||
return 0, false
|
||||
}
|
||||
r := s[i]
|
||||
i++
|
||||
return r, true
|
||||
}
|
||||
skipSpaces = func() (rune, bool) {
|
||||
r, ok := next()
|
||||
for unicode.IsSpace(r) && ok {
|
||||
r, ok = next()
|
||||
}
|
||||
return r, ok
|
||||
}
|
||||
)
|
||||
|
||||
for {
|
||||
var (
|
||||
keyRunes, valRunes []rune
|
||||
r rune
|
||||
ok bool
|
||||
)
|
||||
|
||||
if r, ok = skipSpaces(); !ok {
|
||||
break
|
||||
}
|
||||
|
||||
// Scan the key
|
||||
for !unicode.IsSpace(r) && r != '=' {
|
||||
keyRunes = append(keyRunes, r)
|
||||
if r, ok = next(); !ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Skip any whitespace if we're not at the = yet
|
||||
if r != '=' {
|
||||
r, ok = skipSpaces()
|
||||
}
|
||||
|
||||
// The current character should be =
|
||||
if r != '=' || !ok {
|
||||
return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes))
|
||||
}
|
||||
|
||||
// Skip any whitespace after the =
|
||||
if r, ok = skipSpaces(); !ok {
|
||||
// If we reach the end here, the last value is just an empty string as per libpq.
|
||||
opt[string(keyRunes)] = ""
|
||||
break
|
||||
}
|
||||
|
||||
if r != '\'' {
|
||||
for !unicode.IsSpace(r) {
|
||||
if r == '\\' {
|
||||
if r, ok = next(); !ok {
|
||||
return fmt.Errorf(`missing character after backslash`)
|
||||
}
|
||||
}
|
||||
valRunes = append(valRunes, r)
|
||||
|
||||
if r, ok = next(); !ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
quote:
|
||||
for {
|
||||
if r, ok = next(); !ok {
|
||||
return fmt.Errorf(`unterminated quoted string literal in connection string`)
|
||||
}
|
||||
switch r {
|
||||
case '\'':
|
||||
break quote
|
||||
case '\\':
|
||||
r, _ = next()
|
||||
fallthrough
|
||||
default:
|
||||
valRunes = append(valRunes, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
opt[string(keyRunes)] = string(valRunes)
|
||||
}
|
||||
|
||||
return cfg.setFromTag(opt, "postgres")
|
||||
}
|
||||
|
||||
// setFromTag copies values from o into the cfg struct fields whose struct tag
// (`postgres:"..."` or `env:"..."`, selected by tag) matches the map key.
// Matched keys are deleted from o; for the "postgres" tag, whatever remains
// afterwards is treated as run-time parameters.
func (cfg *Config) setFromTag(o map[string]string, tag string) error {
	f := "pq: wrong value for %q: "
	if tag == "env" {
		f = "pq: wrong value for $%s: "
	}
	var (
		types  = reflect.TypeOf(cfg).Elem()
		values = reflect.ValueOf(cfg).Elem()
	)
	for i := 0; i < types.NumField(); i++ {
		var (
			rt = types.Field(i)
			rv = values.Field(i)
			k  = rt.Tag.Get(tag)
			// Parameters needing extra validation or multi-value handling.
			connectTimeout     = (tag == "postgres" && k == "connect_timeout") || (tag == "env" && k == "PGCONNECT_TIMEOUT")
			host               = (tag == "postgres" && k == "host") || (tag == "env" && k == "PGHOST")
			hostaddr           = (tag == "postgres" && k == "hostaddr") || (tag == "env" && k == "PGHOSTADDR")
			port               = (tag == "postgres" && k == "port") || (tag == "env" && k == "PGPORT")
			sslmode            = (tag == "postgres" && k == "sslmode") || (tag == "env" && k == "PGSSLMODE")
			sslnegotiation     = (tag == "postgres" && k == "sslnegotiation") || (tag == "env" && k == "PGSSLNEGOTIATION")
			targetsessionattrs = (tag == "postgres" && k == "target_session_attrs") || (tag == "env" && k == "PGTARGETSESSIONATTRS")
			loadbalancehosts   = (tag == "postgres" && k == "load_balance_hosts") || (tag == "env" && k == "PGLOADBALANCEHOSTS")
		)
		if k == "" || k == "-" {
			continue
		}

		v, ok := o[k]
		delete(o, k)
		if ok {
			// Record that this parameter was given explicitly; see cfg.set.
			if t, ok := rt.Tag.Lookup("postgres"); ok && t != "" && t != "-" {
				cfg.set = append(cfg.set, t)
			}
			switch rt.Type.Kind() {
			default:
				return fmt.Errorf("don't know how to set %s: unknown type %s", rt.Name, rt.Type.Kind())
			case reflect.Struct:
				if rt.Type == reflect.TypeOf(netip.Addr{}) {
					if hostaddr {
						// hostaddr may be a comma-separated list; the first
						// entry goes in the struct field, the rest in
						// multiHostaddr (empty entries become zero addrs).
						vv := strings.Split(v, ",")
						v = vv[0]
						for _, vvv := range vv[1:] {
							if vvv == "" {
								cfg.multiHostaddr = append(cfg.multiHostaddr, netip.Addr{})
							} else {
								ip, err := netip.ParseAddr(vvv)
								if err != nil {
									return fmt.Errorf(f+"%w", k, err)
								}
								cfg.multiHostaddr = append(cfg.multiHostaddr, ip)
							}
						}
					}
					ip, err := netip.ParseAddr(v)
					if err != nil {
						return fmt.Errorf(f+"%w", k, err)
					}
					rv.Set(reflect.ValueOf(ip))
				} else {
					return fmt.Errorf("don't know how to set %s: unknown type %s", rt.Name, rt.Type)
				}
			case reflect.String:
				// Enum-like parameters: validate against supported values.
				if sslmode && !slices.Contains(sslModes, SSLMode(v)) && !(strings.HasPrefix(v, "pqgo-") && hasTLSConfig(v[5:])) {
					return fmt.Errorf(f+`%q is not supported; supported values are %s`, k, v, pqutil.Join(sslModes))
				}
				if sslnegotiation && !slices.Contains(sslNegotiations, SSLNegotiation(v)) {
					return fmt.Errorf(f+`%q is not supported; supported values are %s`, k, v, pqutil.Join(sslNegotiations))
				}
				if targetsessionattrs && !slices.Contains(targetSessionAttrs, TargetSessionAttrs(v)) {
					return fmt.Errorf(f+`%q is not supported; supported values are %s`, k, v, pqutil.Join(targetSessionAttrs))
				}
				if loadbalancehosts && !slices.Contains(loadBalanceHosts, LoadBalanceHosts(v)) {
					return fmt.Errorf(f+`%q is not supported; supported values are %s`, k, v, pqutil.Join(loadBalanceHosts))
				}
				if host {
					// host may be a comma-separated list; empty entries mean
					// the default "localhost". Extra entries go in multiHost.
					vv := strings.Split(v, ",")
					v = vv[0]
					for i, vvv := range vv[1:] {
						if vvv == "" {
							vv[i+1] = "localhost"
						}
					}
					cfg.multiHost = append(cfg.multiHost, vv[1:]...)
				}
				rv.SetString(v)
			case reflect.Int64:
				n, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return fmt.Errorf(f+"%w", k, err)
				}
				if connectTimeout {
					// connect_timeout is given in seconds; stored as a
					// time.Duration.
					n = int64(time.Duration(n) * time.Second)
				}
				rv.SetInt(n)
			case reflect.Uint16:
				if port {
					// port may be a comma-separated list matching the hosts;
					// empty entries mean the default 5432. Extra entries go
					// in multiPort.
					vv := strings.Split(v, ",")
					v = vv[0]
					for _, vvv := range vv[1:] {
						if vvv == "" {
							vvv = "5432"
						}
						n, err := strconv.ParseUint(vvv, 10, 16)
						if err != nil {
							return fmt.Errorf(f+"%w", k, err)
						}
						cfg.multiPort = append(cfg.multiPort, uint16(n))
					}
				}
				n, err := strconv.ParseUint(v, 10, 16)
				if err != nil {
					return fmt.Errorf(f+"%w", k, err)
				}
				rv.SetUint(n)
			case reflect.Bool:
				b, err := pqutil.ParseBool(v)
				if err != nil {
					return fmt.Errorf(f+"%w", k, err)
				}
				rv.SetBool(b)
			}
		}
	}

	// Set run-time; we delete map keys as they're set in the struct.
	if tag == "postgres" {
		// Make sure database= sets dbname=, as that previously worked (kind of
		// by accident).
		// TODO(v2): remove
		if d, ok := o["database"]; ok {
			cfg.Database = d
			delete(o, "database")
		}
		cfg.Runtime = o
	}

	return nil
}
|
||||
|
||||
func (cfg Config) isset(name string) bool {
|
||||
return slices.Contains(cfg.set, name)
|
||||
}
|
||||
|
||||
// Convert to a map; used only in tests. Fields are included when they have a
// non-zero value or were given explicitly; Runtime parameters are merged in
// last.
func (cfg Config) tomap() map[string]string {
	var (
		o      = make(map[string]string)
		values = reflect.ValueOf(cfg)
		types  = reflect.TypeOf(cfg)
	)
	for i := 0; i < types.NumField(); i++ {
		var (
			rt = types.Field(i)
			rv = values.Field(i)
			k  = rt.Tag.Get("postgres")
		)
		if k == "" || k == "-" {
			continue
		}
		if !rv.IsZero() || slices.Contains(cfg.set, k) {
			switch rt.Type.Kind() {
			default:
				// Prefer a Stringer if the field type has one.
				if s, ok := rv.Interface().(fmt.Stringer); ok {
					o[k] = s.String()
				} else {
					o[k] = rv.String()
				}
			case reflect.Uint16:
				n := rv.Uint()
				o[k] = strconv.FormatUint(n, 10)
			case reflect.Int64:
				n := rv.Int()
				if k == "connect_timeout" {
					// Stored as a time.Duration; the DSN wants seconds.
					n = int64(time.Duration(n) / time.Second)
				}
				o[k] = strconv.FormatInt(n, 10)
			case reflect.Bool:
				if rv.Bool() {
					o[k] = "yes"
				} else {
					o[k] = "no"
				}
			}
		}
	}
	for k, v := range cfg.Runtime {
		o[k] = v
	}
	return o
}
|
||||
|
||||
// Create DSN for this config; used only in tests. Produces a key=value
// string with sorted keys; values containing spaces (or empty values) are
// single-quoted, and quotes are backslash-escaped.
func (cfg Config) string() string {
	var (
		m    = cfg.tomap()
		keys = make([]string, 0, len(m))
	)
	for k := range m {
		switch k {
		case "datestyle", "client_encoding":
			// Always forced to fixed values by newConfig; never in the DSN.
			continue
		case "host", "port", "user", "sslsni":
			// These have defaults; only include when given explicitly.
			if !cfg.isset(k) {
				continue
			}
		}
		// Re-join multi-host/hostaddr/port lists.
		if k == "host" && len(cfg.multiHost) > 0 {
			m[k] += "," + strings.Join(cfg.multiHost, ",")
		}
		if k == "hostaddr" && len(cfg.multiHostaddr) > 0 {
			for _, ha := range cfg.multiHostaddr {
				m[k] += ","
				if ha != (netip.Addr{}) {
					m[k] += ha.String()
				}
			}
		}
		if k == "port" && len(cfg.multiPort) > 0 {
			for _, p := range cfg.multiPort {
				m[k] += "," + strconv.Itoa(int(p))
			}
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	for i, k := range keys {
		if i > 0 {
			b.WriteByte(' ')
		}
		b.WriteString(k)
		b.WriteByte('=')
		var (
			v     = m[k]
			nv    = make([]rune, 0, len(v)+2)
			quote = v == "" // empty values must be quoted to survive re-parsing
		)
		for _, c := range v {
			if c == ' ' {
				quote = true
			}
			if c == '\'' {
				nv = append(nv, '\\')
			}
			nv = append(nv, c)
		}
		if quote {
			b.WriteByte('\'')
		}
		b.WriteString(string(nv))
		if quote {
			b.WriteByte('\'')
		}
	}
	return b.String()
}
|
||||
|
||||
// isUTF8 recognizes all sorts of silly things as "UTF-8", like Postgres does:
// case is ignored and every non-alphanumeric character is stripped, so
// "UTF-8", "utf_8", and "Unicode" all count.
func isUTF8(name string) bool {
	var b strings.Builder
	for _, c := range name {
		switch {
		case c >= 'A' && c <= 'Z':
			b.WriteRune(c + ('a' - 'A')) // lowercase ASCII letters
		case (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'):
			b.WriteRune(c)
		// everything else is discarded
		}
	}
	norm := b.String()
	return norm == "utf8" || norm == "unicode"
}
|
||||
|
||||
// convertURL translates a postgres:// or postgresql:// URL into an equivalent
// key=value connection string. Values are single-quoted with quotes and
// backslashes escaped; empty components are omitted.
func convertURL(url string) (string, error) {
	u, err := neturl.Parse(url)
	if err != nil {
		return "", err
	}

	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	var (
		pairs   []string
		escaper = strings.NewReplacer(`'`, `\'`, `\`, `\\`)
		add     = func(key, val string) {
			if val == "" {
				return // skip empty components
			}
			pairs = append(pairs, key+"='"+escaper.Replace(val)+"'")
		}
	)

	if ui := u.User; ui != nil {
		add("user", ui.Username())
		pw, _ := ui.Password()
		add("password", pw)
	}

	// Host may or may not include a port.
	if host, port, err := net.SplitHostPort(u.Host); err != nil {
		add("host", u.Host)
	} else {
		add("host", host)
		add("port", port)
	}

	if u.Path != "" {
		add("dbname", u.Path[1:]) // strip the leading "/"
	}

	q := u.Query()
	for k := range q {
		add(k, q.Get(k))
	}

	sort.Strings(pairs) // Makes testing easier (not a performance concern)
	return strings.Join(pairs, " "), nil
}
|
||||
377
vendor/github.com/lib/pq/copy.go
generated
vendored
Normal file
377
vendor/github.com/lib/pq/copy.go
generated
vendored
Normal file
@@ -0,0 +1,377 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
)
|
||||
|
||||
// Sentinel errors returned by the COPY support.
var (
	errCopyInClosed               = errors.New("pq: copyin statement has already been closed")
	errBinaryCopyNotSupported     = errors.New("pq: only text format supported for COPY")
	errCopyToNotSupported         = errors.New("pq: COPY TO is not supported")
	errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
	errCopyInProgress             = errors.New("pq: COPY in progress")
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with Tx.Prepare().
|
||||
// The target table should be visible in search_path.
|
||||
//
|
||||
// It copies all columns if the list of columns is empty.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
b := bytes.NewBufferString("COPY ")
|
||||
BufferQuoteIdentifier(table, b)
|
||||
makeStmt(b, columns...)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
b := bytes.NewBufferString("COPY ")
|
||||
BufferQuoteIdentifier(schema, b)
|
||||
b.WriteRune('.')
|
||||
BufferQuoteIdentifier(table, b)
|
||||
makeStmt(b, columns...)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func makeStmt(b *bytes.Buffer, columns ...string) {
|
||||
if len(columns) == 0 {
|
||||
b.WriteString(" FROM STDIN")
|
||||
return
|
||||
}
|
||||
b.WriteString(" (")
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
BufferQuoteIdentifier(col, b)
|
||||
}
|
||||
b.WriteString(") FROM STDIN")
|
||||
}
|
||||
|
||||
// copyin implements driver.Stmt for a COPY FROM STDIN stream.
type copyin struct {
	cn      *conn
	buffer  []byte // pending CopyData message: tag byte + 4 length bytes + row data
	rowData chan []byte
	done    chan bool // signalled by resploop when the COPY terminates

	closed bool

	// mu guards the error and result shared with the resploop goroutine.
	mu struct {
		sync.Mutex
		err error
		driver.Result
	}
}
|
||||
|
||||
// ciBufferSize is the initial capacity of the CopyData buffer.
const ciBufferSize = 64 * 1024

// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
// prepareCopyIn starts a COPY FROM STDIN by sending q as a simple query and
// waiting for the server's CopyInResponse. On success it starts the resploop
// goroutine and returns a copyin statement; on failure it aborts the COPY
// with CopyFail before returning the error.
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, resErr error) {
	if !cn.isInTransaction() {
		return nil, errCopyNotSupportedOutsideTxn
	}

	ci := &copyin{
		cn:      cn,
		buffer:  make([]byte, 0, ciBufferSize),
		rowData: make(chan []byte),
		done:    make(chan bool, 1),
	}
	// add CopyData identifier + 4 bytes for message length
	ci.buffer = append(ci.buffer, byte(proto.CopyDataRequest), 0, 0, 0, 0)

	b := cn.writeBuf(proto.Query)
	b.string(q)
	err := cn.send(b)
	if err != nil {
		return nil, err
	}

awaitCopyInResponse:
	for {
		t, r, err := cn.recv1()
		if err != nil {
			return nil, err
		}
		switch t {
		case proto.CopyInResponse:
			// A non-zero format byte means binary COPY, which pq does not
			// support.
			if r.byte() != 0 {
				resErr = errBinaryCopyNotSupported
				break awaitCopyInResponse
			}
			go ci.resploop()
			return ci, nil
		case proto.CopyOutResponse:
			resErr = errCopyToNotSupported
			break awaitCopyInResponse
		case proto.ErrorResponse:
			resErr = parseError(r, q)
		case proto.ReadyForQuery:
			if resErr == nil {
				ci.setBad(driver.ErrBadConn)
				return nil, fmt.Errorf("pq: unexpected ReadyForQuery in response to COPY")
			}
			cn.processReadyForQuery(r)
			return nil, resErr
		default:
			ci.setBad(driver.ErrBadConn)
			return nil, fmt.Errorf("pq: unknown response for copy query: %q", t)
		}
	}

	// something went wrong, abort COPY before we return
	b = cn.writeBuf(proto.CopyFail)
	b.string(resErr.Error())
	err = cn.send(b)
	if err != nil {
		return nil, err
	}

	// Drain messages until the server acknowledges the abort.
	for {
		t, r, err := cn.recv1()
		if err != nil {
			return nil, err
		}

		switch t {
		case proto.CopyDoneResponse, proto.CommandComplete, proto.ErrorResponse:
			// Expected while unwinding; ignore.
		case proto.ReadyForQuery:
			// correctly aborted, we're done
			cn.processReadyForQuery(r)
			return nil, resErr
		default:
			ci.setBad(driver.ErrBadConn)
			return nil, fmt.Errorf("pq: unknown response for CopyFail: %q", t)
		}
	}
}
|
||||
|
||||
// flush writes one CopyData message to the connection. buf must start with
// the message tag byte followed by four bytes reserved for the length, which
// flush fills in (the length covers everything except the tag byte).
func (ci *copyin) flush(buf []byte) error {
	if len(buf)-1 > proto.MaxUint32 {
		return errors.New("pq: too many columns")
	}
	// set message length (without message identifier)
	binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))

	_, err := ci.cn.c.Write(buf)
	return err
}
|
||||
|
||||
// resploop runs in its own goroutine for the duration of the COPY, consuming
// server messages. It records the command result and any errors, and signals
// ci.done once the server sends ReadyForQuery or the connection breaks.
func (ci *copyin) resploop() {
	for {
		var r readBuf
		t, err := ci.cn.recvMessage(&r)
		if err != nil {
			ci.setBad(driver.ErrBadConn)
			ci.setError(err)
			ci.done <- true
			return
		}
		switch t {
		case proto.CommandComplete:
			// complete
			res, _, err := ci.cn.parseComplete(r.string())
			if err != nil {
				panic(err)
			}
			ci.setResult(res)
		case proto.NoticeResponse:
			// Forward notices to the registered handler, if any.
			if n := ci.cn.noticeHandler; n != nil {
				n(parseError(&r, ""))
			}
		case proto.ReadyForQuery:
			ci.cn.processReadyForQuery(&r)
			ci.done <- true
			return
		case proto.ErrorResponse:
			err := parseError(&r, "")
			ci.setError(err)
		default:
			ci.setBad(driver.ErrBadConn)
			ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
			ci.done <- true
			return
		}
	}
}
|
||||
|
||||
// setBad marks the underlying connection as broken.
func (ci *copyin) setBad(err error) {
	ci.cn.err.set(err)
}
|
||||
|
||||
// getBad reports the connection-level error, if any.
func (ci *copyin) getBad() error {
	return ci.cn.err.get()
}
|
||||
|
||||
func (ci *copyin) err() error {
|
||||
ci.mu.Lock()
|
||||
err := ci.mu.err
|
||||
ci.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.mu.Lock()
|
||||
if ci.mu.err == nil {
|
||||
ci.mu.err = err
|
||||
}
|
||||
ci.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) setResult(result driver.Result) {
|
||||
ci.mu.Lock()
|
||||
ci.mu.Result = result
|
||||
ci.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) getResult() driver.Result {
|
||||
ci.mu.Lock()
|
||||
result := ci.mu.Result
|
||||
ci.mu.Unlock()
|
||||
if result == nil {
|
||||
return driver.RowsAffected(0)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NumInput implements driver.Stmt; -1 tells database/sql the driver does not
// know the number of placeholders.
func (ci *copyin) NumInput() int {
	return -1
}
|
||||
|
||||
// Query implements driver.Stmt; a COPY statement cannot return rows.
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
	return nil, ErrNotSupported
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (driver.Result, error) {
	if ci.closed {
		return nil, errCopyInClosed
	}
	if err := ci.getBad(); err != nil {
		return nil, err
	}
	if err := ci.err(); err != nil {
		return nil, err
	}

	// Exec(nil) terminates the COPY and reports the final result.
	if len(v) == 0 {
		if err := ci.Close(); err != nil {
			return driver.RowsAffected(0), err
		}
		return ci.getResult(), nil
	}

	// Encode one row: tab-separated values, newline-terminated.
	var (
		numValues = len(v)
		err       error
	)
	for i, value := range v {
		ci.buffer, err = appendEncodedText(ci.buffer, value)
		if err != nil {
			return nil, ci.cn.handleError(err)
		}
		if i < numValues-1 {
			ci.buffer = append(ci.buffer, '\t')
		}
	}

	ci.buffer = append(ci.buffer, '\n')

	if len(ci.buffer) > ciBufferFlushSize {
		err := ci.flush(ci.buffer)
		if err != nil {
			return nil, ci.cn.handleError(err)
		}
		// reset buffer, keep bytes for message identifier and length
		ci.buffer = ci.buffer[:5]
	}

	return driver.RowsAffected(0), nil
}
|
||||
|
||||
// CopyData inserts a raw string into the COPY stream. The insert is
// asynchronous and CopyData can return errors from previous CopyData calls to
// the same COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) CopyData(ctx context.Context, line string) (driver.Result, error) {
	if ci.closed {
		return nil, errCopyInClosed
	}
	// Cancel the in-flight COPY when ctx is cancelled.
	if finish := ci.cn.watchCancel(ctx); finish != nil {
		defer finish()
	}
	if err := ci.getBad(); err != nil {
		return nil, err
	}
	if err := ci.err(); err != nil {
		return nil, err
	}

	// line is appended as-is and newline-terminated; no encoding is done.
	ci.buffer = append(ci.buffer, []byte(line)...)
	ci.buffer = append(ci.buffer, '\n')

	if len(ci.buffer) > ciBufferFlushSize {
		err := ci.flush(ci.buffer)
		if err != nil {
			return nil, ci.cn.handleError(err)
		}

		// reset buffer, keep bytes for message identifier and length
		ci.buffer = ci.buffer[:5]
	}

	return driver.RowsAffected(0), nil
}
|
||||
|
||||
// Close flushes any buffered rows, sends CopyDone, and waits for resploop to
// observe the server's completion. It is safe to call more than once; later
// calls are no-ops.
func (ci *copyin) Close() error {
	if ci.closed { // Don't do anything, we're already closed
		return nil
	}
	ci.closed = true

	if err := ci.getBad(); err != nil {
		return err
	}

	if len(ci.buffer) > 0 {
		err := ci.flush(ci.buffer)
		if err != nil {
			return ci.cn.handleError(err)
		}
	}
	// Avoid touching the scratch buffer as resploop could be using it.
	err := ci.cn.sendSimpleMessage(proto.CopyDoneRequest)
	if err != nil {
		return ci.cn.handleError(err)
	}

	// Wait until resploop sees ReadyForQuery (or a fatal error).
	<-ci.done
	ci.cn.inCopy = false

	if err := ci.err(); err != nil {
		return err
	}
	return nil
}
|
||||
59
vendor/github.com/lib/pq/deprecated.go
generated
vendored
Normal file
59
vendor/github.com/lib/pq/deprecated.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package pq
|
||||
|
||||
// PGError is an interface used by previous versions of pq. It is kept only so
// that old code which type-asserts against it keeps compiling; nothing in
// this package returns it.
//
// Deprecated: use the Error type. This is never used.
type PGError interface {
	// Error returns the error message (error interface).
	Error() string
	// Fatal reports whether the error severity was FATAL.
	Fatal() bool
	// Get returns the raw protocol error field for code k.
	Get(k byte) (v string)
}
|
||||
|
||||
// Get implements the legacy PGError interface.
|
||||
//
|
||||
// Deprecated: new code should use the fields of the Error struct directly.
|
||||
func (e *Error) Get(k byte) (v string) {
|
||||
switch k {
|
||||
case 'S':
|
||||
return e.Severity
|
||||
case 'C':
|
||||
return string(e.Code)
|
||||
case 'M':
|
||||
return e.Message
|
||||
case 'D':
|
||||
return e.Detail
|
||||
case 'H':
|
||||
return e.Hint
|
||||
case 'P':
|
||||
return e.Position
|
||||
case 'p':
|
||||
return e.InternalPosition
|
||||
case 'q':
|
||||
return e.InternalQuery
|
||||
case 'W':
|
||||
return e.Where
|
||||
case 's':
|
||||
return e.Schema
|
||||
case 't':
|
||||
return e.Table
|
||||
case 'c':
|
||||
return e.Column
|
||||
case 'd':
|
||||
return e.DataTypeName
|
||||
case 'n':
|
||||
return e.Constraint
|
||||
case 'F':
|
||||
return e.File
|
||||
case 'L':
|
||||
return e.Line
|
||||
case 'R':
|
||||
return e.Routine
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ParseURL converts a url to a connection string for driver.Open.
|
||||
//
|
||||
// Deprecated: directly passing an URL to sql.Open("postgres", "postgres://...")
|
||||
// now works, and calling this manually is no longer required.
|
||||
func ParseURL(url string) (string, error) { return convertURL(url) }
|
||||
139
vendor/github.com/lib/pq/doc.go
generated
vendored
Normal file
139
vendor/github.com/lib/pq/doc.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
Package pq is a Go PostgreSQL driver for database/sql.
|
||||
|
||||
Most clients will use the database/sql package instead of using this package
|
||||
directly. For example:
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
func main() {
|
||||
dsn := "user=pqgo dbname=pqgo sslmode=verify-full"
|
||||
db, err := sql.Open("postgres", dsn)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
age := 21
|
||||
rows, err := db.Query("select name from users where age = $1", age)
|
||||
// …
|
||||
}
|
||||
|
||||
You can also connect with an URL:
|
||||
|
||||
dsn := "postgres://pqgo:password@localhost/pqgo?sslmode=verify-full"
|
||||
db, err := sql.Open("postgres", dsn)
|
||||
|
||||
# Connection String Parameters
|
||||
|
||||
See [NewConfig].
|
||||
|
||||
# Queries
|
||||
|
||||
database/sql does not dictate any specific format for parameter placeholders,
|
||||
and pq uses the PostgreSQL-native ordinal markers ($1, $2, etc.). The same
|
||||
placeholder can be used more than once:
|
||||
|
||||
rows, err := db.Query(
|
||||
`select * from users where name = $1 or age between $2 and $2 + 3`,
|
||||
"Duck", 64)
|
||||
|
||||
pq does not support [sql.Result.LastInsertId]. Use the RETURNING clause with a
|
||||
Query or QueryRow call instead to return the identifier:
|
||||
|
||||
row := db.QueryRow(`insert into users(name, age) values('Scrooge McDuck', 93) returning id`)
|
||||
|
||||
var userid int
|
||||
err := row.Scan(&userid)
|
||||
|
||||
# Data Types
|
||||
|
||||
Parameters pass through [driver.DefaultParameterConverter] before they are handled
|
||||
by this package. When the binary_parameters connection option is enabled, []byte
|
||||
values are sent directly to the backend as data in binary format.
|
||||
|
||||
This package returns the following types for values from the PostgreSQL backend:
|
||||
|
||||
- integer types smallint, integer, and bigint are returned as int64
|
||||
- floating-point types real and double precision are returned as float64
|
||||
- character types char, varchar, and text are returned as string
|
||||
- temporal types date, time, timetz, timestamp, and timestamptz are
|
||||
returned as time.Time
|
||||
- the boolean type is returned as bool
|
||||
- the bytea type is returned as []byte
|
||||
|
||||
All other types are returned directly from the backend as []byte values in text format.
|
||||
|
||||
# Errors
|
||||
|
||||
pq may return errors of type [*pq.Error] which contain error details:
|
||||
|
||||
pqErr := new(pq.Error)
|
||||
if errors.As(err, &pqErr) {
|
||||
fmt.Println("pq error:", pqErr.Code.Name())
|
||||
}
|
||||
|
||||
# Bulk imports
|
||||
|
||||
You can perform bulk imports by preparing a statement returned by [CopyIn] (or
|
||||
[CopyInSchema]) in an explicit transaction ([sql.Tx]). The returned statement
|
||||
handle can then be repeatedly "executed" to copy data into the target table.
|
||||
After all data has been processed you should call Exec() once with no arguments
|
||||
to flush all buffered data. Any call to Exec() might return an error which
|
||||
should be handled appropriately, but because of the internal buffering an error
|
||||
returned by Exec() might not be related to the data passed in the call that
|
||||
failed.
|
||||
|
||||
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
|
||||
explicit transaction in pq.
|
||||
|
||||
# Notifications
|
||||
|
||||
PostgreSQL supports a simple publish/subscribe model using PostgreSQL's [NOTIFY] mechanism.
|
||||
|
||||
To start listening for notifications, you first have to open a new connection to
|
||||
the database by calling [NewListener]. This connection can not be used for
|
||||
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
|
||||
channel"; once a notification channel is open, a notification generated on that
|
||||
channel will effect a send on the Listener.Notify channel. A notification
|
||||
channel will remain open until Unlisten is called, though connection loss might
|
||||
result in some notifications being lost. To solve this problem, Listener sends a
|
||||
nil pointer over the Notify channel any time the connection is re-established
|
||||
following a connection loss. The application can get information about the state
|
||||
of the underlying connection by setting an event callback in the call to
|
||||
NewListener.
|
||||
|
||||
A single [Listener] can safely be used from concurrent goroutines, which means
|
||||
that there is often no need to create more than one Listener in your
|
||||
application. However, a Listener is always connected to a single database, so
|
||||
you will need to create a new Listener instance for every database you want to
|
||||
receive notifications in.
|
||||
|
||||
The channel name in both Listen and Unlisten is case sensitive, and can contain
|
||||
any characters legal in an [identifier]. Note that the channel name will be
|
||||
truncated to 63 bytes by the PostgreSQL server.
|
||||
|
||||
You can find a complete, working example of Listener usage at [cmd/pqlisten].
|
||||
|
||||
# Kerberos Support
|
||||
|
||||
If you need support for Kerberos authentication, add the following to your main
|
||||
package:
|
||||
|
||||
import "github.com/lib/pq/auth/kerberos"
|
||||
|
||||
func init() {
|
||||
pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() })
|
||||
}
|
||||
|
||||
This package is in a separate module so that users who don't need Kerberos don't
|
||||
have to add unnecessary dependencies.
|
||||
|
||||
[cmd/pqlisten]: https://github.com/lib/pq/tree/master/cmd/pqlisten
|
||||
[identifier]: http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
|
||||
[NOTIFY]: http://www.postgresql.org/docs/current/static/sql-notify.html
|
||||
*/
|
||||
package pq
|
||||
612
vendor/github.com/lib/pq/encode.go
generated
vendored
Normal file
612
vendor/github.com/lib/pq/encode.go
generated
vendored
Normal file
@@ -0,0 +1,612 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
// time2400Regex matches a "24:00" time-of-day value (with optional ":00"
// seconds, zero fractional seconds, and a trailing zone designator), which
// Go's time.Parse rejects; parseTime uses it to special-case that value.
var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`)
|
||||
|
||||
func binaryEncode(x any) ([]byte, error) {
|
||||
switch v := x.(type) {
|
||||
case []byte:
|
||||
return v, nil
|
||||
default:
|
||||
return encode(x, oid.T_unknown)
|
||||
}
|
||||
}
|
||||
|
||||
func encode(x any, pgtypOid oid.Oid) ([]byte, error) {
|
||||
switch v := x.(type) {
|
||||
case int64:
|
||||
return strconv.AppendInt(nil, v, 10), nil
|
||||
case float64:
|
||||
return strconv.AppendFloat(nil, v, 'f', -1, 64), nil
|
||||
case []byte:
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if pgtypOid == oid.T_bytea {
|
||||
return encodeBytea(v), nil
|
||||
}
|
||||
return v, nil
|
||||
case string:
|
||||
if pgtypOid == oid.T_bytea {
|
||||
return encodeBytea([]byte(v)), nil
|
||||
}
|
||||
return []byte(v), nil
|
||||
case bool:
|
||||
return strconv.AppendBool(nil, v), nil
|
||||
case time.Time:
|
||||
return formatTS(v), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("pq: encode: unknown type for %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func decode(ps *parameterStatus, s []byte, typ oid.Oid, f format) (any, error) {
|
||||
switch f {
|
||||
case formatBinary:
|
||||
return binaryDecode(s, typ)
|
||||
case formatText:
|
||||
return textDecode(ps, s, typ)
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
func binaryDecode(s []byte, typ oid.Oid) (any, error) {
|
||||
switch typ {
|
||||
case oid.T_bytea:
|
||||
return s, nil
|
||||
case oid.T_int8:
|
||||
return int64(binary.BigEndian.Uint64(s)), nil
|
||||
case oid.T_int4:
|
||||
return int64(int32(binary.BigEndian.Uint32(s))), nil
|
||||
case oid.T_int2:
|
||||
return int64(int16(binary.BigEndian.Uint16(s))), nil
|
||||
case oid.T_uuid:
|
||||
b, err := decodeUUIDBinary(s)
|
||||
if err != nil {
|
||||
err = errors.New("pq: " + err.Error())
|
||||
}
|
||||
return b, err
|
||||
default:
|
||||
return nil, fmt.Errorf("pq: don't know how to decode binary parameter of type %d", uint32(typ))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// decodeUUIDBinary converts the 16-byte wire representation of a uuid into
// its canonical 36-character text form (8-4-4-4-12 hex groups separated by
// dashes).
func decodeUUIDBinary(src []byte) ([]byte, error) {
	if len(src) != 16 {
		return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
	}

	// Byte ranges of the five dash-separated groups.
	groups := [][2]int{{0, 4}, {4, 6}, {6, 8}, {8, 10}, {10, 16}}
	out := make([]byte, 0, 36)
	for i, g := range groups {
		if i > 0 {
			out = append(out, '-')
		}
		chunk := make([]byte, hex.EncodedLen(g[1]-g[0]))
		hex.Encode(chunk, src[g[0]:g[1]])
		out = append(out, chunk...)
	}
	return out, nil
}
|
||||
|
||||
// textDecode parses a text-format wire value s of backend type typ into the
// Go type this driver exposes (string, []byte, time.Time, bool, int64, or
// float64). Types without an explicit case fall through and are returned as
// the raw []byte text.
func textDecode(ps *parameterStatus, s []byte, typ oid.Oid) (any, error) {
	switch typ {
	case oid.T_char, oid.T_bpchar, oid.T_varchar, oid.T_text:
		return string(s), nil
	case oid.T_bytea:
		b, err := parseBytea(s)
		if err != nil {
			err = errors.New("pq: " + err.Error())
		}
		return b, err
	case oid.T_timestamptz:
		// Only timestamptz is interpreted relative to the session zone.
		return parseTS(ps.currentLocation, string(s))
	case oid.T_timestamp, oid.T_date:
		return parseTS(nil, string(s))
	case oid.T_time:
		return parseTime("15:04:05", typ, s)
	case oid.T_timetz:
		return parseTime("15:04:05-07", typ, s)
	case oid.T_bool:
		// Text booleans start with 't' for true; anything else is false.
		return s[0] == 't', nil
	case oid.T_int8, oid.T_int4, oid.T_int2:
		i, err := strconv.ParseInt(string(s), 10, 64)
		if err != nil {
			err = errors.New("pq: " + err.Error())
		}
		return i, err
	case oid.T_float4, oid.T_float8:
		// We always use 64 bit parsing, regardless of whether the input text is for
		// a float4 or float8, because clients expect float64s for all float datatypes
		// and returning a 32-bit parsed float64 produces lossy results.
		f, err := strconv.ParseFloat(string(s), 64)
		if err != nil {
			err = errors.New("pq: " + err.Error())
		}
		return f, err
	}
	return s, nil
}
|
||||
|
||||
// appendEncodedText encodes item in text format as required by COPY
|
||||
// and appends to buf
|
||||
func appendEncodedText(buf []byte, x any) ([]byte, error) {
|
||||
switch v := x.(type) {
|
||||
case int64:
|
||||
return strconv.AppendInt(buf, v, 10), nil
|
||||
case float64:
|
||||
return strconv.AppendFloat(buf, v, 'f', -1, 64), nil
|
||||
case []byte:
|
||||
encodedBytea := encodeBytea(v)
|
||||
return appendEscapedText(buf, string(encodedBytea)), nil
|
||||
case string:
|
||||
return appendEscapedText(buf, v), nil
|
||||
case bool:
|
||||
return strconv.AppendBool(buf, v), nil
|
||||
case time.Time:
|
||||
return append(buf, formatTS(v)...), nil
|
||||
case nil:
|
||||
return append(buf, "\\N"...), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("pq: encode: unknown type for %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
// appendEscapedText appends text to buf, replacing the characters that are
// special inside COPY text data (backslash, newline, carriage return, tab)
// with their two-character backslash escape sequences.
func appendEscapedText(buf []byte, text string) []byte {
	// Fast path: nothing to escape, copy the string wholesale.
	if !strings.ContainsAny(text, "\\\n\r\t") {
		return append(buf, text...)
	}

	out := buf
	for i := 0; i < len(text); i++ {
		switch c := text[i]; c {
		case '\\':
			out = append(out, '\\', '\\')
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		case '\t':
			out = append(out, '\\', 't')
		default:
			out = append(out, c)
		}
	}
	return out
}
|
||||
|
||||
// parseTime parses a time or timetz value using layout f, handling two
// Postgres quirks that Go's time.Parse does not: variable-precision zone
// offsets (hh, hh:mm, or hh:mm:ss) and the special "24:00" time of day.
func parseTime(f string, typ oid.Oid, s []byte) (time.Time, error) {
	str := string(s)

	// Check for a minute and second offset in the timezone.
	// Each trailing ":nn" group found widens the layout by one ":00".
	if typ == oid.T_timestamptz || typ == oid.T_timetz {
		for i := 3; i <= 6; i += 3 {
			if str[len(str)-i] == ':' {
				f += ":00"
				continue
			}
			break
		}
	}

	// Special case for 24:00 time.
	// Unfortunately, golang does not parse 24:00 as a proper time.
	// In this case, we want to try "round to the next day", to differentiate.
	// As such, we find if the 24:00 time matches at the beginning; if so,
	// we default it back to 00:00 but add a day later.
	var is2400Time bool
	switch typ {
	case oid.T_timetz, oid.T_time:
		if matches := time2400Regex.FindStringSubmatch(str); matches != nil {
			// Concatenate timezone information at the back.
			str = "00:00:00" + str[len(matches[1]):]
			is2400Time = true
		}
	}
	t, err := time.Parse(f, str)
	if err != nil {
		return time.Time{}, errors.New("pq: " + err.Error())
	}
	// Roll the substituted 00:00 forward to the next day.
	if is2400Time {
		t = t.Add(24 * time.Hour)
	}
	return t, nil
}
|
||||
|
||||
// errInvalidTimestamp reports a structurally malformed timestamp string.
var errInvalidTimestamp = errors.New("invalid timestamp")

// timestampParser accumulates the first error encountered while picking a
// timestamp string apart; once err is set, further calls are no-ops, so the
// caller can run a whole sequence of steps and check err once at the end.
type timestampParser struct {
	err error
}

// expect records an error unless str has exactly char at index pos.
func (p *timestampParser) expect(str string, char byte, pos int) {
	if p.err != nil {
		return
	}
	if pos >= len(str) {
		p.err = errInvalidTimestamp
		return
	}
	if got := str[pos]; got != char {
		p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, got)
	}
}

// mustAtoi parses str[begin:end] as a decimal integer, recording an error
// (and returning 0) when the range is out of bounds or not numeric.
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
	if p.err != nil {
		return 0
	}
	if begin < 0 || end < 0 || begin > end || end > len(str) {
		p.err = errInvalidTimestamp
		return 0
	}
	n, convErr := strconv.Atoi(str[begin:end])
	if convErr != nil {
		p.err = fmt.Errorf("expected number; got '%v'", str)
		return 0
	}
	return n
}
|
||||
|
||||
// The location cache caches the time zones typically used by the client.
|
||||
type locationCache struct {
|
||||
cache map[int]*time.Location
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// All connections share the same list of timezones. Benchmarking shows that
|
||||
// about 5% speed could be gained by putting the cache in the connection and
|
||||
// losing the mutex, at the cost of a small amount of memory and a somewhat
|
||||
// significant increase in code complexity.
|
||||
var globalLocationCache = newLocationCache()
|
||||
|
||||
func newLocationCache() *locationCache {
|
||||
return &locationCache{cache: make(map[int]*time.Location)}
|
||||
}
|
||||
|
||||
// Returns the cached timezone for the specified offset, creating and caching
|
||||
// it if necessary.
|
||||
func (c *locationCache) getLocation(offset int) *time.Location {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
location, ok := c.cache[offset]
|
||||
if !ok {
|
||||
location = time.FixedZone("", offset)
|
||||
c.cache[offset] = location
|
||||
}
|
||||
|
||||
return location
|
||||
}
|
||||
|
||||
// State backing EnableInfinityTs: written once before any connection is
// opened, then read on every timestamp encode/decode.
var (
	infinityTSEnabled = false

	infinityTSNegative time.Time
	infinityTSPositive time.Time
)

// Panic messages used by EnableInfinityTs.
const (
	infinityTSEnabledAlready        = "pq: infinity timestamp enabled already"
	infinityTSNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)

// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
	if infinityTSEnabled {
		panic(infinityTSEnabledAlready)
	}
	if !negative.Before(positive) {
		panic(infinityTSNegativeMustBeSmaller)
	}
	infinityTSEnabled = true
	infinityTSNegative = negative
	infinityTSPositive = positive
}

// Testing might want to toggle infinityTSEnabled
func disableInfinityTS() {
	infinityTSEnabled = false
}
|
||||
|
||||
// This is a time function specific to the Postgres default DateStyle
|
||||
// setting ("ISO, MDY"), the only one we currently support. This
|
||||
// accounts for the discrepancies between the parsing available with
|
||||
// time.Parse and the Postgres date formatting quirks.
|
||||
func parseTS(currentLocation *time.Location, str string) (any, error) {
|
||||
switch str {
|
||||
case "-infinity":
|
||||
if infinityTSEnabled {
|
||||
return infinityTSNegative, nil
|
||||
}
|
||||
return []byte(str), nil
|
||||
case "infinity":
|
||||
if infinityTSEnabled {
|
||||
return infinityTSPositive, nil
|
||||
}
|
||||
return []byte(str), nil
|
||||
}
|
||||
t, err := ParseTimestamp(currentLocation, str)
|
||||
if err != nil {
|
||||
err = errors.New("pq: " + err.Error())
|
||||
}
|
||||
return t, err
|
||||
}
|
||||
|
||||
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
	p := timestampParser{}

	// The year field has variable width, so locate the first '-' to find
	// where the month starts; every later offset is derived from monSep.
	monSep := strings.IndexRune(str, '-')
	// this is Gregorian year, not ISO Year
	// In Gregorian system, the year 1 BC is followed by AD 1
	year := p.mustAtoi(str, 0, monSep)
	daySep := monSep + 3
	month := p.mustAtoi(str, monSep+1, daySep)
	p.expect(str, '-', daySep)
	timeSep := daySep + 3
	day := p.mustAtoi(str, daySep+1, timeSep)

	// Shortest possible input is a bare date ("YYYY…-01-01"), optionally
	// followed by " BC"; anything longer must carry a time-of-day part.
	minLen := monSep + len("01-01") + 1

	isBC := strings.HasSuffix(str, " BC")
	if isBC {
		minLen += 3
	}

	var hour, minute, second int
	if len(str) > minLen {
		p.expect(str, ' ', timeSep)
		minSep := timeSep + 3
		p.expect(str, ':', minSep)
		hour = p.mustAtoi(str, timeSep+1, minSep)
		secSep := minSep + 3
		p.expect(str, ':', secSep)
		minute = p.mustAtoi(str, minSep+1, secSep)
		secEnd := secSep + 3
		second = p.mustAtoi(str, secSep+1, secEnd)
	}
	remainderIdx := monSep + len("01-01 00:00:00") + 1
	// Three optional (but ordered) sections follow: the
	// fractional seconds, the time zone offset, and the BC
	// designation. We set them up here and adjust the other
	// offsets if the preceding sections exist.

	nanoSec := 0
	tzOff := 0

	if remainderIdx < len(str) && str[remainderIdx] == '.' {
		fracStart := remainderIdx + 1
		fracOff := strings.IndexAny(str[fracStart:], "-+Z ")
		if fracOff < 0 {
			fracOff = len(str) - fracStart
		}
		fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
		// Scale the fraction by its printed width to get nanoseconds.
		nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))

		remainderIdx += fracOff + 1
	}
	if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
		// time zone separator is always '-' or '+' or 'Z' (UTC is +00)
		var tzSign int
		switch c := str[tzStart]; c {
		case '-':
			tzSign = -1
		case '+':
			tzSign = +1
		default:
			return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
		}
		tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
		remainderIdx += 3
		// Minutes and seconds of the offset are each optional ":nn" groups.
		var tzMin, tzSec int
		if remainderIdx < len(str) && str[remainderIdx] == ':' {
			tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
			remainderIdx += 3
		}
		if remainderIdx < len(str) && str[remainderIdx] == ':' {
			tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
			remainderIdx += 3
		}
		tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
	} else if tzStart < len(str) && str[tzStart] == 'Z' {
		// time zone Z separator indicates UTC is +00
		remainderIdx += 1
	}

	var isoYear int

	if isBC {
		// Convert Gregorian "N BC" to the ISO/proleptic year 1-N.
		isoYear = 1 - year
		remainderIdx += 3
	} else {
		isoYear = year
	}
	// Anything left over at this point is garbage.
	if remainderIdx < len(str) {
		return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
	}
	t := time.Date(isoYear, time.Month(month), day,
		hour, minute, second, nanoSec,
		globalLocationCache.getLocation(tzOff))

	if currentLocation != nil {
		// Set the location of the returned Time based on the session's
		// TimeZone value, but only if the local time zone database agrees with
		// the remote database on the offset.
		lt := t.In(currentLocation)
		_, newOff := lt.Zone()
		if newOff == tzOff {
			t = lt
		}
	}

	return t, p.err
}
|
||||
|
||||
// formatTS formats t into a format postgres understands.
|
||||
func formatTS(t time.Time) []byte {
|
||||
if infinityTSEnabled {
|
||||
// t <= -infinity : ! (t > -infinity)
|
||||
if !t.After(infinityTSNegative) {
|
||||
return []byte("-infinity")
|
||||
}
|
||||
// t >= infinity : ! (!t < infinity)
|
||||
if !t.Before(infinityTSPositive) {
|
||||
return []byte("infinity")
|
||||
}
|
||||
}
|
||||
return FormatTimestamp(t)
|
||||
}
|
||||
|
||||
// FormatTimestamp formats t into Postgres' text format for timestamps.
|
||||
func FormatTimestamp(t time.Time) []byte {
|
||||
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
|
||||
// minus sign preferred by Go.
|
||||
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
|
||||
bc := false
|
||||
if t.Year() <= 0 {
|
||||
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
|
||||
t = t.AddDate((-t.Year())*2+1, 0, 0)
|
||||
bc = true
|
||||
}
|
||||
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
|
||||
|
||||
_, offset := t.Zone()
|
||||
offset %= 60
|
||||
if offset != 0 {
|
||||
// RFC3339Nano already printed the minus sign
|
||||
if offset < 0 {
|
||||
offset = -offset
|
||||
}
|
||||
|
||||
b = append(b, ':')
|
||||
if offset < 10 {
|
||||
b = append(b, '0')
|
||||
}
|
||||
b = strconv.AppendInt(b, int64(offset), 10)
|
||||
}
|
||||
|
||||
if bc {
|
||||
b = append(b, " BC"...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// parseBytea parses a bytea value received from the server. Both the "hex"
// format ("\x" followed by hex digits) and the legacy "escape" format
// (backslash escapes mixed with raw bytes) are supported.
func parseBytea(s []byte) ([]byte, error) {
	if bytes.HasPrefix(s, []byte(`\x`)) {
		// bytea_output = hex
		payload := s[2:]
		out := make([]byte, hex.DecodedLen(len(payload)))
		if _, err := hex.Decode(out, payload); err != nil {
			return nil, err
		}
		return out, nil
	}

	// bytea_output = escape
	var out []byte
	for len(s) > 0 {
		if s[0] != '\\' {
			// A raw byte: copy the whole run up to the next backslash
			// (or the end of the input) in one go.
			i := bytes.IndexByte(s, '\\')
			if i == -1 {
				out = append(out, s...)
				break
			}
			out = append(out, s[:i]...)
			s = s[i:]
			continue
		}
		// "\\" is an escaped backslash.
		if len(s) >= 2 && s[1] == '\\' {
			out = append(out, '\\')
			s = s[2:]
			continue
		}
		// Otherwise a backslash must introduce a three-digit octal escape.
		if len(s) < 4 {
			return nil, fmt.Errorf("invalid bytea sequence %v", s)
		}
		n, err := strconv.ParseUint(string(s[1:4]), 8, 8)
		if err != nil {
			return nil, fmt.Errorf("could not parse bytea value: %w", err)
		}
		out = append(out, byte(n))
		s = s[4:]
	}

	return out, nil
}
|
||||
|
||||
// encodeBytea renders v in Postgres' hex bytea format: a "\x" prefix
// followed by the hex digits of v.
func encodeBytea(v []byte) []byte {
	out := make([]byte, hex.EncodedLen(len(v))+2)
	out[0], out[1] = '\\', 'x'
	hex.Encode(out[2:], v)
	return out
}
|
||||
|
||||
// NullTime represents a [time.Time] that may be null.
// NullTime implements the [sql.Scanner] interface so
// it can be used as a scan destination, similar to [sql.NullString].
//
// Deprecated: this is an alias for [sql.NullTime]; new code should use the
// standard-library type directly.
type NullTime = sql.NullTime
|
||||
637
vendor/github.com/lib/pq/error.go
generated
vendored
Normal file
637
vendor/github.com/lib/pq/error.go
generated
vendored
Normal file
@@ -0,0 +1,637 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// [pq.Error.Severity] values. These are the severity strings of the
// PostgreSQL error/notice message fields.
const (
	Efatal   = "FATAL"
	Epanic   = "PANIC"
	Ewarning = "WARNING"
	Enotice  = "NOTICE"
	Edebug   = "DEBUG"
	Einfo    = "INFO"
	Elog     = "LOG"
)
|
||||
|
||||
// Error represents an error communicating with the server.
//
// The [Error] method only returns the error message and error code:
//
//	pq: invalid input syntax for type json (22P02)
//
// The [ErrorWithDetail] method also includes the error Detail, Hint, and
// location context (if any):
//
//	ERROR: invalid input syntax for type json (22P02)
//	DETAIL: Token "asd" is invalid.
//	CONTEXT: line 5, column 8:
//
//	    3 | 'def',
//	    4 | 123,
//	    5 | 'foo', 'asd'::jsonb
//	               ^
//
// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
type Error struct {
	// [Efatal], [Epanic], [Ewarning], [Enotice], [Edebug], [Einfo], or [Elog].
	// Always present.
	Severity string

	// SQLSTATE code. Always present.
	Code ErrorCode

	// Primary human-readable error message. This should be accurate but terse
	// (typically one line). Always present.
	Message string

	// Optional secondary error message carrying more detail about the problem.
	// Might run to multiple lines.
	Detail string

	// Optional suggestion what to do about the problem. This is intended to
	// differ from Detail in that it offers advice (potentially inappropriate)
	// rather than hard facts. Might run to multiple lines.
	Hint string

	// Error position as an index into the original query string, as decimal
	// ASCII integer. The first character has index 1, and positions are
	// measured in characters not bytes.
	Position string

	// This is defined the same as the Position field, but it is used when the
	// cursor position refers to an internally generated command rather than the
	// one submitted by the client. The InternalQuery field will always appear
	// when this field appears.
	InternalPosition string

	// Text of a failed internally-generated command. This could be, for
	// example, an SQL query issued by a PL/pgSQL function.
	InternalQuery string

	// An indication of the context in which the error occurred. Presently this
	// includes a call stack traceback of active procedural language functions
	// and internally-generated queries. The trace is one entry per line, most
	// recent first.
	Where string

	// If the error was associated with a specific database object, the name of
	// the schema containing that object, if any.
	Schema string

	// If the error was associated with a specific table, the name of the table.
	// (Refer to the schema name field for the name of the table's schema.)
	Table string

	// If the error was associated with a specific table column, the name of the
	// column. (Refer to the schema and table name fields to identify the
	// table.)
	Column string

	// If the error was associated with a specific data type, the name of the
	// data type. (Refer to the schema name field for the name of the data
	// type's schema.)
	DataTypeName string

	// If the error was associated with a specific constraint, the name of the
	// constraint. Refer to fields listed above for the associated table or
	// domain. (For this purpose, indexes are treated as constraints, even if
	// they weren't created with constraint syntax.)
	Constraint string

	// File name of the source-code location where the error was reported.
	File string

	// Line number of the source-code location where the error was reported.
	Line string

	// Name of the source-code routine reporting the error.
	Routine string

	// query is the statement text associated with this error; set by the
	// driver rather than reported by the server — presumably used to render
	// the position context shown by ErrorWithDetail (verify against callers).
	query string
}
|
||||
|
||||
// ErrorCode is a five-character error code.
type ErrorCode string

// Name returns a more human friendly rendering of the error code, namely the
// "condition name".
//
// See https://www.postgresql.org/docs/current/errcodes-appendix.html for
// details.
//
// The empty string is returned for codes not listed in errorCodeNames.
func (ec ErrorCode) Name() string {
	return errorCodeNames[ec]
}

// ErrorClass is only the class part of an error code.
type ErrorClass string

// Name returns the condition name of an error class. It is equivalent to the
// condition name of the "standard" error code (i.e. the one having the last
// three characters "000").
func (ec ErrorClass) Name() string {
	return errorCodeNames[ErrorCode(ec+"000")]
}

// Class returns the error class, e.g. "28".
//
// See https://www.postgresql.org/docs/current/errcodes-appendix.html for
// details.
//
// NOTE(review): this slices the first two characters and would panic on a
// code shorter than two characters; server-supplied codes are always five
// characters long.
func (ec ErrorCode) Class() ErrorClass {
	return ErrorClass(ec[0:2])
}
|
||||
|
||||
// errorCodeNames is a mapping between the five-character error codes and the
// human readable "condition names". It is derived from the list at
// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
// (the current version of that list lives at
// https://www.postgresql.org/docs/current/errcodes-appendix.html).
//
// The first two characters of a code identify its class; class comments below
// follow the appendix.
var errorCodeNames = map[ErrorCode]string{
	// Class 00 - Successful Completion
	"00000": "successful_completion",
	// Class 01 - Warning
	"01000": "warning",
	"0100C": "dynamic_result_sets_returned",
	"01008": "implicit_zero_bit_padding",
	"01003": "null_value_eliminated_in_set_function",
	"01007": "privilege_not_granted",
	"01006": "privilege_not_revoked",
	"01004": "string_data_right_truncation",
	"01P01": "deprecated_feature",
	// Class 02 - No Data (this is also a warning class per the SQL standard)
	"02000": "no_data",
	"02001": "no_additional_dynamic_result_sets_returned",
	// Class 03 - SQL Statement Not Yet Complete
	"03000": "sql_statement_not_yet_complete",
	// Class 08 - Connection Exception
	"08000": "connection_exception",
	"08003": "connection_does_not_exist",
	"08006": "connection_failure",
	"08001": "sqlclient_unable_to_establish_sqlconnection",
	"08004": "sqlserver_rejected_establishment_of_sqlconnection",
	"08007": "transaction_resolution_unknown",
	"08P01": "protocol_violation",
	// Class 09 - Triggered Action Exception
	"09000": "triggered_action_exception",
	// Class 0A - Feature Not Supported
	"0A000": "feature_not_supported",
	// Class 0B - Invalid Transaction Initiation
	"0B000": "invalid_transaction_initiation",
	// Class 0F - Locator Exception
	"0F000": "locator_exception",
	"0F001": "invalid_locator_specification",
	// Class 0L - Invalid Grantor
	"0L000": "invalid_grantor",
	"0LP01": "invalid_grant_operation",
	// Class 0P - Invalid Role Specification
	"0P000": "invalid_role_specification",
	// Class 0Z - Diagnostics Exception
	"0Z000": "diagnostics_exception",
	"0Z002": "stacked_diagnostics_accessed_without_active_handler",
	// Class 20 - Case Not Found
	"20000": "case_not_found",
	// Class 21 - Cardinality Violation
	"21000": "cardinality_violation",
	// Class 22 - Data Exception
	"22000": "data_exception",
	"2202E": "array_subscript_error",
	"22021": "character_not_in_repertoire",
	"22008": "datetime_field_overflow",
	"22012": "division_by_zero",
	"22005": "error_in_assignment",
	"2200B": "escape_character_conflict",
	"22022": "indicator_overflow",
	"22015": "interval_field_overflow",
	"2201E": "invalid_argument_for_logarithm",
	"22014": "invalid_argument_for_ntile_function",
	"22016": "invalid_argument_for_nth_value_function",
	"2201F": "invalid_argument_for_power_function",
	"2201G": "invalid_argument_for_width_bucket_function",
	"22018": "invalid_character_value_for_cast",
	"22007": "invalid_datetime_format",
	"22019": "invalid_escape_character",
	"2200D": "invalid_escape_octet",
	"22025": "invalid_escape_sequence",
	"22P06": "nonstandard_use_of_escape_character",
	"22010": "invalid_indicator_parameter_value",
	"22023": "invalid_parameter_value",
	"2201B": "invalid_regular_expression",
	"2201W": "invalid_row_count_in_limit_clause",
	"2201X": "invalid_row_count_in_result_offset_clause",
	"22009": "invalid_time_zone_displacement_value",
	"2200C": "invalid_use_of_escape_character",
	"2200G": "most_specific_type_mismatch",
	"22004": "null_value_not_allowed",
	"22002": "null_value_no_indicator_parameter",
	"22003": "numeric_value_out_of_range",
	"2200H": "sequence_generator_limit_exceeded",
	"22026": "string_data_length_mismatch",
	"22001": "string_data_right_truncation",
	"22011": "substring_error",
	"22027": "trim_error",
	"22024": "unterminated_c_string",
	"2200F": "zero_length_character_string",
	"22P01": "floating_point_exception",
	"22P02": "invalid_text_representation",
	"22P03": "invalid_binary_representation",
	"22P04": "bad_copy_file_format",
	"22P05": "untranslatable_character",
	"2200L": "not_an_xml_document",
	"2200M": "invalid_xml_document",
	"2200N": "invalid_xml_content",
	"2200S": "invalid_xml_comment",
	"2200T": "invalid_xml_processing_instruction",
	// Class 23 - Integrity Constraint Violation
	"23000": "integrity_constraint_violation",
	"23001": "restrict_violation",
	"23502": "not_null_violation",
	"23503": "foreign_key_violation",
	"23505": "unique_violation",
	"23514": "check_violation",
	"23P01": "exclusion_violation",
	// Class 24 - Invalid Cursor State
	"24000": "invalid_cursor_state",
	// Class 25 - Invalid Transaction State
	"25000": "invalid_transaction_state",
	"25001": "active_sql_transaction",
	"25002": "branch_transaction_already_active",
	"25008": "held_cursor_requires_same_isolation_level",
	"25003": "inappropriate_access_mode_for_branch_transaction",
	"25004": "inappropriate_isolation_level_for_branch_transaction",
	"25005": "no_active_sql_transaction_for_branch_transaction",
	"25006": "read_only_sql_transaction",
	"25007": "schema_and_data_statement_mixing_not_supported",
	"25P01": "no_active_sql_transaction",
	"25P02": "in_failed_sql_transaction",
	// Class 26 - Invalid SQL Statement Name
	"26000": "invalid_sql_statement_name",
	// Class 27 - Triggered Data Change Violation
	"27000": "triggered_data_change_violation",
	// Class 28 - Invalid Authorization Specification
	"28000": "invalid_authorization_specification",
	"28P01": "invalid_password",
	// Class 2B - Dependent Privilege Descriptors Still Exist
	"2B000": "dependent_privilege_descriptors_still_exist",
	"2BP01": "dependent_objects_still_exist",
	// Class 2D - Invalid Transaction Termination
	"2D000": "invalid_transaction_termination",
	// Class 2F - SQL Routine Exception
	"2F000": "sql_routine_exception",
	"2F005": "function_executed_no_return_statement",
	"2F002": "modifying_sql_data_not_permitted",
	"2F003": "prohibited_sql_statement_attempted",
	"2F004": "reading_sql_data_not_permitted",
	// Class 34 - Invalid Cursor Name
	"34000": "invalid_cursor_name",
	// Class 38 - External Routine Exception
	"38000": "external_routine_exception",
	"38001": "containing_sql_not_permitted",
	"38002": "modifying_sql_data_not_permitted",
	"38003": "prohibited_sql_statement_attempted",
	"38004": "reading_sql_data_not_permitted",
	// Class 39 - External Routine Invocation Exception
	"39000": "external_routine_invocation_exception",
	"39001": "invalid_sqlstate_returned",
	"39004": "null_value_not_allowed",
	"39P01": "trigger_protocol_violated",
	"39P02": "srf_protocol_violated",
	// Class 3B - Savepoint Exception
	"3B000": "savepoint_exception",
	"3B001": "invalid_savepoint_specification",
	// Class 3D - Invalid Catalog Name
	"3D000": "invalid_catalog_name",
	// Class 3F - Invalid Schema Name
	"3F000": "invalid_schema_name",
	// Class 40 - Transaction Rollback
	"40000": "transaction_rollback",
	"40002": "transaction_integrity_constraint_violation",
	"40001": "serialization_failure",
	"40003": "statement_completion_unknown",
	"40P01": "deadlock_detected",
	// Class 42 - Syntax Error or Access Rule Violation
	"42000": "syntax_error_or_access_rule_violation",
	"42601": "syntax_error",
	"42501": "insufficient_privilege",
	"42846": "cannot_coerce",
	"42803": "grouping_error",
	"42P20": "windowing_error",
	"42P19": "invalid_recursion",
	"42830": "invalid_foreign_key",
	"42602": "invalid_name",
	"42622": "name_too_long",
	"42939": "reserved_name",
	"42804": "datatype_mismatch",
	"42P18": "indeterminate_datatype",
	"42P21": "collation_mismatch",
	"42P22": "indeterminate_collation",
	"42809": "wrong_object_type",
	"42703": "undefined_column",
	"42883": "undefined_function",
	"42P01": "undefined_table",
	"42P02": "undefined_parameter",
	"42704": "undefined_object",
	"42701": "duplicate_column",
	"42P03": "duplicate_cursor",
	"42P04": "duplicate_database",
	"42723": "duplicate_function",
	"42P05": "duplicate_prepared_statement",
	"42P06": "duplicate_schema",
	"42P07": "duplicate_table",
	"42712": "duplicate_alias",
	"42710": "duplicate_object",
	"42702": "ambiguous_column",
	"42725": "ambiguous_function",
	"42P08": "ambiguous_parameter",
	"42P09": "ambiguous_alias",
	"42P10": "invalid_column_reference",
	"42611": "invalid_column_definition",
	"42P11": "invalid_cursor_definition",
	"42P12": "invalid_database_definition",
	"42P13": "invalid_function_definition",
	"42P14": "invalid_prepared_statement_definition",
	"42P15": "invalid_schema_definition",
	"42P16": "invalid_table_definition",
	"42P17": "invalid_object_definition",
	// Class 44 - WITH CHECK OPTION Violation
	"44000": "with_check_option_violation",
	// Class 53 - Insufficient Resources
	"53000": "insufficient_resources",
	"53100": "disk_full",
	"53200": "out_of_memory",
	"53300": "too_many_connections",
	"53400": "configuration_limit_exceeded",
	// Class 54 - Program Limit Exceeded
	"54000": "program_limit_exceeded",
	"54001": "statement_too_complex",
	"54011": "too_many_columns",
	"54023": "too_many_arguments",
	// Class 55 - Object Not In Prerequisite State
	"55000": "object_not_in_prerequisite_state",
	"55006": "object_in_use",
	"55P02": "cant_change_runtime_param",
	"55P03": "lock_not_available",
	// Class 57 - Operator Intervention
	"57000": "operator_intervention",
	"57014": "query_canceled",
	"57P01": "admin_shutdown",
	"57P02": "crash_shutdown",
	"57P03": "cannot_connect_now",
	"57P04": "database_dropped",
	// Class 58 - System Error (errors external to PostgreSQL itself)
	"58000": "system_error",
	"58030": "io_error",
	"58P01": "undefined_file",
	"58P02": "duplicate_file",
	// Class F0 - Configuration File Error
	"F0000": "config_file_error",
	"F0001": "lock_file_exists",
	// Class HV - Foreign Data Wrapper Error (SQL/MED)
	"HV000": "fdw_error",
	"HV005": "fdw_column_name_not_found",
	"HV002": "fdw_dynamic_parameter_value_needed",
	"HV010": "fdw_function_sequence_error",
	"HV021": "fdw_inconsistent_descriptor_information",
	"HV024": "fdw_invalid_attribute_value",
	"HV007": "fdw_invalid_column_name",
	"HV008": "fdw_invalid_column_number",
	"HV004": "fdw_invalid_data_type",
	"HV006": "fdw_invalid_data_type_descriptors",
	"HV091": "fdw_invalid_descriptor_field_identifier",
	"HV00B": "fdw_invalid_handle",
	"HV00C": "fdw_invalid_option_index",
	"HV00D": "fdw_invalid_option_name",
	"HV090": "fdw_invalid_string_length_or_buffer_length",
	"HV00A": "fdw_invalid_string_format",
	"HV009": "fdw_invalid_use_of_null_pointer",
	"HV014": "fdw_too_many_handles",
	"HV001": "fdw_out_of_memory",
	"HV00P": "fdw_no_schemas",
	"HV00J": "fdw_option_name_not_found",
	"HV00K": "fdw_reply_handle",
	"HV00Q": "fdw_schema_not_found",
	"HV00R": "fdw_table_not_found",
	"HV00L": "fdw_unable_to_create_execution",
	"HV00M": "fdw_unable_to_create_reply",
	"HV00N": "fdw_unable_to_establish_connection",
	// Class P0 - PL/pgSQL Error
	"P0000": "plpgsql_error",
	"P0001": "raise_exception",
	"P0002": "no_data_found",
	"P0003": "too_many_rows",
	// Class XX - Internal Error
	"XX000": "internal_error",
	"XX001": "data_corrupted",
	"XX002": "index_corrupted",
}
|
||||
|
||||
// parseError decodes the body of an ErrorResponse message into an *Error.
// Each field is a single-byte field-type identifier followed by a
// NUL-terminated string, and a zero byte terminates the field list.
//
// q is the query text the error relates to; it is stored so that
// (*Error).Error and (*Error).ErrorWithDetail can render position context.
// Unknown field identifiers are silently ignored.
func parseError(r *readBuf, q string) *Error {
	err := &Error{query: q}
	for t := r.byte(); t != 0; t = r.byte() {
		msg := r.string()
		switch t {
		case 'S':
			err.Severity = msg
		case 'C':
			err.Code = ErrorCode(msg)
		case 'M':
			err.Message = msg
		case 'D':
			err.Detail = msg
		case 'H':
			err.Hint = msg
		case 'P':
			err.Position = msg
		case 'p':
			err.InternalPosition = msg
		case 'q':
			err.InternalQuery = msg
		case 'W':
			err.Where = msg
		case 's':
			err.Schema = msg
		case 't':
			err.Table = msg
		case 'c':
			err.Column = msg
		case 'd':
			err.DataTypeName = msg
		case 'n':
			err.Constraint = msg
		case 'F':
			err.File = msg
		case 'L':
			err.Line = msg
		case 'R':
			err.Routine = msg
		}
	}
	return err
}
|
||||
|
||||
// Fatal returns true if the Error Severity is fatal.
//
// A fatal error means the server has terminated the session; handleError
// turns such errors into driver.ErrBadConn.
func (e *Error) Fatal() bool {
	return e.Severity == Efatal
}

// SQLState returns the SQLState of the error.
func (e *Error) SQLState() string {
	return string(e.Code)
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
msg := e.Message
|
||||
if e.query != "" && e.Position != "" {
|
||||
pos, err := strconv.Atoi(e.Position)
|
||||
if err == nil {
|
||||
lines := strings.Split(e.query, "\n")
|
||||
line, col := posToLine(pos, lines)
|
||||
if len(lines) == 1 {
|
||||
msg += " at column " + strconv.Itoa(col)
|
||||
} else {
|
||||
msg += " at position " + strconv.Itoa(line) + ":" + strconv.Itoa(col)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if e.Code != "" {
|
||||
return "pq: " + msg + " (" + string(e.Code) + ")"
|
||||
}
|
||||
return "pq: " + msg
|
||||
}
|
||||
|
||||
// ErrorWithDetail returns the error message with detailed information and
// location context (if any).
//
// The output mimics psql: an "ERROR:" line with the message and code,
// optional "DETAIL:" and "HINT:" lines, and – when the query and a numeric
// position are known – a "CONTEXT:" excerpt of up to three query lines with
// a caret under the error column.
//
// See the documentation on [Error].
func (e *Error) ErrorWithDetail() string {
	b := new(strings.Builder)
	// Rough pre-size for the fixed labels plus the variable parts.
	b.Grow(len(e.Message) + len(e.Detail) + len(e.Hint) + 30)
	b.WriteString("ERROR: ")
	b.WriteString(e.Message)
	if e.Code != "" {
		b.WriteString(" (")
		b.WriteString(string(e.Code))
		b.WriteByte(')')
	}
	if e.Detail != "" {
		b.WriteString("\nDETAIL: ")
		b.WriteString(e.Detail)
	}
	if e.Hint != "" {
		b.WriteString("\nHINT: ")
		b.WriteString(e.Hint)
	}

	if e.query != "" && e.Position != "" {
		b.Grow(512)
		pos, err := strconv.Atoi(e.Position)
		if err != nil {
			// Position isn't numeric; return what we have so far.
			return b.String()
		}
		lines := strings.Split(e.query, "\n")
		line, col := posToLine(pos, lines)

		fmt.Fprintf(b, "\nCONTEXT: line %d, column %d:\n\n", line, col)
		// Show up to two preceding lines for context.
		if line > 2 {
			fmt.Fprintf(b, "% 7d | %s\n", line-2, expandTab(lines[line-3]))
		}
		if line > 1 {
			fmt.Fprintf(b, "% 7d | %s\n", line-1, expandTab(lines[line-2]))
		}
		// Expand tabs, so that the ^ is at the correct position, but leave
		// "column 10-13" intact. Adjusting this to the visual column would be
		// better, but we don't know the tabsize of the user in their editor,
		// which can be 8, 4, 2, or something else. We can't know. So leaving
		// it as the character index is probably the "most correct".
		expanded := expandTab(lines[line-1])
		diff := len(expanded) - len(lines[line-1])
		fmt.Fprintf(b, "% 7d | %s\n", line, expanded)
		fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col-1+diff), "^")
	}

	return b.String()
}
|
||||
|
||||
// posToLine maps a 1-based character offset into the original query (the
// server's Position field, counted in characters, not bytes) onto a 1-based
// (line, column) pair. lines is the query already split on "\n". If pos lies
// beyond the end of the query, col is returned as 0.
func posToLine(pos int, lines []string) (line, col int) {
	consumed := 0
	for _, l := range lines {
		line++
		// Count runes, plus one for the newline that Split removed.
		n := utf8.RuneCountInString(l) + 1
		if consumed+n >= pos {
			col = pos - consumed
			if col < 1 { // Should never happen, but just in case.
				col = 1
			}
			return line, col
		}
		consumed += n
	}
	return line, col
}
|
||||
|
||||
// expandTab returns s with every tab replaced by spaces, assuming tab stops
// every 8 columns (so a tab advances to the next multiple of 8). All other
// runes are copied through unchanged.
func expandTab(s string) string {
	var (
		b   strings.Builder
		col int // current visual column, 0-based
	)
	b.Grow(len(s))
	for _, r := range s {
		if r == '\t' {
			// Distance to the next 8-column tab stop.
			n := 8 - col%8
			// strings.Repeat replaces the previous hand-rolled byte-fill
			// helper; behavior is identical.
			b.WriteString(strings.Repeat(" ", n))
			col += n
			continue
		}
		b.WriteRune(r)
		col++
	}
	return b.String()
}
|
||||
|
||||
// handleError classifies an error seen while talking to the server, marks the
// connection bad where the session state is no longer trustworthy, and
// returns the error the caller should surface.
//
// The optional query argument attaches the query text to a server *Error so
// that Error()/ErrorWithDetail() can render position context.
//
// NOTE: the order of the type-switch cases matters – *safeRetryError and
// *Error must be matched before the catch-all `error` case.
func (cn *conn) handleError(reported error, query ...string) error {
	switch err := reported.(type) {
	case nil:
		return nil
	case runtime.Error, *net.OpError:
		// Panics and network-level failures leave the connection in an
		// unknown state: mark it bad, but return the original error.
		cn.err.set(driver.ErrBadConn)
	case *safeRetryError:
		// Nothing was written yet, so database/sql may safely retry on a
		// different connection.
		cn.err.set(driver.ErrBadConn)
		reported = driver.ErrBadConn
	case *Error:
		// A regular server error: attach the query text (if given) for
		// position rendering; fatal severities kill the session.
		if len(query) > 0 && query[0] != "" {
			err.query = query[0]
			reported = err
		}
		if err.Fatal() {
			reported = driver.ErrBadConn
		}
	case error:
		// io.EOF and a TLS handshake failure both mean the connection is
		// unusable.
		if err == io.EOF || err.Error() == "remote error: handshake failure" {
			reported = driver.ErrBadConn
		}
	default:
		cn.err.set(driver.ErrBadConn)
		reported = fmt.Errorf("pq: unknown error %T: %[1]s", err)
	}

	// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
	// mark the connection bad in database/sql.
	if reported == driver.ErrBadConn {
		cn.err.set(driver.ErrBadConn)
	}
	return reported
}
|
||||
71
vendor/github.com/lib/pq/internal/pgpass/pgpass.go
generated
vendored
Normal file
71
vendor/github.com/lib/pq/internal/pgpass/pgpass.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package pgpass
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/lib/pq/internal/pqutil"
|
||||
)
|
||||
|
||||
func PasswordFromPgpass(passfile, user, password, host, port, dbname string, passwordSet bool) string {
|
||||
// Do not process .pgpass if a password was supplied.
|
||||
if passwordSet {
|
||||
return password
|
||||
}
|
||||
|
||||
filename := pqutil.Pgpass(passfile)
|
||||
if filename == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
fp, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer fp.Close()
|
||||
|
||||
scan := bufio.NewScanner(fp)
|
||||
for scan.Scan() {
|
||||
line := scan.Text()
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
split := splitFields(line)
|
||||
if len(split) != 5 {
|
||||
continue
|
||||
}
|
||||
|
||||
socket := host == "" || filepath.IsAbs(host) || strings.HasPrefix(host, "@")
|
||||
if (split[0] == "*" || split[0] == host || (split[0] == "localhost" && socket)) &&
|
||||
(split[1] == "*" || split[1] == port) &&
|
||||
(split[2] == "*" || split[2] == dbname) &&
|
||||
(split[3] == "*" || split[3] == user) {
|
||||
return split[4]
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// splitFields splits a .pgpass line on unescaped ":" separators. A backslash
// escapes the next character (so "\:" and "\\" yield literal ":" and "\").
// The result always contains at least one element.
func splitFields(s string) []string {
	fields := make([]string, 0, 5)
	cur := make([]rune, 0, len(s))
	escaped := false
	for _, r := range s {
		if escaped {
			// Previous character was a backslash: take r literally.
			cur = append(cur, r)
			escaped = false
			continue
		}
		switch r {
		case '\\':
			escaped = true
		case ':':
			fields = append(fields, string(cur))
			cur = cur[:0]
		default:
			cur = append(cur, r)
		}
	}
	return append(fields, string(cur))
}
|
||||
37
vendor/github.com/lib/pq/internal/pqsql/copy.go
generated
vendored
Normal file
37
vendor/github.com/lib/pq/internal/pqsql/copy.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
package pqsql
|
||||
|
||||
// StartsWithCopy reports if the SQL strings start with "copy", ignoring
// whitespace, comments, and casing.
func StartsWithCopy(query string) bool {
	if len(query) < 4 {
		return false
	}
	var inLineComment, inBlockComment bool
	for i := 0; i < len(query); i++ {
		ch := query[i]
		switch {
		case inLineComment:
			// A "--" comment runs to the end of the line.
			if ch == '\n' {
				inLineComment = false
			}
		case inBlockComment:
			// A "/* ... */" comment ends at the first "*/".
			if ch == '/' && query[i-1] == '*' {
				inBlockComment = false
			}
		case ch == '-' && i+1 < len(query) && query[i+1] == '-':
			inLineComment = true
		case ch == '/' && i+1 < len(query) && query[i+1] == '*':
			inBlockComment = true
		case ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n':
			// Skip leading whitespace.
		default:
			// First significant byte: compare the next four bytes against
			// "copy", case-insensitively (|0x20 lower-cases ASCII letters).
			return i+3 < len(query) &&
				ch|0x20 == 'c' &&
				query[i+1]|0x20 == 'o' &&
				query[i+2]|0x20 == 'p' &&
				query[i+3]|0x20 == 'y'
		}
	}
	return false
}
|
||||
65
vendor/github.com/lib/pq/internal/pqutil/path.go
generated
vendored
Normal file
65
vendor/github.com/lib/pq/internal/pqutil/path.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package pqutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Home gets the user's home directory. Matches pqGetHomeDirectory() from
|
||||
// PostgreSQL
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/2b117bb/src/interfaces/libpq/fe-connect.c#L8214
|
||||
func Home() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
// pq uses SHGetFolderPath(), which is deprecated but x/sys/windows has
|
||||
// KnownFolderPath(). We don't really want to pull that in though, so
|
||||
// use APPDATA env. This is also what PostgreSQL uses in some other
|
||||
// codepaths (get_home_path() for example).
|
||||
ad := os.Getenv("APPDATA")
|
||||
if ad == "" {
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(ad, "postgresql")
|
||||
}
|
||||
|
||||
home, _ := os.UserHomeDir()
|
||||
if home == "" {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
home = u.HomeDir
|
||||
}
|
||||
return home
|
||||
}
|
||||
|
||||
// Pgpass gets the filepath to the pgpass file to use, returning "" if a pgpass
|
||||
// file shouldn't be used.
|
||||
func Pgpass(passfile string) string {
|
||||
// Get passfile from the options.
|
||||
if passfile == "" {
|
||||
home := Home()
|
||||
if home == "" {
|
||||
return ""
|
||||
}
|
||||
passfile = filepath.Join(home, ".pgpass")
|
||||
}
|
||||
|
||||
// On Win32, the directory is protected, so we don't have to check the file.
|
||||
if runtime.GOOS != "windows" {
|
||||
fi, err := os.Stat(passfile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if fi.Mode().Perm()&(0x77) != 0 {
|
||||
fmt.Fprintf(os.Stderr,
|
||||
"WARNING: password file %q has group or world access; permissions should be u=rw (0600) or less\n",
|
||||
passfile)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
return passfile
|
||||
}
|
||||
64
vendor/github.com/lib/pq/internal/pqutil/perm.go
generated
vendored
Normal file
64
vendor/github.com/lib/pq/internal/pqutil/perm.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
//go:build !windows && !plan9
|
||||
|
||||
package pqutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSSLKeyUnknownOwnership = errors.New("pq: could not get owner information for private key, may not be properly protected")
|
||||
ErrSSLKeyHasWorldPermissions = errors.New("pq: private key has world access; permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less")
|
||||
)
|
||||
|
||||
// SSLKeyPermissions checks the permissions on user-supplied SSL key files,
|
||||
// which should have very little access. libpq does not check key file
|
||||
// permissions on Windows.
|
||||
//
|
||||
// If the file is owned by the same user the process is running as, the file
|
||||
// should only have 0600. If the file is owned by root, and the group matches
|
||||
// the group that the process is running in, the permissions cannot be more than
|
||||
// 0640. The file should never have world permissions.
|
||||
//
|
||||
// Returns an error when the permission check fails.
|
||||
func SSLKeyPermissions(sslkey string) error {
|
||||
fi, err := os.Stat(sslkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return checkPermissions(fi)
|
||||
}
|
||||
|
||||
func checkPermissions(fi os.FileInfo) error {
|
||||
// The maximum permissions that a private key file owned by a regular user
|
||||
// is allowed to have. This translates to u=rw. Regardless of if we're
|
||||
// running as root or not, 0600 is acceptable, so we return if we match the
|
||||
// regular user permission mask.
|
||||
if fi.Mode().Perm()&os.FileMode(0777)^0600 == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We need to pull the Unix file information to get the file's owner.
|
||||
// If we can't access it, there's some sort of operating system level error
|
||||
// and we should fail rather than attempting to use faulty information.
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return ErrSSLKeyUnknownOwnership
|
||||
}
|
||||
|
||||
// if the file is owned by root, we allow 0640 (u=rw,g=r) to match what
|
||||
// Postgres does.
|
||||
if sys.Uid == 0 {
|
||||
// The maximum permissions that a private key file owned by root is
|
||||
// allowed to have. This translates to u=rw,g=r.
|
||||
if fi.Mode().Perm()&os.FileMode(0777)^0640 != 0 {
|
||||
return ErrSSLKeyHasWorldPermissions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrSSLKeyHasWorldPermissions
|
||||
}
|
||||
12
vendor/github.com/lib/pq/internal/pqutil/perm_unsupported.go
generated
vendored
Normal file
12
vendor/github.com/lib/pq/internal/pqutil/perm_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
//go:build windows || plan9
|
||||
|
||||
package pqutil
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrSSLKeyUnknownOwnership = errors.New("unused")
|
||||
ErrSSLKeyHasWorldPermissions = errors.New("unused")
|
||||
)
|
||||
|
||||
func SSLKeyPermissions(sslkey string) error { return nil }
|
||||
32
vendor/github.com/lib/pq/internal/pqutil/pqutil.go
generated
vendored
Normal file
32
vendor/github.com/lib/pq/internal/pqutil/pqutil.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
package pqutil
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseBool is like strconv.ParseBool, but also accepts the PostgreSQL-style
// spellings "yes"/"no" and "on"/"off" (lower-case only). Any other input
// yields a *strconv.NumError with strconv.ErrSyntax.
func ParseBool(str string) (bool, error) {
	switch str {
	case "0", "f", "F", "false", "FALSE", "False", "no", "off":
		return false, nil
	case "1", "t", "T", "true", "TRUE", "True", "yes", "on":
		return true, nil
	default:
		return false, &strconv.NumError{Func: "ParseBool", Num: str, Err: strconv.ErrSyntax}
	}
}
|
||||
|
||||
// Join formats s as a human-readable alternatives list for error messages:
// elements are comma-separated and the last element of a multi-element list
// is prefixed with "or " (e.g. "a, b, or c"). A single element is returned
// as-is and an empty slice yields "".
func Join[S ~[]E, E ~string](s S) string {
	var b strings.Builder
	for i := range s {
		if i > 0 {
			b.WriteString(", ")
		}
		// Only write "or " when there is actually an alternative; previously
		// a one-element list rendered as "or a".
		if i == len(s)-1 && len(s) > 1 {
			b.WriteString("or ")
		}
		b.WriteString(string(s[i]))
	}
	return b.String()
}
|
||||
9
vendor/github.com/lib/pq/internal/pqutil/user_other.go
generated
vendored
Normal file
9
vendor/github.com/lib/pq/internal/pqutil/user_other.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build js || android || hurd || zos || wasip1 || appengine
|
||||
|
||||
package pqutil
|
||||
|
||||
import "errors"
|
||||
|
||||
// User returns the name of the operating-system user the process runs as.
// On these platforms no usable account database is available, so the lookup
// always fails.
func User() (string, error) {
	return "", errors.New("pqutil.User: not supported on current platform")
}
|
||||
25
vendor/github.com/lib/pq/internal/pqutil/user_posix.go
generated
vendored
Normal file
25
vendor/github.com/lib/pq/internal/pqutil/user_posix.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
//go:build !windows && !js && !android && !hurd && !zos && !wasip1 && !appengine
|
||||
|
||||
package pqutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// User returns the name of the operating-system user the process runs as,
// preferring the USER environment variable ("user" on Plan 9) and falling
// back to the OS account database.
func User() (string, error) {
	envName := "USER"
	if runtime.GOOS == "plan9" {
		envName = "user"
	}
	if name := os.Getenv(envName); name != "" {
		return name, nil
	}

	cur, err := user.Current()
	if err != nil {
		return "", err
	}
	return cur.Username, nil
}
|
||||
28
vendor/github.com/lib/pq/internal/pqutil/user_windows.go
generated
vendored
Normal file
28
vendor/github.com/lib/pq/internal/pqutil/user_windows.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
//go:build windows && !appengine
|
||||
|
||||
package pqutil
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// User returns the current Windows account name, reduced to its base (last)
// component so the result matches libpq's GetUserName()-based lookup.
func User() (string, error) {
	// Perform Windows user name lookup identically to libpq.
	//
	// The PostgreSQL code makes use of the legacy Win32 function GetUserName,
	// and that function has not been imported into stock Go. GetUserNameEx is
	// available though, the difference being that a wider range of names are
	// available. To get the output to be the same as GetUserName, only the
	// base (or last) component of the result is returned.
	var (
		name     = make([]uint16, 128)
		pwnameSz = uint32(len(name)) - 1
	)
	err := syscall.GetUserNameEx(syscall.NameSamCompatible, &name[0], &pwnameSz)
	if err != nil {
		return "", err
	}
	s := syscall.UTF16ToString(name)
	// NameSamCompatible yields "DOMAIN\user"; Base keeps only the user part.
	return filepath.Base(s), nil
}
||||
186
vendor/github.com/lib/pq/internal/proto/proto.go
generated
vendored
Normal file
186
vendor/github.com/lib/pq/internal/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
// From src/include/libpq/protocol.h and src/include/libpq/pqcomm.h – PostgreSQL 18.1
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Constants from pqcomm.h
|
||||
const (
|
||||
ProtocolVersion30 = (3 << 16) | 0 //lint:ignore SA4016 x
|
||||
ProtocolVersion32 = (3 << 16) | 2 // PostgreSQL ≥18; not yet supported.
|
||||
CancelRequestCode = (1234 << 16) | 5678
|
||||
NegotiateSSLCode = (1234 << 16) | 5679
|
||||
NegotiateGSSCode = (1234 << 16) | 5680
|
||||
)
|
||||
|
||||
// Constants from fe-connect.c
|
||||
const (
|
||||
MaxErrlen = 30_000 // https://github.com/postgres/postgres/blob/c6a10a89f/src/interfaces/libpq/fe-connect.c#L4067
|
||||
)
|
||||
|
||||
// RequestCode is a request codes sent by the frontend.
|
||||
type RequestCode byte
|
||||
|
||||
// These are the request codes sent by the frontend.
|
||||
const (
|
||||
Bind = RequestCode('B')
|
||||
Close = RequestCode('C')
|
||||
Describe = RequestCode('D')
|
||||
Execute = RequestCode('E')
|
||||
FunctionCall = RequestCode('F')
|
||||
Flush = RequestCode('H')
|
||||
Parse = RequestCode('P')
|
||||
Query = RequestCode('Q')
|
||||
Sync = RequestCode('S')
|
||||
Terminate = RequestCode('X')
|
||||
CopyFail = RequestCode('f')
|
||||
GSSResponse = RequestCode('p')
|
||||
PasswordMessage = RequestCode('p')
|
||||
SASLInitialResponse = RequestCode('p')
|
||||
SASLResponse = RequestCode('p')
|
||||
CopyDoneRequest = RequestCode('c')
|
||||
CopyDataRequest = RequestCode('d')
|
||||
)
|
||||
|
||||
func (r RequestCode) String() string {
|
||||
s, ok := map[RequestCode]string{
|
||||
Bind: "Bind",
|
||||
Close: "Close",
|
||||
Describe: "Describe",
|
||||
Execute: "Execute",
|
||||
FunctionCall: "FunctionCall",
|
||||
Flush: "Flush",
|
||||
Parse: "Parse",
|
||||
Query: "Query",
|
||||
Sync: "Sync",
|
||||
Terminate: "Terminate",
|
||||
CopyFail: "CopyFail",
|
||||
// These are all the same :-/
|
||||
//GSSResponse: "GSSResponse",
|
||||
PasswordMessage: "PasswordMessage",
|
||||
//SASLInitialResponse: "SASLInitialResponse",
|
||||
//SASLResponse: "SASLResponse",
|
||||
CopyDoneRequest: "CopyDone",
|
||||
CopyDataRequest: "CopyData",
|
||||
}[r]
|
||||
if !ok {
|
||||
s = "<unknown>"
|
||||
}
|
||||
c := string(r)
|
||||
if r <= 0x1f || r == 0x7f {
|
||||
c = fmt.Sprintf("0x%x", string(r))
|
||||
}
|
||||
return "(" + c + ") " + s
|
||||
}
|
||||
|
||||
// ResponseCode is a response codes sent by the backend.
|
||||
type ResponseCode byte
|
||||
|
||||
// These are the response codes sent by the backend.
|
||||
const (
|
||||
ParseComplete = ResponseCode('1')
|
||||
BindComplete = ResponseCode('2')
|
||||
CloseComplete = ResponseCode('3')
|
||||
NotificationResponse = ResponseCode('A')
|
||||
CommandComplete = ResponseCode('C')
|
||||
DataRow = ResponseCode('D')
|
||||
ErrorResponse = ResponseCode('E')
|
||||
CopyInResponse = ResponseCode('G')
|
||||
CopyOutResponse = ResponseCode('H')
|
||||
EmptyQueryResponse = ResponseCode('I')
|
||||
BackendKeyData = ResponseCode('K')
|
||||
NoticeResponse = ResponseCode('N')
|
||||
AuthenticationRequest = ResponseCode('R')
|
||||
ParameterStatus = ResponseCode('S')
|
||||
RowDescription = ResponseCode('T')
|
||||
FunctionCallResponse = ResponseCode('V')
|
||||
CopyBothResponse = ResponseCode('W')
|
||||
ReadyForQuery = ResponseCode('Z')
|
||||
NoData = ResponseCode('n')
|
||||
PortalSuspended = ResponseCode('s')
|
||||
ParameterDescription = ResponseCode('t')
|
||||
NegotiateProtocolVersion = ResponseCode('v')
|
||||
CopyDoneResponse = ResponseCode('c')
|
||||
CopyDataResponse = ResponseCode('d')
|
||||
)
|
||||
|
||||
func (r ResponseCode) String() string {
|
||||
s, ok := map[ResponseCode]string{
|
||||
ParseComplete: "ParseComplete",
|
||||
BindComplete: "BindComplete",
|
||||
CloseComplete: "CloseComplete",
|
||||
NotificationResponse: "NotificationResponse",
|
||||
CommandComplete: "CommandComplete",
|
||||
DataRow: "DataRow",
|
||||
ErrorResponse: "ErrorResponse",
|
||||
CopyInResponse: "CopyInResponse",
|
||||
CopyOutResponse: "CopyOutResponse",
|
||||
EmptyQueryResponse: "EmptyQueryResponse",
|
||||
BackendKeyData: "BackendKeyData",
|
||||
NoticeResponse: "NoticeResponse",
|
||||
AuthenticationRequest: "AuthRequest",
|
||||
ParameterStatus: "ParamStatus",
|
||||
RowDescription: "RowDescription",
|
||||
FunctionCallResponse: "FunctionCallResponse",
|
||||
CopyBothResponse: "CopyBothResponse",
|
||||
ReadyForQuery: "ReadyForQuery",
|
||||
NoData: "NoData",
|
||||
PortalSuspended: "PortalSuspended",
|
||||
ParameterDescription: "ParamDescription",
|
||||
NegotiateProtocolVersion: "NegotiateProtocolVersion",
|
||||
CopyDoneResponse: "CopyDone",
|
||||
CopyDataResponse: "CopyData",
|
||||
}[r]
|
||||
if !ok {
|
||||
s = "<unknown>"
|
||||
}
|
||||
c := string(r)
|
||||
if r <= 0x1f || r == 0x7f {
|
||||
c = fmt.Sprintf("0x%x", string(r))
|
||||
}
|
||||
return "(" + c + ") " + s
|
||||
}
|
||||
|
||||
// AuthCode are authentication request codes sent by the backend.
|
||||
type AuthCode int32
|
||||
|
||||
// These are the authentication request codes sent by the backend.
|
||||
const (
|
||||
AuthReqOk = AuthCode(0) // User is authenticated
|
||||
AuthReqKrb4 = AuthCode(1) // Kerberos V4. Not supported any more.
|
||||
AuthReqKrb5 = AuthCode(2) // Kerberos V5. Not supported any more.
|
||||
AuthReqPassword = AuthCode(3) // Password
|
||||
AuthReqCrypt = AuthCode(4) // crypt password. Not supported any more.
|
||||
AuthReqMD5 = AuthCode(5) // md5 password
|
||||
_ = AuthCode(6) // 6 is available. It was used for SCM creds, not supported any more.
|
||||
AuthReqGSS = AuthCode(7) // GSSAPI without wrap()
|
||||
AuthReqGSSCont = AuthCode(8) // Continue GSS exchanges
|
||||
AuthReqSSPI = AuthCode(9) // SSPI negotiate without wrap()
|
||||
AuthReqSASL = AuthCode(10) // Begin SASL authentication
|
||||
AuthReqSASLCont = AuthCode(11) // Continue SASL authentication
|
||||
AuthReqSASLFin = AuthCode(12) // Final SASL message
|
||||
)
|
||||
|
||||
func (a AuthCode) String() string {
|
||||
s, ok := map[AuthCode]string{
|
||||
AuthReqOk: "ok",
|
||||
AuthReqKrb4: "krb4",
|
||||
AuthReqKrb5: "krb5",
|
||||
AuthReqPassword: "password",
|
||||
AuthReqCrypt: "crypt",
|
||||
AuthReqMD5: "md5",
|
||||
AuthReqGSS: "GDD",
|
||||
AuthReqGSSCont: "GSSCont",
|
||||
AuthReqSSPI: "SSPI",
|
||||
AuthReqSASL: "SASL",
|
||||
AuthReqSASLCont: "SASLCont",
|
||||
AuthReqSASLFin: "SASLFin",
|
||||
}[a]
|
||||
if !ok {
|
||||
s = "<unknown>"
|
||||
}
|
||||
return s + " (" + strconv.Itoa(int(a)) + ")"
|
||||
}
|
||||
7
vendor/github.com/lib/pq/internal/proto/sz_32.go
generated
vendored
Normal file
7
vendor/github.com/lib/pq/internal/proto/sz_32.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:build 386 || arm || mips || mipsle
|
||||
|
||||
package proto
|
||||
|
||||
import "math"
|
||||
|
||||
const MaxUint32 = math.MaxInt
|
||||
7
vendor/github.com/lib/pq/internal/proto/sz_64.go
generated
vendored
Normal file
7
vendor/github.com/lib/pq/internal/proto/sz_64.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:build !386 && !arm && !mips && !mipsle
|
||||
|
||||
package proto
|
||||
|
||||
import "math"
|
||||
|
||||
const MaxUint32 = math.MaxUint32
|
||||
27
vendor/github.com/lib/pq/krb.go
generated
vendored
Normal file
27
vendor/github.com/lib/pq/krb.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package pq
|
||||
|
||||
// NewGSSFunc creates a GSS authentication provider, for use with
|
||||
// RegisterGSSProvider.
|
||||
type NewGSSFunc func() (GSS, error)
|
||||
|
||||
var newGss NewGSSFunc
|
||||
|
||||
// RegisterGSSProvider registers a GSS authentication provider. For example, if
|
||||
// you need to use Kerberos to authenticate with your server, add this to your
|
||||
// main package:
|
||||
//
|
||||
// import "github.com/lib/pq/auth/kerberos"
|
||||
//
|
||||
// func init() {
|
||||
// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
|
||||
// }
|
||||
func RegisterGSSProvider(newGssArg NewGSSFunc) {
|
||||
newGss = newGssArg
|
||||
}
|
||||
|
||||
// GSS provides GSSAPI authentication (e.g., Kerberos).
|
||||
type GSS interface {
|
||||
GetInitToken(host string, service string) ([]byte, error)
|
||||
GetInitTokenFromSpn(spn string) ([]byte, error)
|
||||
Continue(inToken []byte) (done bool, outToken []byte, err error)
|
||||
}
|
||||
69
vendor/github.com/lib/pq/notice.go
generated
vendored
Normal file
69
vendor/github.com/lib/pq/notice.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
)
|
||||
|
||||
// NoticeHandler returns the notice handler on the given connection, if any. A
|
||||
// runtime panic occurs if c is not a pq connection. This is rarely used
|
||||
// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead.
|
||||
func NoticeHandler(c driver.Conn) func(*Error) {
|
||||
return c.(*conn).noticeHandler
|
||||
}
|
||||
|
||||
// SetNoticeHandler sets the given notice handler on the given connection. A
|
||||
// runtime panic occurs if c is not a pq connection. A nil handler may be used
|
||||
// to unset it. This is rarely used directly, use ConnectorNoticeHandler and
|
||||
// ConnectorWithNoticeHandler instead.
|
||||
//
|
||||
// Note: Notice handlers are executed synchronously by pq meaning commands
|
||||
// won't continue to be processed until the handler returns.
|
||||
func SetNoticeHandler(c driver.Conn, handler func(*Error)) {
|
||||
c.(*conn).noticeHandler = handler
|
||||
}
|
||||
|
||||
// NoticeHandlerConnector wraps a regular connector and sets a notice handler
|
||||
// on it.
|
||||
type NoticeHandlerConnector struct {
|
||||
driver.Connector
|
||||
noticeHandler func(*Error)
|
||||
}
|
||||
|
||||
// Connect calls the underlying connector's connect method and then sets the
|
||||
// notice handler.
|
||||
func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
c, err := n.Connector.Connect(ctx)
|
||||
if err == nil {
|
||||
SetNoticeHandler(c, n.noticeHandler)
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
|
||||
// ConnectorNoticeHandler returns the currently set notice handler, if any. If
|
||||
// the given connector is not a result of ConnectorWithNoticeHandler, nil is
|
||||
// returned.
|
||||
func ConnectorNoticeHandler(c driver.Connector) func(*Error) {
|
||||
if c, ok := c.(*NoticeHandlerConnector); ok {
|
||||
return c.noticeHandler
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectorWithNoticeHandler creates or sets the given handler for the given
|
||||
// connector. If the given connector is a result of calling this function
|
||||
// previously, it is simply set on the given connector and returned. Otherwise,
|
||||
// this returns a new connector wrapping the given one and setting the notice
|
||||
// handler. A nil notice handler may be used to unset it.
|
||||
//
|
||||
// The returned connector is intended to be used with database/sql.OpenDB.
|
||||
//
|
||||
// Note: Notice handlers are executed synchronously by pq meaning commands
|
||||
// won't continue to be processed until the handler returns.
|
||||
func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector {
|
||||
if c, ok := c.(*NoticeHandlerConnector); ok {
|
||||
c.noticeHandler = handler
|
||||
return c
|
||||
}
|
||||
return &NoticeHandlerConnector{Connector: c, noticeHandler: handler}
|
||||
}
|
||||
846
vendor/github.com/lib/pq/notify.go
generated
vendored
Normal file
846
vendor/github.com/lib/pq/notify.go
generated
vendored
Normal file
@@ -0,0 +1,846 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
)
|
||||
|
||||
// Notification represents a single notification from the database.
|
||||
type Notification struct {
|
||||
// Process ID (PID) of the notifying postgres backend.
|
||||
BePid int
|
||||
// Name of the channel the notification was sent on.
|
||||
Channel string
|
||||
// Payload, or the empty string if unspecified.
|
||||
Extra string
|
||||
}
|
||||
|
||||
func recvNotification(r *readBuf) *Notification {
|
||||
bePid := r.int32()
|
||||
channel := r.string()
|
||||
extra := r.string()
|
||||
|
||||
return &Notification{bePid, channel, extra}
|
||||
}
|
||||
|
||||
// SetNotificationHandler sets the given notification handler on the given
|
||||
// connection. A runtime panic occurs if c is not a pq connection. A nil handler
|
||||
// may be used to unset it.
|
||||
//
|
||||
// Note: Notification handlers are executed synchronously by pq meaning commands
|
||||
// won't continue to be processed until the handler returns.
|
||||
func SetNotificationHandler(c driver.Conn, handler func(*Notification)) {
|
||||
c.(*conn).notificationHandler = handler
|
||||
}
|
||||
|
||||
// NotificationHandlerConnector wraps a regular connector and sets a notification handler
|
||||
// on it.
|
||||
type NotificationHandlerConnector struct {
|
||||
driver.Connector
|
||||
notificationHandler func(*Notification)
|
||||
}
|
||||
|
||||
// Connect calls the underlying connector's connect method and then sets the
|
||||
// notification handler.
|
||||
func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
c, err := n.Connector.Connect(ctx)
|
||||
if err == nil {
|
||||
SetNotificationHandler(c, n.notificationHandler)
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
|
||||
// ConnectorNotificationHandler returns the currently set notification handler, if any. If
|
||||
// the given connector is not a result of ConnectorWithNotificationHandler, nil is
|
||||
// returned.
|
||||
func ConnectorNotificationHandler(c driver.Connector) func(*Notification) {
|
||||
if c, ok := c.(*NotificationHandlerConnector); ok {
|
||||
return c.notificationHandler
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectorWithNotificationHandler creates or sets the given handler for the given
|
||||
// connector. If the given connector is a result of calling this function
|
||||
// previously, it is simply set on the given connector and returned. Otherwise,
|
||||
// this returns a new connector wrapping the given one and setting the notification
|
||||
// handler. A nil notification handler may be used to unset it.
|
||||
//
|
||||
// The returned connector is intended to be used with database/sql.OpenDB.
|
||||
//
|
||||
// Note: Notification handlers are executed synchronously by pq meaning commands
|
||||
// won't continue to be processed until the handler returns.
|
||||
func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector {
|
||||
if c, ok := c.(*NotificationHandlerConnector); ok {
|
||||
c.notificationHandler = handler
|
||||
return c
|
||||
}
|
||||
return &NotificationHandlerConnector{Connector: c, notificationHandler: handler}
|
||||
}
|
||||
|
||||
const (
|
||||
connStateIdle int32 = iota
|
||||
connStateExpectResponse
|
||||
connStateExpectReadyForQuery
|
||||
)
|
||||
|
||||
type message struct {
|
||||
typ proto.ResponseCode
|
||||
err error
|
||||
}
|
||||
|
||||
var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
|
||||
|
||||
// ListenerConn is a low-level interface for waiting for notifications. You
|
||||
// should use Listener instead.
|
||||
type ListenerConn struct {
|
||||
connectionLock sync.Mutex // guards cn and err
|
||||
senderLock sync.Mutex // the sending goroutine will be holding this lock
|
||||
cn *conn
|
||||
err error
|
||||
connState int32
|
||||
notificationChan chan<- *Notification
|
||||
replyChan chan message
|
||||
}
|
||||
|
||||
// NewListenerConn creates a new ListenerConn. Use NewListener instead.
|
||||
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
|
||||
return newDialListenerConn(defaultDialer{}, name, notificationChan)
|
||||
}
|
||||
|
||||
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
|
||||
cn, err := DialOpen(d, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l := &ListenerConn{
|
||||
cn: cn.(*conn),
|
||||
notificationChan: c,
|
||||
connState: connStateIdle,
|
||||
replyChan: make(chan message, 2),
|
||||
}
|
||||
|
||||
go l.listenerConnMain()
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// We can only allow one goroutine at a time to be running a query on the
|
||||
// connection for various reasons, so the goroutine sending on the connection
|
||||
// must be holding senderLock.
|
||||
//
|
||||
// Returns an error if an unrecoverable error has occurred and the ListenerConn
|
||||
// should be abandoned.
|
||||
func (l *ListenerConn) acquireSenderLock() error {
|
||||
// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
|
||||
l.senderLock.Lock()
|
||||
|
||||
l.connectionLock.Lock()
|
||||
err := l.err
|
||||
l.connectionLock.Unlock()
|
||||
if err != nil {
|
||||
l.senderLock.Unlock()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *ListenerConn) releaseSenderLock() {
|
||||
l.senderLock.Unlock()
|
||||
}
|
||||
|
||||
// setState advances the protocol state to newState. Returns false if moving
|
||||
// to that state from the current state is not allowed.
|
||||
func (l *ListenerConn) setState(newState int32) bool {
|
||||
var expectedState int32
|
||||
|
||||
switch newState {
|
||||
case connStateIdle:
|
||||
expectedState = connStateExpectReadyForQuery
|
||||
case connStateExpectResponse:
|
||||
expectedState = connStateIdle
|
||||
case connStateExpectReadyForQuery:
|
||||
expectedState = connStateExpectResponse
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
|
||||
}
|
||||
|
||||
return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
|
||||
}
|
||||
|
||||
// Main logic is here: receive messages from the postgres backend, forward
|
||||
// notifications and query replies and keep the internal state in sync with the
|
||||
// protocol state. Returns when the connection has been lost, is about to go
|
||||
// away or should be discarded because we couldn't agree on the state with the
|
||||
// server backend.
|
||||
func (l *ListenerConn) listenerConnLoop() (err error) {
|
||||
r := &readBuf{}
|
||||
for {
|
||||
t, err := l.cn.recvMessage(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case proto.NotificationResponse:
|
||||
// recvNotification copies all the data so we don't need to worry
|
||||
// about the scratch buffer being overwritten.
|
||||
l.notificationChan <- recvNotification(r)
|
||||
|
||||
case proto.RowDescription, proto.DataRow:
|
||||
// only used by tests; ignore
|
||||
|
||||
case proto.ErrorResponse:
|
||||
// We might receive an ErrorResponse even when not in a query; it
|
||||
// is expected that the server will close the connection after
|
||||
// that, but we should make sure that the error we display is the
|
||||
// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
|
||||
if !l.setState(connStateExpectReadyForQuery) {
|
||||
return parseError(r, "")
|
||||
}
|
||||
l.replyChan <- message{t, parseError(r, "")}
|
||||
|
||||
case proto.CommandComplete, proto.EmptyQueryResponse:
|
||||
if !l.setState(connStateExpectReadyForQuery) {
|
||||
// protocol out of sync
|
||||
return fmt.Errorf("unexpected CommandComplete")
|
||||
}
|
||||
// ExecSimpleQuery doesn't need to know about this message
|
||||
|
||||
case proto.ReadyForQuery:
|
||||
if !l.setState(connStateIdle) {
|
||||
// protocol out of sync
|
||||
return fmt.Errorf("unexpected ReadyForQuery")
|
||||
}
|
||||
l.replyChan <- message{t, nil}
|
||||
|
||||
case proto.ParameterStatus:
|
||||
// ignore
|
||||
case proto.NoticeResponse:
|
||||
if n := l.cn.noticeHandler; n != nil {
|
||||
n(parseError(r, ""))
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This is the main routine for the goroutine receiving on the database
|
||||
// connection. Most of the main logic is in listenerConnLoop.
|
||||
func (l *ListenerConn) listenerConnMain() {
|
||||
err := l.listenerConnLoop()
|
||||
|
||||
// listenerConnLoop terminated; we're done, but we still have to clean up.
|
||||
// Make sure nobody tries to start any new queries by making sure the err
|
||||
// pointer is set. It is important that we do not overwrite its value; a
|
||||
// connection could be closed by either this goroutine or one sending on
|
||||
// the connection -- whoever closes the connection is assumed to have the
|
||||
// more meaningful error message (as the other one will probably get
|
||||
// net.errClosed), so that goroutine sets the error we expose while the
|
||||
// other error is discarded. If the connection is lost while two
|
||||
// goroutines are operating on the socket, it probably doesn't matter which
|
||||
// error we expose so we don't try to do anything more complex.
|
||||
l.connectionLock.Lock()
|
||||
if l.err == nil {
|
||||
l.err = err
|
||||
}
|
||||
_ = l.cn.Close()
|
||||
l.connectionLock.Unlock()
|
||||
|
||||
// There might be a query in-flight; make sure nobody's waiting for a
|
||||
// response to it, since there's not going to be one.
|
||||
close(l.replyChan)
|
||||
|
||||
// let the listener know we're done
|
||||
close(l.notificationChan)
|
||||
|
||||
// this ListenerConn is done
|
||||
}
|
||||
|
||||
// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) Listen(channel string) (bool, error) {
|
||||
return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
|
||||
}
|
||||
|
||||
// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) Unlisten(channel string) (bool, error) {
|
||||
return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
|
||||
}
|
||||
|
||||
// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
|
||||
func (l *ListenerConn) UnlistenAll() (bool, error) {
|
||||
return l.ExecSimpleQuery("UNLISTEN *")
|
||||
}
|
||||
|
||||
// Ping the remote server to make sure it's alive. Non-nil error means the
|
||||
// connection has failed and should be abandoned.
|
||||
func (l *ListenerConn) Ping() error {
|
||||
sent, err := l.ExecSimpleQuery("")
|
||||
if !sent {
|
||||
return err
|
||||
}
|
||||
if err != nil { // shouldn't happen
|
||||
panic(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attempt to send a query on the connection. Returns an error if sending the
|
||||
// query failed, and the caller should initiate closure of this connection.
|
||||
// The caller must be holding senderLock (see acquireSenderLock and
|
||||
// releaseSenderLock).
|
||||
func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
|
||||
// Must set connection state before sending the query
|
||||
if !l.setState(connStateExpectResponse) {
|
||||
return errors.New("pq: two queries running at the same time")
|
||||
}
|
||||
|
||||
// Can't use l.cn.writeBuf here because it uses the scratch buffer which
|
||||
// might get overwritten by listenerConnLoop.
|
||||
b := &writeBuf{
|
||||
buf: []byte("Q\x00\x00\x00\x00"),
|
||||
pos: 1,
|
||||
}
|
||||
b.string(q)
|
||||
return l.cn.send(b)
|
||||
}
|
||||
|
||||
// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
|
||||
// parameters) on the connection. The possible return values are:
|
||||
// 1. "executed" is true; the query was executed to completion on the
|
||||
// database server. If the query failed, err will be set to the error
|
||||
// returned by the database, otherwise err will be nil.
|
||||
// 2. If "executed" is false, the query could not be executed on the remote
|
||||
// server. err will be non-nil.
|
||||
//
|
||||
// After a call to ExecSimpleQuery has returned an executed=false value, the
|
||||
// connection has either been closed or will be closed shortly thereafter, and
|
||||
// all subsequently executed queries will return an error.
|
||||
func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
|
||||
if err = l.acquireSenderLock(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer l.releaseSenderLock()
|
||||
|
||||
err = l.sendSimpleQuery(q)
|
||||
if err != nil {
|
||||
// We can't know what state the protocol is in, so we need to abandon
|
||||
// this connection.
|
||||
l.connectionLock.Lock()
|
||||
// Set the error pointer if it hasn't been set already; see
|
||||
// listenerConnMain.
|
||||
if l.err == nil {
|
||||
l.err = err
|
||||
}
|
||||
l.connectionLock.Unlock()
|
||||
_ = l.cn.c.Close()
|
||||
return false, err
|
||||
}
|
||||
|
||||
// now we just wait for a reply..
|
||||
for {
|
||||
m, ok := <-l.replyChan
|
||||
if !ok {
|
||||
// We lost the connection to server, don't bother waiting for a
|
||||
// a response. err should have been set already.
|
||||
l.connectionLock.Lock()
|
||||
err := l.err
|
||||
l.connectionLock.Unlock()
|
||||
return false, err
|
||||
}
|
||||
switch m.typ {
|
||||
case proto.ReadyForQuery:
|
||||
// sanity check
|
||||
if m.err != nil {
|
||||
panic("m.err != nil")
|
||||
}
|
||||
// done; err might or might not be set
|
||||
return true, err
|
||||
|
||||
case proto.ErrorResponse:
|
||||
// sanity check
|
||||
if m.err == nil {
|
||||
panic("m.err == nil")
|
||||
}
|
||||
// server responded with an error; ReadyForQuery to follow
|
||||
err = m.err
|
||||
|
||||
default:
|
||||
return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection.
|
||||
func (l *ListenerConn) Close() error {
|
||||
l.connectionLock.Lock()
|
||||
if l.err != nil {
|
||||
l.connectionLock.Unlock()
|
||||
return errListenerConnClosed
|
||||
}
|
||||
l.err = errListenerConnClosed
|
||||
l.connectionLock.Unlock()
|
||||
// We can't send anything on the connection without holding senderLock.
|
||||
// Simply close the net.Conn to wake up everyone operating on it.
|
||||
return l.cn.c.Close()
|
||||
}
|
||||
|
||||
// Err returns the reason the connection was closed. It is not safe to call
|
||||
// this function until l.Notify has been closed.
|
||||
func (l *ListenerConn) Err() error {
|
||||
return l.err
|
||||
}
|
||||
|
||||
// ErrChannelAlreadyOpen is returned from Listen when a channel is already
|
||||
// open.
|
||||
var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
|
||||
|
||||
// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
|
||||
var ErrChannelNotOpen = errors.New("pq: channel is not open")
|
||||
|
||||
// ListenerEventType is an enumeration of listener event types.
|
||||
type ListenerEventType int
|
||||
|
||||
const (
|
||||
// ListenerEventConnected is emitted only when the database connection
|
||||
// has been initially initialized. The err argument of the callback
|
||||
// will always be nil.
|
||||
ListenerEventConnected ListenerEventType = iota
|
||||
|
||||
// ListenerEventDisconnected is emitted after a database connection has
|
||||
// been lost, either because of an error or because Close has been
|
||||
// called. The err argument will be set to the reason the database
|
||||
// connection was lost.
|
||||
ListenerEventDisconnected
|
||||
|
||||
// ListenerEventReconnected is emitted after a database connection has
|
||||
// been re-established after connection loss. The err argument of the
|
||||
// callback will always be nil. After this event has been emitted, a
|
||||
// nil pq.Notification is sent on the Listener.Notify channel.
|
||||
ListenerEventReconnected
|
||||
|
||||
// ListenerEventConnectionAttemptFailed is emitted after a connection
|
||||
// to the database was attempted, but failed. The err argument will be
|
||||
// set to an error describing why the connection attempt did not
|
||||
// succeed.
|
||||
ListenerEventConnectionAttemptFailed
|
||||
)
|
||||
|
||||
// EventCallbackType is the event callback type. See also ListenerEventType
|
||||
// constants' documentation.
|
||||
type EventCallbackType func(event ListenerEventType, err error)
|
||||
|
||||
// Listener provides an interface for listening to notifications from a
|
||||
// PostgreSQL database. For general usage information, see section
|
||||
// "Notifications".
|
||||
//
|
||||
// Listener can safely be used from concurrently running goroutines.
|
||||
type Listener struct {
|
||||
// Channel for receiving notifications from the database. In some cases a
|
||||
// nil value will be sent. See section "Notifications" above.
|
||||
Notify chan *Notification
|
||||
|
||||
name string
|
||||
minReconnectInterval time.Duration
|
||||
maxReconnectInterval time.Duration
|
||||
dialer Dialer
|
||||
eventCallback EventCallbackType
|
||||
|
||||
lock sync.Mutex
|
||||
isClosed bool
|
||||
reconnectCond *sync.Cond
|
||||
cn *ListenerConn
|
||||
connNotificationChan <-chan *Notification
|
||||
channels map[string]struct{}
|
||||
}
|
||||
|
||||
// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
|
||||
//
|
||||
// name should be set to a connection string to be used to establish the
|
||||
// database connection (see section "Connection String Parameters" above).
|
||||
//
|
||||
// minReconnectInterval controls the duration to wait before trying to
|
||||
// re-establish the database connection after connection loss. After each
|
||||
// consecutive failure this interval is doubled, until maxReconnectInterval is
|
||||
// reached. Successfully completing the connection establishment procedure
|
||||
// resets the interval back to minReconnectInterval.
|
||||
//
|
||||
// The last parameter eventCallback can be set to a function which will be
|
||||
// called by the Listener when the state of the underlying database connection
|
||||
// changes. This callback will be called by the goroutine which dispatches the
|
||||
// notifications over the Notify channel, so you should try to avoid doing
|
||||
// potentially time-consuming operations from the callback.
|
||||
func NewListener(name string,
|
||||
minReconnectInterval time.Duration,
|
||||
maxReconnectInterval time.Duration,
|
||||
eventCallback EventCallbackType) *Listener {
|
||||
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
|
||||
}
|
||||
|
||||
// NewDialListener is like NewListener but it takes a Dialer.
|
||||
func NewDialListener(d Dialer,
|
||||
name string,
|
||||
minReconnectInterval time.Duration,
|
||||
maxReconnectInterval time.Duration,
|
||||
eventCallback EventCallbackType) *Listener {
|
||||
|
||||
l := &Listener{
|
||||
name: name,
|
||||
minReconnectInterval: minReconnectInterval,
|
||||
maxReconnectInterval: maxReconnectInterval,
|
||||
dialer: d,
|
||||
eventCallback: eventCallback,
|
||||
|
||||
channels: make(map[string]struct{}),
|
||||
|
||||
Notify: make(chan *Notification, 32),
|
||||
}
|
||||
l.reconnectCond = sync.NewCond(&l.lock)
|
||||
|
||||
go l.listenerMain()
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// NotificationChannel returns the notification channel for this listener.
|
||||
// This is the same channel as Notify, and will not be recreated during the
|
||||
// life time of the Listener.
|
||||
func (l *Listener) NotificationChannel() <-chan *Notification {
|
||||
return l.Notify
|
||||
}
|
||||
|
||||
// Listen starts listening for notifications on a channel. Calls to this
// function will block until an acknowledgement has been received from the
// server. Note that Listener automatically re-establishes the connection
// after connection loss, so this function may block indefinitely if the
// connection can not be re-established.
//
// Listen will only fail in three conditions:
// 1. The channel is already open. The returned error will be
// ErrChannelAlreadyOpen.
// 2. The query was executed on the remote server, but PostgreSQL returned an
// error message in response to the query. The returned error will be a
// pq.Error containing the information the server supplied.
// 3. Close is called on the Listener before the request could be completed.
//
// The channel name is case-sensitive.
func (l *Listener) Listen(channel string) error {
	l.lock.Lock()
	defer l.lock.Unlock()

	if l.isClosed {
		return net.ErrClosed
	}

	// The server allows you to issue a LISTEN on a channel which is already
	// open, but it seems useful to be able to detect this case to spot for
	// mistakes in application logic. If the application genuinely doesn't
	// care, it can check the exported error and ignore it.
	_, exists := l.channels[channel]
	if exists {
		return ErrChannelAlreadyOpen
	}

	if l.cn != nil {
		// If gotResponse is true but error is set, the query was executed on
		// the remote server, but resulted in an error. This should be
		// relatively rare, so it's fine if we just pass the error to our
		// caller. However, if gotResponse is false, we could not complete the
		// query on the remote server and our underlying connection is about
		// to go away, so we only add relname to l.channels, and wait for
		// resync() to take care of the rest.
		gotResponse, err := l.cn.Listen(channel)
		if gotResponse && err != nil {
			return err
		}
	}

	// Record the channel before (possibly) waiting for a connection so that
	// resync() re-issues the LISTEN after the next successful reconnect.
	l.channels[channel] = struct{}{}
	for l.cn == nil {
		l.reconnectCond.Wait()
		// we let go of the mutex for a while; Close() broadcasts on
		// reconnectCond, so re-check isClosed after every wakeup.
		if l.isClosed {
			return net.ErrClosed
		}
	}

	return nil
}
|
||||
|
||||
// Unlisten removes a channel from the Listener's channel list. Returns
|
||||
// ErrChannelNotOpen if the Listener is not listening on the specified channel.
|
||||
// Returns immediately with no error if there is no connection. Note that you
|
||||
// might still get notifications for this channel even after Unlisten has
|
||||
// returned.
|
||||
//
|
||||
// The channel name is case-sensitive.
|
||||
func (l *Listener) Unlisten(channel string) error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
|
||||
if l.isClosed {
|
||||
return net.ErrClosed
|
||||
}
|
||||
|
||||
// Similarly to LISTEN, this is not an error in Postgres, but it seems
|
||||
// useful to distinguish from the normal conditions.
|
||||
_, exists := l.channels[channel]
|
||||
if !exists {
|
||||
return ErrChannelNotOpen
|
||||
}
|
||||
|
||||
if l.cn != nil {
|
||||
// Similarly to Listen (see comment in that function), the caller
|
||||
// should only be bothered with an error if it came from the backend as
|
||||
// a response to our query.
|
||||
gotResponse, err := l.cn.Unlisten(channel)
|
||||
if gotResponse && err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Don't bother waiting for resync if there's no connection.
|
||||
delete(l.channels, channel)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnlistenAll removes all channels from the Listener's channel list. Returns
|
||||
// immediately with no error if there is no connection. Note that you might
|
||||
// still get notifications for any of the deleted channels even after
|
||||
// UnlistenAll has returned.
|
||||
func (l *Listener) UnlistenAll() error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
|
||||
if l.isClosed {
|
||||
return net.ErrClosed
|
||||
}
|
||||
|
||||
if l.cn != nil {
|
||||
// Similarly to Listen (see comment in that function), the caller
|
||||
// should only be bothered with an error if it came from the backend as
|
||||
// a response to our query.
|
||||
gotResponse, err := l.cn.UnlistenAll()
|
||||
if gotResponse && err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Don't bother waiting for resync if there's no connection.
|
||||
l.channels = make(map[string]struct{})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ping the remote server to make sure it's alive. Non-nil return value means
|
||||
// that there is no active connection.
|
||||
func (l *Listener) Ping() error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
|
||||
if l.isClosed {
|
||||
return net.ErrClosed
|
||||
}
|
||||
if l.cn == nil {
|
||||
return errors.New("no connection")
|
||||
}
|
||||
|
||||
return l.cn.Ping()
|
||||
}
|
||||
|
||||
// Clean up after losing the server connection. Returns l.cn.Err(), which
// should have the reason the connection was lost.
func (l *Listener) disconnectCleanup() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	// sanity check; can't look at Err() until the channel has been closed.
	// A successful receive with ok == false proves it is closed; a live
	// value or an immediately-blocking receive (the default case) means the
	// caller invoked us before draining the connection — a programmer bug.
	select {
	case _, ok := <-l.connNotificationChan:
		if ok {
			panic("connNotificationChan not closed")
		}
	default:
		panic("connNotificationChan not closed")
	}

	// Capture the reason before closing, then drop the dead connection so
	// other methods see "no connection".
	err := l.cn.Err()
	_ = l.cn.Close()
	l.cn = nil
	return err
}
|
||||
|
||||
// Synchronize the list of channels we want to be listening on with the server
// after the connection has been established.
//
// The caller (connect) holds l.lock for the duration of this call, so
// l.channels cannot change while the worker goroutine iterates over it.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
	doneChan := make(chan error)
	go func(notificationChan <-chan *Notification) {
		for channel := range l.channels {
			// If we got a response, return that error to our caller as it's
			// going to be more descriptive than cn.Err().
			gotResponse, err := cn.Listen(channel)
			if gotResponse && err != nil {
				doneChan <- err
				return
			}

			// If we couldn't reach the server, wait for notificationChan to
			// close and then return the error message from the connection, as
			// per ListenerConn's interface.
			if err != nil {
				for range notificationChan {
				}
				doneChan <- cn.Err()
				return
			}
		}
		doneChan <- nil
	}(notificationChan)

	// Ignore notifications while synchronization is going on to avoid
	// deadlocks. We have to send a nil notification over Notify anyway as
	// we can't possibly know which notifications (if any) were lost while
	// the connection was down, so there's no reason to try and process
	// these messages at all.
	for {
		select {
		case _, ok := <-notificationChan:
			if !ok {
				// Setting the channel to nil disables this select case;
				// we then simply wait for doneChan below.
				notificationChan = nil
			}

		case err := <-doneChan:
			return err
		}
	}
}
|
||||
|
||||
// caller should NOT be holding l.lock
|
||||
func (l *Listener) closed() bool {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
|
||||
return l.isClosed
|
||||
}
|
||||
|
||||
func (l *Listener) connect() error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
if l.isClosed {
|
||||
return net.ErrClosed
|
||||
}
|
||||
|
||||
notificationChan := make(chan *Notification, 32)
|
||||
|
||||
var err error
|
||||
l.cn, err = newDialListenerConn(l.dialer, l.name, notificationChan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = l.resync(l.cn, notificationChan)
|
||||
if err != nil {
|
||||
_ = l.cn.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
l.connNotificationChan = notificationChan
|
||||
l.reconnectCond.Broadcast()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close disconnects the Listener from the database and shuts it down.
|
||||
// Subsequent calls to its methods will return an error. Close returns an
|
||||
// error if the connection has already been closed.
|
||||
func (l *Listener) Close() error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
|
||||
if l.isClosed {
|
||||
return net.ErrClosed
|
||||
}
|
||||
|
||||
if l.cn != nil {
|
||||
_ = l.cn.Close()
|
||||
}
|
||||
l.isClosed = true
|
||||
|
||||
// Unblock calls to Listen()
|
||||
l.reconnectCond.Broadcast()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Listener) emitEvent(event ListenerEventType, err error) {
|
||||
if l.eventCallback != nil {
|
||||
l.eventCallback(event, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Main logic here: maintain a connection to the server when possible, wait
// for notifications and emit events.
func (l *Listener) listenerConnLoop() {
	var nextReconnect time.Time

	reconnectInterval := l.minReconnectInterval
	for {
		// Connect, retrying with exponential backoff capped at
		// maxReconnectInterval.
		for {
			err := l.connect()
			if err == nil {
				break
			}

			if l.closed() {
				return
			}
			l.emitEvent(ListenerEventConnectionAttemptFailed, err)

			time.Sleep(reconnectInterval)
			reconnectInterval *= 2
			if reconnectInterval > l.maxReconnectInterval {
				reconnectInterval = l.maxReconnectInterval
			}
		}

		if nextReconnect.IsZero() {
			l.emitEvent(ListenerEventConnected, nil)
		} else {
			l.emitEvent(ListenerEventReconnected, nil)
			// A nil notification tells consumers that notifications may
			// have been lost while the connection was down.
			l.Notify <- nil
		}

		// Reset backoff, and remember when the next reconnect is allowed so
		// we reconnect at most once per minReconnectInterval.
		reconnectInterval = l.minReconnectInterval
		nextReconnect = time.Now().Add(reconnectInterval)

		// Forward notifications until the connection is lost, which is
		// signalled by connNotificationChan being closed.
		for {
			notification, ok := <-l.connNotificationChan
			if !ok {
				// lost connection, loop again
				break
			}
			l.Notify <- notification
		}

		err := l.disconnectCleanup()
		if l.closed() {
			return
		}
		l.emitEvent(ListenerEventDisconnected, err)

		time.Sleep(time.Until(nextReconnect))
	}
}
|
||||
|
||||
// listenerMain is the Listener's maintenance goroutine: it runs the
// connection loop and closes the Notify channel once the loop exits.
func (l *Listener) listenerMain() {
	l.listenerConnLoop()
	close(l.Notify)
}
|
||||
7
vendor/github.com/lib/pq/oid/doc.go
generated
vendored
Normal file
7
vendor/github.com/lib/pq/oid/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:generate go run ./gen.go

// Package oid contains OID constants as defined by the Postgres server.
package oid

// Oid is a Postgres Object ID, identifying e.g. a built-in type in pg_type.
// The generated constants in this package are values of this type.
type Oid uint32
|
||||
343
vendor/github.com/lib/pq/oid/types.go
generated
vendored
Normal file
343
vendor/github.com/lib/pq/oid/types.go
generated
vendored
Normal file
@@ -0,0 +1,343 @@
|
||||
// Code generated by gen.go. DO NOT EDIT.
|
||||
|
||||
package oid
|
||||
|
||||
const (
|
||||
T_bool Oid = 16
|
||||
T_bytea Oid = 17
|
||||
T_char Oid = 18
|
||||
T_name Oid = 19
|
||||
T_int8 Oid = 20
|
||||
T_int2 Oid = 21
|
||||
T_int2vector Oid = 22
|
||||
T_int4 Oid = 23
|
||||
T_regproc Oid = 24
|
||||
T_text Oid = 25
|
||||
T_oid Oid = 26
|
||||
T_tid Oid = 27
|
||||
T_xid Oid = 28
|
||||
T_cid Oid = 29
|
||||
T_oidvector Oid = 30
|
||||
T_pg_ddl_command Oid = 32
|
||||
T_pg_type Oid = 71
|
||||
T_pg_attribute Oid = 75
|
||||
T_pg_proc Oid = 81
|
||||
T_pg_class Oid = 83
|
||||
T_json Oid = 114
|
||||
T_xml Oid = 142
|
||||
T__xml Oid = 143
|
||||
T_pg_node_tree Oid = 194
|
||||
T__json Oid = 199
|
||||
T_smgr Oid = 210
|
||||
T_index_am_handler Oid = 325
|
||||
T_point Oid = 600
|
||||
T_lseg Oid = 601
|
||||
T_path Oid = 602
|
||||
T_box Oid = 603
|
||||
T_polygon Oid = 604
|
||||
T_line Oid = 628
|
||||
T__line Oid = 629
|
||||
T_cidr Oid = 650
|
||||
T__cidr Oid = 651
|
||||
T_float4 Oid = 700
|
||||
T_float8 Oid = 701
|
||||
T_abstime Oid = 702
|
||||
T_reltime Oid = 703
|
||||
T_tinterval Oid = 704
|
||||
T_unknown Oid = 705
|
||||
T_circle Oid = 718
|
||||
T__circle Oid = 719
|
||||
T_money Oid = 790
|
||||
T__money Oid = 791
|
||||
T_macaddr Oid = 829
|
||||
T_inet Oid = 869
|
||||
T__bool Oid = 1000
|
||||
T__bytea Oid = 1001
|
||||
T__char Oid = 1002
|
||||
T__name Oid = 1003
|
||||
T__int2 Oid = 1005
|
||||
T__int2vector Oid = 1006
|
||||
T__int4 Oid = 1007
|
||||
T__regproc Oid = 1008
|
||||
T__text Oid = 1009
|
||||
T__tid Oid = 1010
|
||||
T__xid Oid = 1011
|
||||
T__cid Oid = 1012
|
||||
T__oidvector Oid = 1013
|
||||
T__bpchar Oid = 1014
|
||||
T__varchar Oid = 1015
|
||||
T__int8 Oid = 1016
|
||||
T__point Oid = 1017
|
||||
T__lseg Oid = 1018
|
||||
T__path Oid = 1019
|
||||
T__box Oid = 1020
|
||||
T__float4 Oid = 1021
|
||||
T__float8 Oid = 1022
|
||||
T__abstime Oid = 1023
|
||||
T__reltime Oid = 1024
|
||||
T__tinterval Oid = 1025
|
||||
T__polygon Oid = 1027
|
||||
T__oid Oid = 1028
|
||||
T_aclitem Oid = 1033
|
||||
T__aclitem Oid = 1034
|
||||
T__macaddr Oid = 1040
|
||||
T__inet Oid = 1041
|
||||
T_bpchar Oid = 1042
|
||||
T_varchar Oid = 1043
|
||||
T_date Oid = 1082
|
||||
T_time Oid = 1083
|
||||
T_timestamp Oid = 1114
|
||||
T__timestamp Oid = 1115
|
||||
T__date Oid = 1182
|
||||
T__time Oid = 1183
|
||||
T_timestamptz Oid = 1184
|
||||
T__timestamptz Oid = 1185
|
||||
T_interval Oid = 1186
|
||||
T__interval Oid = 1187
|
||||
T__numeric Oid = 1231
|
||||
T_pg_database Oid = 1248
|
||||
T__cstring Oid = 1263
|
||||
T_timetz Oid = 1266
|
||||
T__timetz Oid = 1270
|
||||
T_bit Oid = 1560
|
||||
T__bit Oid = 1561
|
||||
T_varbit Oid = 1562
|
||||
T__varbit Oid = 1563
|
||||
T_numeric Oid = 1700
|
||||
T_refcursor Oid = 1790
|
||||
T__refcursor Oid = 2201
|
||||
T_regprocedure Oid = 2202
|
||||
T_regoper Oid = 2203
|
||||
T_regoperator Oid = 2204
|
||||
T_regclass Oid = 2205
|
||||
T_regtype Oid = 2206
|
||||
T__regprocedure Oid = 2207
|
||||
T__regoper Oid = 2208
|
||||
T__regoperator Oid = 2209
|
||||
T__regclass Oid = 2210
|
||||
T__regtype Oid = 2211
|
||||
T_record Oid = 2249
|
||||
T_cstring Oid = 2275
|
||||
T_any Oid = 2276
|
||||
T_anyarray Oid = 2277
|
||||
T_void Oid = 2278
|
||||
T_trigger Oid = 2279
|
||||
T_language_handler Oid = 2280
|
||||
T_internal Oid = 2281
|
||||
T_opaque Oid = 2282
|
||||
T_anyelement Oid = 2283
|
||||
T__record Oid = 2287
|
||||
T_anynonarray Oid = 2776
|
||||
T_pg_authid Oid = 2842
|
||||
T_pg_auth_members Oid = 2843
|
||||
T__txid_snapshot Oid = 2949
|
||||
T_uuid Oid = 2950
|
||||
T__uuid Oid = 2951
|
||||
T_txid_snapshot Oid = 2970
|
||||
T_fdw_handler Oid = 3115
|
||||
T_pg_lsn Oid = 3220
|
||||
T__pg_lsn Oid = 3221
|
||||
T_tsm_handler Oid = 3310
|
||||
T_anyenum Oid = 3500
|
||||
T_tsvector Oid = 3614
|
||||
T_tsquery Oid = 3615
|
||||
T_gtsvector Oid = 3642
|
||||
T__tsvector Oid = 3643
|
||||
T__gtsvector Oid = 3644
|
||||
T__tsquery Oid = 3645
|
||||
T_regconfig Oid = 3734
|
||||
T__regconfig Oid = 3735
|
||||
T_regdictionary Oid = 3769
|
||||
T__regdictionary Oid = 3770
|
||||
T_jsonb Oid = 3802
|
||||
T__jsonb Oid = 3807
|
||||
T_anyrange Oid = 3831
|
||||
T_event_trigger Oid = 3838
|
||||
T_int4range Oid = 3904
|
||||
T__int4range Oid = 3905
|
||||
T_numrange Oid = 3906
|
||||
T__numrange Oid = 3907
|
||||
T_tsrange Oid = 3908
|
||||
T__tsrange Oid = 3909
|
||||
T_tstzrange Oid = 3910
|
||||
T__tstzrange Oid = 3911
|
||||
T_daterange Oid = 3912
|
||||
T__daterange Oid = 3913
|
||||
T_int8range Oid = 3926
|
||||
T__int8range Oid = 3927
|
||||
T_pg_shseclabel Oid = 4066
|
||||
T_regnamespace Oid = 4089
|
||||
T__regnamespace Oid = 4090
|
||||
T_regrole Oid = 4096
|
||||
T__regrole Oid = 4097
|
||||
)
|
||||
|
||||
var TypeName = map[Oid]string{
|
||||
T_bool: "BOOL",
|
||||
T_bytea: "BYTEA",
|
||||
T_char: "CHAR",
|
||||
T_name: "NAME",
|
||||
T_int8: "INT8",
|
||||
T_int2: "INT2",
|
||||
T_int2vector: "INT2VECTOR",
|
||||
T_int4: "INT4",
|
||||
T_regproc: "REGPROC",
|
||||
T_text: "TEXT",
|
||||
T_oid: "OID",
|
||||
T_tid: "TID",
|
||||
T_xid: "XID",
|
||||
T_cid: "CID",
|
||||
T_oidvector: "OIDVECTOR",
|
||||
T_pg_ddl_command: "PG_DDL_COMMAND",
|
||||
T_pg_type: "PG_TYPE",
|
||||
T_pg_attribute: "PG_ATTRIBUTE",
|
||||
T_pg_proc: "PG_PROC",
|
||||
T_pg_class: "PG_CLASS",
|
||||
T_json: "JSON",
|
||||
T_xml: "XML",
|
||||
T__xml: "_XML",
|
||||
T_pg_node_tree: "PG_NODE_TREE",
|
||||
T__json: "_JSON",
|
||||
T_smgr: "SMGR",
|
||||
T_index_am_handler: "INDEX_AM_HANDLER",
|
||||
T_point: "POINT",
|
||||
T_lseg: "LSEG",
|
||||
T_path: "PATH",
|
||||
T_box: "BOX",
|
||||
T_polygon: "POLYGON",
|
||||
T_line: "LINE",
|
||||
T__line: "_LINE",
|
||||
T_cidr: "CIDR",
|
||||
T__cidr: "_CIDR",
|
||||
T_float4: "FLOAT4",
|
||||
T_float8: "FLOAT8",
|
||||
T_abstime: "ABSTIME",
|
||||
T_reltime: "RELTIME",
|
||||
T_tinterval: "TINTERVAL",
|
||||
T_unknown: "UNKNOWN",
|
||||
T_circle: "CIRCLE",
|
||||
T__circle: "_CIRCLE",
|
||||
T_money: "MONEY",
|
||||
T__money: "_MONEY",
|
||||
T_macaddr: "MACADDR",
|
||||
T_inet: "INET",
|
||||
T__bool: "_BOOL",
|
||||
T__bytea: "_BYTEA",
|
||||
T__char: "_CHAR",
|
||||
T__name: "_NAME",
|
||||
T__int2: "_INT2",
|
||||
T__int2vector: "_INT2VECTOR",
|
||||
T__int4: "_INT4",
|
||||
T__regproc: "_REGPROC",
|
||||
T__text: "_TEXT",
|
||||
T__tid: "_TID",
|
||||
T__xid: "_XID",
|
||||
T__cid: "_CID",
|
||||
T__oidvector: "_OIDVECTOR",
|
||||
T__bpchar: "_BPCHAR",
|
||||
T__varchar: "_VARCHAR",
|
||||
T__int8: "_INT8",
|
||||
T__point: "_POINT",
|
||||
T__lseg: "_LSEG",
|
||||
T__path: "_PATH",
|
||||
T__box: "_BOX",
|
||||
T__float4: "_FLOAT4",
|
||||
T__float8: "_FLOAT8",
|
||||
T__abstime: "_ABSTIME",
|
||||
T__reltime: "_RELTIME",
|
||||
T__tinterval: "_TINTERVAL",
|
||||
T__polygon: "_POLYGON",
|
||||
T__oid: "_OID",
|
||||
T_aclitem: "ACLITEM",
|
||||
T__aclitem: "_ACLITEM",
|
||||
T__macaddr: "_MACADDR",
|
||||
T__inet: "_INET",
|
||||
T_bpchar: "BPCHAR",
|
||||
T_varchar: "VARCHAR",
|
||||
T_date: "DATE",
|
||||
T_time: "TIME",
|
||||
T_timestamp: "TIMESTAMP",
|
||||
T__timestamp: "_TIMESTAMP",
|
||||
T__date: "_DATE",
|
||||
T__time: "_TIME",
|
||||
T_timestamptz: "TIMESTAMPTZ",
|
||||
T__timestamptz: "_TIMESTAMPTZ",
|
||||
T_interval: "INTERVAL",
|
||||
T__interval: "_INTERVAL",
|
||||
T__numeric: "_NUMERIC",
|
||||
T_pg_database: "PG_DATABASE",
|
||||
T__cstring: "_CSTRING",
|
||||
T_timetz: "TIMETZ",
|
||||
T__timetz: "_TIMETZ",
|
||||
T_bit: "BIT",
|
||||
T__bit: "_BIT",
|
||||
T_varbit: "VARBIT",
|
||||
T__varbit: "_VARBIT",
|
||||
T_numeric: "NUMERIC",
|
||||
T_refcursor: "REFCURSOR",
|
||||
T__refcursor: "_REFCURSOR",
|
||||
T_regprocedure: "REGPROCEDURE",
|
||||
T_regoper: "REGOPER",
|
||||
T_regoperator: "REGOPERATOR",
|
||||
T_regclass: "REGCLASS",
|
||||
T_regtype: "REGTYPE",
|
||||
T__regprocedure: "_REGPROCEDURE",
|
||||
T__regoper: "_REGOPER",
|
||||
T__regoperator: "_REGOPERATOR",
|
||||
T__regclass: "_REGCLASS",
|
||||
T__regtype: "_REGTYPE",
|
||||
T_record: "RECORD",
|
||||
T_cstring: "CSTRING",
|
||||
T_any: "ANY",
|
||||
T_anyarray: "ANYARRAY",
|
||||
T_void: "VOID",
|
||||
T_trigger: "TRIGGER",
|
||||
T_language_handler: "LANGUAGE_HANDLER",
|
||||
T_internal: "INTERNAL",
|
||||
T_opaque: "OPAQUE",
|
||||
T_anyelement: "ANYELEMENT",
|
||||
T__record: "_RECORD",
|
||||
T_anynonarray: "ANYNONARRAY",
|
||||
T_pg_authid: "PG_AUTHID",
|
||||
T_pg_auth_members: "PG_AUTH_MEMBERS",
|
||||
T__txid_snapshot: "_TXID_SNAPSHOT",
|
||||
T_uuid: "UUID",
|
||||
T__uuid: "_UUID",
|
||||
T_txid_snapshot: "TXID_SNAPSHOT",
|
||||
T_fdw_handler: "FDW_HANDLER",
|
||||
T_pg_lsn: "PG_LSN",
|
||||
T__pg_lsn: "_PG_LSN",
|
||||
T_tsm_handler: "TSM_HANDLER",
|
||||
T_anyenum: "ANYENUM",
|
||||
T_tsvector: "TSVECTOR",
|
||||
T_tsquery: "TSQUERY",
|
||||
T_gtsvector: "GTSVECTOR",
|
||||
T__tsvector: "_TSVECTOR",
|
||||
T__gtsvector: "_GTSVECTOR",
|
||||
T__tsquery: "_TSQUERY",
|
||||
T_regconfig: "REGCONFIG",
|
||||
T__regconfig: "_REGCONFIG",
|
||||
T_regdictionary: "REGDICTIONARY",
|
||||
T__regdictionary: "_REGDICTIONARY",
|
||||
T_jsonb: "JSONB",
|
||||
T__jsonb: "_JSONB",
|
||||
T_anyrange: "ANYRANGE",
|
||||
T_event_trigger: "EVENT_TRIGGER",
|
||||
T_int4range: "INT4RANGE",
|
||||
T__int4range: "_INT4RANGE",
|
||||
T_numrange: "NUMRANGE",
|
||||
T__numrange: "_NUMRANGE",
|
||||
T_tsrange: "TSRANGE",
|
||||
T__tsrange: "_TSRANGE",
|
||||
T_tstzrange: "TSTZRANGE",
|
||||
T__tstzrange: "_TSTZRANGE",
|
||||
T_daterange: "DATERANGE",
|
||||
T__daterange: "_DATERANGE",
|
||||
T_int8range: "INT8RANGE",
|
||||
T__int8range: "_INT8RANGE",
|
||||
T_pg_shseclabel: "PG_SHSECLABEL",
|
||||
T_regnamespace: "REGNAMESPACE",
|
||||
T__regnamespace: "_REGNAMESPACE",
|
||||
T_regrole: "REGROLE",
|
||||
T__regrole: "_REGROLE",
|
||||
}
|
||||
71
vendor/github.com/lib/pq/quote.go
generated
vendored
Normal file
71
vendor/github.com/lib/pq/quote.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
// used as part of an SQL statement. For example:
//
//	tblname := "my_table"
//	data := "my_data"
//	quoted := pq.QuoteIdentifier(tblname)
//	err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
//
// Any double quotes in name will be escaped. The quoted identifier will be case
// sensitive when used in a query. If the input string contains a zero byte, the
// result will be truncated immediately before it.
func QuoteIdentifier(name string) string {
	// Postgres cannot represent NUL in identifiers; truncate at the first one.
	end := strings.IndexRune(name, 0)
	if end > -1 {
		name = name[:end]
	}
	// Double any embedded double quotes and wrap the whole name in quotes.
	return `"` + strings.ReplaceAll(name, `"`, `""`) + `"`
}
|
||||
|
||||
// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a
|
||||
// byte buffer.
|
||||
func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) {
|
||||
// TODO(v2): this should have accepted an io.Writer, not *bytes.Buffer.
|
||||
end := strings.IndexRune(name, 0)
|
||||
if end > -1 {
|
||||
name = name[:end]
|
||||
}
|
||||
buffer.WriteRune('"')
|
||||
buffer.WriteString(strings.Replace(name, `"`, `""`, -1))
|
||||
buffer.WriteRune('"')
|
||||
}
|
||||
|
||||
// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal
// to DDL and other statements that do not accept parameters) to be used as part
// of an SQL statement. For example:
//
//	exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z")
//	err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date))
//
// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be
// replaced by two backslashes (i.e. "\\") and the C-style escape identifier
// that PostgreSQL provides ('E') will be prepended to the string.
func QuoteLiteral(literal string) string {
	// This follows the PostgreSQL internal algorithm for handling quoted literals
	// from libpq, which can be found in the "PQEscapeStringInternal" function,
	// which is found in the libpq/fe-exec.c source file:
	// https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c
	//
	// substitute any single-quotes (') with two single-quotes ('')
	literal = strings.ReplaceAll(literal, `'`, `''`)
	// determine if the string has any backslashes (\) in it.
	// if it does, replace any backslashes (\) with two backslashes (\\)
	// then, we need to wrap the entire string with a PostgreSQL
	// C-style escape. Per how "PQEscapeStringInternal" handles this case, we
	// also add a space before the "E"
	if strings.Contains(literal, `\`) {
		literal = strings.ReplaceAll(literal, `\`, `\\`)
		literal = ` E'` + literal + `'`
	} else {
		// otherwise, we can just wrap the literal with a pair of single quotes
		literal = `'` + literal + `'`
	}
	return literal
}
|
||||
245
vendor/github.com/lib/pq/rows.go
generated
vendored
Normal file
245
vendor/github.com/lib/pq/rows.go
generated
vendored
Normal file
@@ -0,0 +1,245 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
// noRows is the driver.Result used when the statement cannot report
// LastInsertId or RowsAffected; both methods return the corresponding
// sentinel errors.
type noRows struct{}

var emptyRows noRows

// Compile-time check that noRows implements driver.Result.
var _ driver.Result = noRows{}

func (noRows) LastInsertId() (int64, error) { return 0, errNoLastInsertID }
func (noRows) RowsAffected() (int64, error) { return 0, errNoRowsAffected }
||||
|
||||
type (
	// rowsHeader describes one result set: column names, type descriptors
	// and wire formats, all indexed by column position.
	rowsHeader struct {
		colNames []string
		colTyps  []fieldDesc
		colFmts  []format
	}
	// rows implements driver.Rows on top of a conn, including the optional
	// multi-result-set, column-type, Result and Tag extensions.
	rows struct {
		cn     *conn
		finish func() // optional cleanup invoked when the rows are closed
		rowsHeader
		done   bool          // set once ReadyForQuery has been processed
		rb     readBuf       // scratch buffer for incoming protocol messages
		result driver.Result // filled in from CommandComplete
		tag    string        // command tag from CommandComplete

		// next holds the header of the following result set, recorded when
		// a RowDescription arrives (see HasNextResultSet/NextResultSet).
		next *rowsHeader
	}
)
||||
|
||||
// Close drains the remaining protocol messages for this query (so the
// connection can be reused) and then runs the finish callback, if any.
func (rs *rows) Close() error {
	if finish := rs.finish; finish != nil {
		defer finish()
	}
	// no need to look at cn.bad as Next() will
	for {
		err := rs.Next(nil)
		switch err {
		case nil:
		case io.EOF:
			// rs.Next can return io.EOF on both ReadyForQuery and
			// RowDescription (used with HasNextResultSet). We need to fetch
			// messages until we hit a ReadyForQuery, which is done by waiting
			// for done to be set.
			if rs.done {
				return nil
			}
		default:
			return err
		}
	}
}
||||
|
||||
// Columns returns the column names of the current result set.
func (rs *rows) Columns() []string {
	return rs.colNames
}

// Result returns the driver.Result for the executed statement; if none was
// recorded it returns a placeholder that reports no LastInsertId or
// RowsAffected.
func (rs *rows) Result() driver.Result {
	if rs.result == nil {
		return emptyRows
	}
	return rs.result
}

// Tag returns the command tag parsed from the CommandComplete message, or
// "" if none has been received yet.
func (rs *rows) Tag() string {
	return rs.tag
}
||||
|
||||
// Next reads protocol messages until the next DataRow (decoded into dest),
// a RowDescription starting another result set, or ReadyForQuery ending the
// query. It returns io.EOF when the current result set is exhausted.
func (rs *rows) Next(dest []driver.Value) (resErr error) {
	if rs.done {
		return io.EOF
	}
	if err := rs.cn.err.getForNext(); err != nil {
		return err
	}

	for {
		t, err := rs.cn.recv1Buf(&rs.rb)
		if err != nil {
			return rs.cn.handleError(err)
		}
		switch t {
		case proto.ErrorResponse:
			// Remember the server error; it is returned to the caller once
			// the matching ReadyForQuery arrives.
			resErr = parseError(&rs.rb, "")
		case proto.CommandComplete, proto.EmptyQueryResponse:
			if t == proto.CommandComplete {
				rs.result, rs.tag, err = rs.cn.parseComplete(rs.rb.string())
				if err != nil {
					return rs.cn.handleError(err)
				}
			}
			continue
		case proto.ReadyForQuery:
			rs.cn.processReadyForQuery(&rs.rb)
			rs.done = true
			if resErr != nil {
				return rs.cn.handleError(resErr)
			}
			return io.EOF
		case proto.DataRow:
			n := rs.rb.int16()
			if resErr != nil {
				// A DataRow after an ErrorResponse violates the protocol;
				// treat the connection as unusable.
				rs.cn.err.set(driver.ErrBadConn)
				return fmt.Errorf("pq: unexpected DataRow after error %s", resErr)
			}
			if n < len(dest) {
				dest = dest[:n]
			}
			for i := range dest {
				l := rs.rb.int32()
				if l == -1 {
					// Length -1 marks a NULL column.
					dest[i] = nil
					continue
				}
				dest[i], err = decode(&rs.cn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
				if err != nil {
					return rs.cn.handleError(err)
				}
			}
			return rs.cn.handleError(resErr)
		case proto.RowDescription:
			// Start of the next result set; expose it via HasNextResultSet.
			next := parsePortalRowDescribe(&rs.rb)
			rs.next = &next
			return io.EOF
		default:
			return fmt.Errorf("pq: unexpected message after execute: %q", t)
		}
	}
}
||||
|
||||
// HasNextResultSet reports whether another result set follows the current
// one (a RowDescription was received before ReadyForQuery).
func (rs *rows) HasNextResultSet() bool {
	hasNext := rs.next != nil && !rs.done
	return hasNext
}

// NextResultSet advances to the next result set, or returns io.EOF if there
// is none.
func (rs *rows) NextResultSet() error {
	if rs.next == nil {
		return io.EOF
	}
	rs.rowsHeader = *rs.next
	rs.next = nil
	return nil
}
||||
|
||||
// ColumnTypeScanType returns the value type that can be used to scan types into.
|
||||
func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
|
||||
return rs.colTyps[index].Type()
|
||||
}
|
||||
|
||||
// ColumnTypeDatabaseTypeName return the database system type name.
|
||||
func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
|
||||
return rs.colTyps[index].Name()
|
||||
}
|
||||
|
||||
// ColumnTypeLength returns the length of the column type if the column is a
|
||||
// variable length type. If the column is not a variable length type ok
|
||||
// should return false.
|
||||
func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
|
||||
return rs.colTyps[index].Length()
|
||||
}
|
||||
|
||||
// ColumnTypePrecisionScale should return the precision and scale for decimal
|
||||
// types. If not applicable, ok should be false.
|
||||
func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
|
||||
return rs.colTyps[index].PrecisionScale()
|
||||
}
|
||||
|
||||
const headerSize = 4
|
||||
|
||||
type fieldDesc struct {
|
||||
// The object ID of the data type.
|
||||
OID oid.Oid
|
||||
// The data type size (see pg_type.typlen).
|
||||
// Note that negative values denote variable-width types.
|
||||
Len int
|
||||
// The type modifier (see pg_attribute.atttypmod).
|
||||
// The meaning of the modifier is type-specific.
|
||||
Mod int
|
||||
}
|
||||
|
||||
// Type maps the column's OID to the Go type used to scan it; unrecognized
// OIDs fall back to any.
func (fd fieldDesc) Type() reflect.Type {
	switch fd.OID {
	case oid.T_int8:
		return reflect.TypeOf(int64(0))
	case oid.T_int4:
		return reflect.TypeOf(int32(0))
	case oid.T_int2:
		return reflect.TypeOf(int16(0))
	case oid.T_float8:
		return reflect.TypeOf(float64(0))
	case oid.T_float4:
		return reflect.TypeOf(float32(0))
	case oid.T_varchar, oid.T_text, oid.T_varbit, oid.T_bit:
		return reflect.TypeOf("")
	case oid.T_bool:
		return reflect.TypeOf(false)
	case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
		return reflect.TypeOf(time.Time{})
	case oid.T_bytea:
		return reflect.TypeOf([]byte(nil))
	default:
		return reflect.TypeOf(new(any)).Elem()
	}
}
||||
|
||||
// Name returns the Postgres type name for the column's OID, or "" if the
// OID is not present in oid.TypeName.
func (fd fieldDesc) Name() string {
	return oid.TypeName[fd.OID]
}
||||
|
||||
// Length reports the maximum length of variable-width columns; ok is false
// for fixed-width and unrecognized types.
func (fd fieldDesc) Length() (length int64, ok bool) {
	switch fd.OID {
	case oid.T_text, oid.T_bytea:
		// No declared limit; report the largest representable length.
		return math.MaxInt64, true
	case oid.T_varchar, oid.T_bpchar:
		// The declared length is stored in the type modifier, offset by the
		// 4-byte atttypmod header.
		return int64(fd.Mod - headerSize), true
	case oid.T_varbit, oid.T_bit:
		return int64(fd.Mod), true
	default:
		return 0, false
	}
}
||||
|
||||
// PrecisionScale unpacks precision and scale for numeric columns from the
// type modifier; ok is false for all other types.
func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
	switch fd.OID {
	case oid.T_numeric, oid.T__numeric:
		mod := fd.Mod - headerSize
		// Precision is stored in the high 16 bits of the modifier, scale in
		// the low 16 bits.
		precision = int64((mod >> 16) & 0xffff)
		scale = int64(mod & 0xffff)
		return precision, scale, true
	default:
		return 0, 0, false
	}
}
||||
261
vendor/github.com/lib/pq/scram/scram.go
generated
vendored
Normal file
261
vendor/github.com/lib/pq/scram/scram.go
generated
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||
// list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc5802
|
||||
package scram
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
//	var in []byte
//	var client = scram.NewClient(sha1.New, user, pass)
//	for client.Step(in) {
//		out := client.Out()
//		// send out to server
//		in := serverOut
//	}
//	if client.Err() != nil {
//		// auth failed
//	}
type Client struct {
	// newHash constructs the underlying hash (e.g. sha1.New, sha256.New).
	newHash func() hash.Hash

	user string
	pass string
	// step counts completed conversation steps; Step stops after 3.
	step int
	// out holds the bytes to send to the server for the current step.
	out bytes.Buffer
	// err is sticky: once set, Step refuses to continue.
	err error

	// clientNonce is the base64-encoded client nonce (generated in step1
	// unless provided via SetNonce).
	clientNonce []byte
	// serverNonce is the combined nonce echoed by the server in step2; it
	// must be prefixed by clientNonce.
	serverNonce []byte
	// saltedPass is the iterated HMAC fold computed by saltPassword.
	saltedPass []byte
	// authMsg accumulates the authentication message that both proofs
	// (client and server) are computed over.
	authMsg bytes.Buffer
}
|
||||
|
||||
// NewClient returns a new SCRAM-* client with the provided hash algorithm.
|
||||
//
|
||||
// For SCRAM-SHA-256, for example, use:
|
||||
//
|
||||
// client := scram.NewClient(sha256.New, user, pass)
|
||||
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
|
||||
c := &Client{
|
||||
newHash: newHash,
|
||||
user: user,
|
||||
pass: pass,
|
||||
}
|
||||
c.out.Grow(256)
|
||||
c.authMsg.Grow(256)
|
||||
return c
|
||||
}
|
||||
|
||||
// Out returns the data to be sent to the server in the current step.
|
||||
func (c *Client) Out() []byte {
|
||||
if c.out.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
return c.out.Bytes()
|
||||
}
|
||||
|
||||
// Err returns the error that occurred, or nil if there were no errors.
|
||||
func (c *Client) Err() error {
|
||||
return c.err
|
||||
}
|
||||
|
||||
// SetNonce sets the client nonce to the provided value.
|
||||
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
|
||||
func (c *Client) SetNonce(nonce []byte) {
|
||||
c.clientNonce = nonce
|
||||
}
|
||||
|
||||
// escaper encodes "=" and "," in the user name using the =3D / =2C escape
// sequences required for saslname values in SCRAM messages.
var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
|
||||
|
||||
// Step processes the incoming data from the server and makes the
|
||||
// next round of data for the server available via Client.Out.
|
||||
// Step returns false if there are no errors and more data is
|
||||
// still expected.
|
||||
func (c *Client) Step(in []byte) bool {
|
||||
c.out.Reset()
|
||||
if c.step > 2 || c.err != nil {
|
||||
return false
|
||||
}
|
||||
c.step++
|
||||
switch c.step {
|
||||
case 1:
|
||||
c.err = c.step1(in)
|
||||
case 2:
|
||||
c.err = c.step2(in)
|
||||
case 3:
|
||||
c.err = c.step3(in)
|
||||
}
|
||||
return c.step > 2 || c.err != nil
|
||||
}
|
||||
|
||||
// step1 builds the client-first message: a GS2 header ("n,," — no channel
// binding, no authzid) followed by "n=<user>,r=<nonce>". The bare part
// (without the GS2 header) starts the authentication message used later for
// the proofs.
func (c *Client) step1(in []byte) error {
	if len(c.clientNonce) == 0 {
		const nonceLen = 16
		// One allocation holds both the raw random bytes (front) and their
		// base64 encoding (back); clientNonce aliases the encoded half.
		buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
		if _, err := rand.Read(buf[:nonceLen]); err != nil {
			return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %w", err)
		}
		c.clientNonce = buf[nonceLen:]
		b64.Encode(c.clientNonce, buf[:nonceLen])
	}
	// Start the auth message: "n=<escaped user>,r=<client nonce>".
	c.authMsg.WriteString("n=")
	escaper.WriteString(&c.authMsg, c.user)
	c.authMsg.WriteString(",r=")
	c.authMsg.Write(c.clientNonce)

	// The wire message carries the GS2 header; the auth message does not.
	c.out.WriteString("n,,")
	c.out.Write(c.authMsg.Bytes())
	return nil
}
|
||||
|
||||
// b64 is the base64 encoding used for SCRAM nonces, salts, and proofs.
var b64 = base64.StdEncoding
|
||||
|
||||
// step2 parses the server-first message ("r=<nonce>,s=<salt>,i=<count>"),
// derives the salted password, and builds the client-final message
// ("c=biws,r=<nonce>,p=<proof>"; "biws" is base64("n,,")).
func (c *Client) step2(in []byte) error {
	// The server-first message is appended verbatim to the auth message.
	c.authMsg.WriteByte(',')
	c.authMsg.Write(in)

	fields := bytes.Split(in, []byte(","))
	if len(fields) != 3 {
		return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
	}
	if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
	}
	if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
	}
	if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
	}

	// The combined nonce must extend — not replace — the client nonce, to
	// tie the server's response to this conversation.
	c.serverNonce = fields[0][2:]
	if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
		return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
	}

	salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
	n, err := b64.Decode(salt, fields[1][2:])
	if err != nil {
		return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
	}
	salt = salt[:n]
	iterCount, err := strconv.Atoi(string(fields[2][2:]))
	if err != nil {
		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
	}
	c.saltPassword(salt, iterCount)

	// Append the client-final message without the proof to the auth message;
	// both proofs are computed over the message in exactly this state.
	c.authMsg.WriteString(",c=biws,r=")
	c.authMsg.Write(c.serverNonce)

	c.out.WriteString("c=biws,r=")
	c.out.Write(c.serverNonce)
	c.out.WriteString(",p=")
	c.out.Write(c.clientProof())
	return nil
}
|
||||
|
||||
func (c *Client) step3(in []byte) error {
|
||||
var isv, ise bool
|
||||
var fields = bytes.Split(in, []byte(","))
|
||||
if len(fields) == 1 {
|
||||
isv = bytes.HasPrefix(fields[0], []byte("v="))
|
||||
ise = bytes.HasPrefix(fields[0], []byte("e="))
|
||||
}
|
||||
if ise {
|
||||
return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:])
|
||||
} else if !isv {
|
||||
return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in)
|
||||
}
|
||||
if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
|
||||
return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saltPassword computes Hi(password, salt, iterCount) per RFC 5802 — i.e.
// PBKDF2 with HMAC as the PRF and a single output block: U1 = HMAC(pass,
// salt || INT(1)), Un = HMAC(pass, Un-1), result = U1 XOR ... XOR Un.
// The result is stored in c.saltedPass.
func (c *Client) saltPassword(salt []byte, iterCount int) {
	mac := hmac.New(c.newHash, []byte(c.pass))
	mac.Write(salt)
	mac.Write([]byte{0, 0, 0, 1}) // INT(1): big-endian block index
	ui := mac.Sum(nil)
	hi := make([]byte, len(ui))
	copy(hi, ui)
	for i := 1; i < iterCount; i++ {
		mac.Reset()
		mac.Write(ui)
		// Overwrite ui in place: Sum appends to ui[:0], reusing the buffer.
		mac.Sum(ui[:0])
		for j, b := range ui {
			hi[j] ^= b
		}
	}
	c.saltedPass = hi
}
|
||||
|
||||
// clientProof computes the base64-encoded ClientProof:
// ClientKey = HMAC(saltedPass, "Client Key"); StoredKey = H(ClientKey);
// ClientSignature = HMAC(StoredKey, authMsg);
// ClientProof = ClientKey XOR ClientSignature.
func (c *Client) clientProof() []byte {
	mac := hmac.New(c.newHash, c.saltedPass)
	mac.Write([]byte("Client Key"))
	clientKey := mac.Sum(nil)
	hash := c.newHash()
	hash.Write(clientKey)
	storedKey := hash.Sum(nil)
	mac = hmac.New(c.newHash, storedKey)
	mac.Write(c.authMsg.Bytes())
	clientProof := mac.Sum(nil)
	// XOR the signature with ClientKey in place to form the proof.
	for i, b := range clientKey {
		clientProof[i] ^= b
	}
	clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
	b64.Encode(clientProof64, clientProof)
	return clientProof64
}
|
||||
|
||||
func (c *Client) serverSignature() []byte {
|
||||
mac := hmac.New(c.newHash, c.saltedPass)
|
||||
mac.Write([]byte("Server Key"))
|
||||
serverKey := mac.Sum(nil)
|
||||
|
||||
mac = hmac.New(c.newHash, serverKey)
|
||||
mac.Write(c.authMsg.Bytes())
|
||||
serverSignature := mac.Sum(nil)
|
||||
|
||||
encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
|
||||
b64.Encode(encoded, serverSignature)
|
||||
return encoded
|
||||
}
|
||||
250
vendor/github.com/lib/pq/ssl.go
generated
vendored
Normal file
250
vendor/github.com/lib/pq/ssl.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/lib/pq/internal/pqutil"
|
||||
)
|
||||
|
||||
// Registry for custom tls.Configs
|
||||
var (
|
||||
tlsConfs = make(map[string]*tls.Config)
|
||||
tlsConfsMu sync.RWMutex
|
||||
)
|
||||
|
||||
// RegisterTLSConfig registers a custom [tls.Config]. They are used by using
|
||||
// sslmode=pqgo-«key» in the connection string.
|
||||
//
|
||||
// Set the config to nil to remove a configuration.
|
||||
func RegisterTLSConfig(key string, config *tls.Config) error {
|
||||
key = strings.TrimPrefix(key, "pqgo-")
|
||||
if config == nil {
|
||||
tlsConfsMu.Lock()
|
||||
delete(tlsConfs, key)
|
||||
tlsConfsMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
tlsConfsMu.Lock()
|
||||
tlsConfs[key] = config
|
||||
tlsConfsMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasTLSConfig(key string) bool {
|
||||
tlsConfsMu.RLock()
|
||||
defer tlsConfsMu.RUnlock()
|
||||
_, ok := tlsConfs[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func getTLSConfigClone(key string) *tls.Config {
|
||||
tlsConfsMu.RLock()
|
||||
defer tlsConfsMu.RUnlock()
|
||||
if v, ok := tlsConfs[key]; ok {
|
||||
return v.Clone()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place
// (sslmode=disable). A non-nil error means the configuration is invalid.
func ssl(cfg Config) (func(net.Conn) (net.Conn, error), error) {
	var (
		verifyCaOnly = false
		tlsConf      = &tls.Config{}
		mode         = cfg.SSLMode
	)
	switch {
	// "require" is the default.
	case mode == "" || mode == SSLModeRequire:
		// We must skip TLS's own verification since it requires full
		// verification since Go 1.3.
		tlsConf.InsecureSkipVerify = true

		// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
		//
		// Note: For backwards compatibility with earlier versions of
		// PostgreSQL, if a root CA file exists, the behavior of
		// sslmode=require will be the same as that of verify-ca, meaning the
		// server certificate is validated against the CA. Relying on this
		// behavior is discouraged, and applications that need certificate
		// validation should always use verify-ca or verify-full.
		if cfg.SSLRootCert != "" {
			if _, err := os.Stat(cfg.SSLRootCert); err == nil {
				verifyCaOnly = true
			} else {
				// Root cert configured but unreadable: fall back to plain
				// "require" semantics by forgetting the setting.
				cfg.SSLRootCert = ""
			}
		}
	case mode == SSLModeVerifyCA:
		// We must skip TLS's own verification since it requires full
		// verification since Go 1.3.
		tlsConf.InsecureSkipVerify = true
		verifyCaOnly = true
	case mode == SSLModeVerifyFull:
		// Full verification: crypto/tls checks both the chain and hostname.
		tlsConf.ServerName = cfg.Host
	case mode == SSLModeDisable:
		return nil, nil
	case strings.HasPrefix(string(mode), "pqgo-"):
		// Custom config registered via RegisterTLSConfig; mode[5:] is the key.
		tlsConf = getTLSConfigClone(string(mode[5:]))
		if tlsConf == nil {
			return nil, fmt.Errorf(`pq: unknown custom sslmode %q`, mode)
		}
	default:
		return nil, fmt.Errorf(
			`pq: unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`,
			mode)
	}

	// Set Server Name Indication (SNI), if enabled by connection parameters.
	if cfg.SSLSNI {
		// RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4
		// or IPv6). This check is coded already crypto.tls.hostnameInSNI, so
		// just always set ServerName here and let crypto/tls do the filtering.
		tlsConf.ServerName = cfg.Host
	}

	err := sslClientCertificates(tlsConf, cfg)
	if err != nil {
		return nil, err
	}
	err = sslCertificateAuthority(tlsConf, cfg)
	if err != nil {
		return nil, err
	}

	// Accept renegotiation requests initiated by the backend.
	//
	// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
	// the default configuration of older versions has it enabled. Redshift
	// also initiates renegotiations and cannot be reconfigured.
	tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient

	return func(conn net.Conn) (net.Conn, error) {
		client := tls.Client(conn, tlsConf)
		if verifyCaOnly {
			// verify-ca: since InsecureSkipVerify disabled crypto/tls's own
			// checks, verify the certificate chain manually — but not the
			// hostname (that is what distinguishes verify-ca from
			// verify-full).
			err := client.Handshake()
			if err != nil {
				return client, err
			}
			var (
				certs = client.ConnectionState().PeerCertificates
				opts  = x509.VerifyOptions{Intermediates: x509.NewCertPool(), Roots: tlsConf.RootCAs}
			)
			// certs[0] is the leaf; the rest are treated as intermediates.
			for _, cert := range certs[1:] {
				opts.Intermediates.AddCert(cert)
			}
			_, err = certs[0].Verify(opts)
			return client, err
		}
		return client, nil
	}, nil
}
|
||||
|
||||
// sslClientCertificates adds the certificate specified in the "sslcert" and
|
||||
//
|
||||
// "sslkey" settings, or if they aren't set, from the .postgresql directory
|
||||
// in the user's home directory. The configured files must exist and have
|
||||
// the correct permissions.
|
||||
func sslClientCertificates(tlsConf *tls.Config, cfg Config) error {
|
||||
if cfg.SSLInline {
|
||||
cert, err := tls.X509KeyPair([]byte(cfg.SSLCert), []byte(cfg.SSLKey))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tlsConf.Certificates = []tls.Certificate{cert}
|
||||
return nil
|
||||
}
|
||||
|
||||
home := pqutil.Home()
|
||||
|
||||
// In libpq, the client certificate is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
|
||||
sslcert := cfg.SSLCert
|
||||
if len(sslcert) == 0 && home != "" {
|
||||
if runtime.GOOS == "windows" {
|
||||
sslcert = filepath.Join(sslcert, "postgresql.crt")
|
||||
} else {
|
||||
sslcert = filepath.Join(home, ".postgresql/postgresql.crt")
|
||||
}
|
||||
}
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
|
||||
if len(sslcert) == 0 {
|
||||
return nil
|
||||
}
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
|
||||
_, err := os.Stat(sslcert)
|
||||
if err != nil {
|
||||
perr := new(os.PathError)
|
||||
if errors.As(err, &perr) && (perr.Err == syscall.ENOENT || perr.Err == syscall.ENOTDIR) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// In libpq, the ssl key is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
|
||||
sslkey := cfg.SSLKey
|
||||
if len(sslkey) == 0 && home != "" {
|
||||
if runtime.GOOS == "windows" {
|
||||
sslkey = filepath.Join(home, "postgresql.key")
|
||||
} else {
|
||||
sslkey = filepath.Join(home, ".postgresql/postgresql.key")
|
||||
}
|
||||
}
|
||||
|
||||
if len(sslkey) > 0 {
|
||||
err := pqutil.SSLKeyPermissions(sslkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tlsConf.Certificates = []tls.Certificate{cert}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
|
||||
func sslCertificateAuthority(tlsConf *tls.Config, cfg Config) error {
|
||||
// In libpq, the root certificate is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
|
||||
if sslrootcert := cfg.SSLRootCert; len(sslrootcert) > 0 {
|
||||
tlsConf.RootCAs = x509.NewCertPool()
|
||||
|
||||
var cert []byte
|
||||
if cfg.SSLInline {
|
||||
cert = []byte(sslrootcert)
|
||||
} else {
|
||||
var err error
|
||||
cert, err = os.ReadFile(sslrootcert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
|
||||
return errors.New("pq: couldn't parse pem in sslrootcert")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
5
vendor/github.com/lib/pq/staticcheck.conf
generated
vendored
Normal file
5
vendor/github.com/lib/pq/staticcheck.conf
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
checks = [
|
||||
'all',
|
||||
'-ST1000', # "Must have at least one package comment"
|
||||
'-ST1003', # "func EnableInfinityTs should be EnableInfinityTS"
|
||||
]
|
||||
150
vendor/github.com/lib/pq/stmt.go
generated
vendored
Normal file
150
vendor/github.com/lib/pq/stmt.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/lib/pq/internal/proto"
|
||||
"github.com/lib/pq/oid"
|
||||
)
|
||||
|
||||
// stmt is a prepared statement bound to a single connection.
type stmt struct {
	cn *conn // owning connection
	// name is the statement name sent in Close/Bind messages ("" when unnamed).
	name string
	rowsHeader
	// colFmtData is the pre-encoded result-format section appended to the
	// Bind message (see exec).
	colFmtData []byte
	// paramTyps holds the parameter type OIDs used to encode arguments.
	paramTyps []oid.Oid
	// closed is set once Close completes; further Close calls are no-ops.
	closed bool
}
|
||||
|
||||
// Close deallocates the prepared statement on the server. It is idempotent:
// once closed, further calls return nil. Unexpected protocol responses mark
// the connection bad (driver.ErrBadConn).
func (st *stmt) Close() error {
	if st.closed {
		return nil
	}
	if err := st.cn.err.get(); err != nil {
		return err
	}

	// Close message for this statement. NOTE(review): the byte written after
	// the Close tag identifies the object kind; proto.Sync appears to be
	// reused here because its byte value matches the 'S' ("statement")
	// object-kind code — confirm against the proto constants.
	w := st.cn.writeBuf(proto.Close)
	w.byte(proto.Sync)
	w.string(st.name)
	err := st.cn.send(w)
	if err != nil {
		return st.cn.handleError(err)
	}
	// Follow with a Sync so the backend answers and returns to the ready state.
	err = st.cn.send(st.cn.writeBuf(proto.Sync))
	if err != nil {
		return st.cn.handleError(err)
	}

	// Expect CloseComplete first...
	t, _, err := st.cn.recv1()
	if err != nil {
		return st.cn.handleError(err)
	}
	if t != proto.CloseComplete {
		st.cn.err.set(driver.ErrBadConn)
		return fmt.Errorf("pq: unexpected close response: %q", t)
	}
	st.closed = true

	// ...then ReadyForQuery, whose payload carries the transaction status.
	t, r, err := st.cn.recv1()
	if err != nil {
		return st.cn.handleError(err)
	}
	if t != proto.ReadyForQuery {
		st.cn.err.set(driver.ErrBadConn)
		return fmt.Errorf("pq: expected ready for query, but got: %q", t)
	}
	st.cn.processReadyForQuery(r)

	return nil
}
|
||||
|
||||
func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return st.query(toNamedValue(v))
|
||||
}
|
||||
|
||||
func (st *stmt) query(v []driver.NamedValue) (*rows, error) {
|
||||
if err := st.cn.err.get(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := st.exec(v)
|
||||
if err != nil {
|
||||
return nil, st.cn.handleError(err)
|
||||
}
|
||||
return &rows{
|
||||
cn: st.cn,
|
||||
rowsHeader: st.rowsHeader,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (st *stmt) Exec(v []driver.Value) (driver.Result, error) {
|
||||
return st.ExecContext(context.Background(), toNamedValue(v))
|
||||
}
|
||||
|
||||
// exec sends the Bind/Execute/Sync sequence for this prepared statement with
// the given parameters and waits for the bind response. Results are consumed
// by the caller (query/ExecContext).
func (st *stmt) exec(v []driver.NamedValue) error {
	if debugProto {
		fmt.Fprintf(os.Stderr, "  START stmt.exec\n")
		defer fmt.Fprintf(os.Stderr, "  END stmt.exec\n")
	}
	// The wire format stores the parameter count in 16 bits.
	if len(v) >= 65536 {
		return fmt.Errorf("pq: got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
	}
	if len(v) != len(st.paramTyps) {
		return fmt.Errorf("pq: got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
	}

	cn := st.cn
	w := cn.writeBuf(proto.Bind)
	w.byte(0) // unnamed portal
	w.string(st.name)

	if cn.cfg.BinaryParameters {
		err := cn.sendBinaryParameters(w, v)
		if err != nil {
			return err
		}
	} else {
		// Text format for all parameters: zero format codes, then one
		// length-prefixed value per parameter (-1 length encodes NULL).
		w.int16(0)
		w.int16(len(v))
		for i, x := range v {
			if x.Value == nil {
				w.int32(-1)
			} else {
				b, err := encode(x.Value, st.paramTyps[i])
				if err != nil {
					return err
				}
				if b == nil {
					w.int32(-1)
				} else {
					w.int32(len(b))
					w.bytes(b)
				}
			}
		}
	}
	// Pre-encoded result-column format section computed at prepare time.
	w.bytes(st.colFmtData)

	// Execute the unnamed portal; the zero row limit requests all rows
	// (per the extended-query protocol).
	w.next(proto.Execute)
	w.byte(0)
	w.int32(0)

	w.next(proto.Sync)
	err := cn.send(w)
	if err != nil {
		return err
	}
	err = cn.readBindResponse()
	if err != nil {
		return err
	}
	return cn.postExecuteWorkaround()
}
|
||||
|
||||
func (st *stmt) NumInput() int {
|
||||
return len(st.paramTyps)
|
||||
}
|
||||
Reference in New Issue
Block a user