newline battles continue
vendor/go.mongodb.org/mongo-driver/LICENSE (+201 lines; generated, vendored, executable file)
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES (+1336 lines; generated, vendored, executable file)
File diff suppressed because it is too large
vendor/go.mongodb.org/mongo-driver/bson/bson.go (+60 lines; generated, vendored, executable file)
@@ -0,0 +1,60 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
//
// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
// See THIRD-PARTY-NOTICES for original license terms.

// +build go1.9

package bson // import "go.mongodb.org/mongo-driver/bson"

import (
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// Zeroer allows custom struct types to implement a report of zero
// state. All struct types that don't implement Zeroer or where IsZero
// returns false are considered to be not zero.
type Zeroer interface {
	IsZero() bool
}

// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
// Document types should be used.
//
// Example usage:
//
//	bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
//
// This type should be used in situations where order matters, such as MongoDB commands. If the
// order is not important, a map is more comfortable and concise.
type D = primitive.D

// E represents a BSON element for a D. It is usually used inside a D.
type E = primitive.E

// M is an unordered, concise representation of a BSON Document. It should generally be used to
// serialize BSON when the order of the elements of a BSON document do not matter. If the element
// order matters, use a D instead.
//
// Example usage:
//
//	bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
//
// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
// serialized in an undefined, random order, and the order will be different each time.
type M = primitive.M

// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
// readable manner. It should generally be used when serializing to BSON. For deserializing, the
// RawArray or Array types should be used.
//
// Example usage:
//
//	bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
//
type A = primitive.A
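For orientation, a minimal sketch of how the D, M, and A aliases above are typically used. bson.Marshal lives elsewhere in the bson package (it is not part of this diff), and the field names and values are illustrative assumptions:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// D keeps element order, which matters for things like command documents.
	cmd := bson.D{{Key: "ping", Value: 1}}

	// M is an ordinary map; use it when element order is irrelevant.
	filter := bson.M{"name": "alice", "age": bson.M{"$gt": 30}}

	// A is a plain slice alias for BSON arrays; wrap it in a document to marshal it.
	tags := bson.A{"go", "bson", 3.14159}

	for _, doc := range []interface{}{cmd, filter, bson.M{"tags": tags}} {
		raw, err := bson.Marshal(doc)
		if err != nil {
			fmt.Println("marshal error:", err)
			continue
		}
		fmt.Printf("%d bytes of BSON\n", len(raw))
	}
}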
vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go (+91 lines; generated, vendored, executable file)
@@ -0,0 +1,91 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

// +build !go1.9

package bson // import "go.mongodb.org/mongo-driver/bson"

import (
	"math"
	"strconv"
	"strings"
)

// Zeroer allows custom struct types to implement a report of zero
// state. All struct types that don't implement Zeroer or where IsZero
// returns false are considered to be not zero.
type Zeroer interface {
	IsZero() bool
}

// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
// Document types should be used.
//
// Example usage:
//
//	primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
//
// This type should be used in situations where order matters, such as MongoDB commands. If the
// order is not important, a map is more comfortable and concise.
type D []E

// Map creates a map from the elements of the D.
func (d D) Map() M {
	m := make(M, len(d))
	for _, e := range d {
		m[e.Key] = e.Value
	}
	return m
}

// E represents a BSON element for a D. It is usually used inside a D.
type E struct {
	Key   string
	Value interface{}
}

// M is an unordered, concise representation of a BSON Document. It should generally be used to
// serialize BSON when the order of the elements of a BSON document do not matter. If the element
// order matters, use a D instead.
//
// Example usage:
//
//	primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
//
// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
// serialized in an undefined, random order, and the order will be different each time.
type M map[string]interface{}

// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
// readable manner. It should generally be used when serializing to BSON. For deserializing, the
// RawArray or Array types should be used.
//
// Example usage:
//
//	primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
//
type A []interface{}

func formatDouble(f float64) string {
	var s string
	if math.IsInf(f, 1) {
		s = "Infinity"
	} else if math.IsInf(f, -1) {
		s = "-Infinity"
	} else if math.IsNaN(f) {
		s = "NaN"
	} else {
		// Print exactly one decimalType place for integers; otherwise, print as many are necessary to
		// perfectly represent it.
		s = strconv.FormatFloat(f, 'G', -1, 64)
		if !strings.ContainsRune(s, '.') {
			s += ".0"
		}
	}

	return s
}
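The only non-obvious logic in this pre-go1.9 variant is formatDouble, which is unexported. A standalone sketch that mirrors the same rules, showing what they yield for a few inputs (the wrapper name and the sample values are illustrative):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// sameAsFormatDouble mirrors formatDouble above: special-case infinities and
// NaN, otherwise format with 'G' and force one decimal place for integral values.
func sameAsFormatDouble(f float64) string {
	switch {
	case math.IsInf(f, 1):
		return "Infinity"
	case math.IsInf(f, -1):
		return "-Infinity"
	case math.IsNaN(f):
		return "NaN"
	}
	s := strconv.FormatFloat(f, 'G', -1, 64)
	if !strings.ContainsRune(s, '.') {
		s += ".0" // integers always carry one decimal place
	}
	return s
}

func main() {
	// Prints: 3 -> 3.0, 3.14159 -> 3.14159, -Inf -> -Infinity, NaN -> NaN
	for _, f := range []float64{3, 3.14159, math.Inf(-1), math.NaN()} {
		fmt.Printf("%v -> %s\n", f, sameAsFormatDouble(f))
	}
}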
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go (+163 lines; generated, vendored, executable file)
@@ -0,0 +1,163 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec"

import (
	"fmt"
	"reflect"
	"strings"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// Marshaler is an interface implemented by types that can marshal themselves
// into a BSON document represented as bytes. The bytes returned must be a valid
// BSON document if the error is nil.
type Marshaler interface {
	MarshalBSON() ([]byte, error)
}

// ValueMarshaler is an interface implemented by types that can marshal
// themselves into a BSON value as bytes. The type must be the valid type for
// the bytes returned. The bytes and byte type together must be valid if the
// error is nil.
type ValueMarshaler interface {
	MarshalBSONValue() (bsontype.Type, []byte, error)
}

// Unmarshaler is an interface implemented by types that can unmarshal a BSON
// document representation of themselves. The BSON bytes can be assumed to be
// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
// after returning.
type Unmarshaler interface {
	UnmarshalBSON([]byte) error
}

// ValueUnmarshaler is an interface implemented by types that can unmarshal a
// BSON value representaiton of themselves. The BSON bytes and type can be
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
type ValueUnmarshaler interface {
	UnmarshalBSONValue(bsontype.Type, []byte) error
}

// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be
// encoded by the ValueEncoder.
type ValueEncoderError struct {
	Name     string
	Types    []reflect.Type
	Kinds    []reflect.Kind
	Received reflect.Value
}

func (vee ValueEncoderError) Error() string {
	typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds))
	for _, t := range vee.Types {
		typeKinds = append(typeKinds, t.String())
	}
	for _, k := range vee.Kinds {
		if k == reflect.Map {
			typeKinds = append(typeKinds, "map[string]*")
			continue
		}
		typeKinds = append(typeKinds, k.String())
	}
	received := vee.Received.Kind().String()
	if vee.Received.IsValid() {
		received = vee.Received.Type().String()
	}
	return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received)
}

// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be
// decoded by the ValueDecoder.
type ValueDecoderError struct {
	Name     string
	Types    []reflect.Type
	Kinds    []reflect.Kind
	Received reflect.Value
}

func (vde ValueDecoderError) Error() string {
	typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds))
	for _, t := range vde.Types {
		typeKinds = append(typeKinds, t.String())
	}
	for _, k := range vde.Kinds {
		if k == reflect.Map {
			typeKinds = append(typeKinds, "map[string]*")
			continue
		}
		typeKinds = append(typeKinds, k.String())
	}
	received := vde.Received.Kind().String()
	if vde.Received.IsValid() {
		received = vde.Received.Type().String()
	}
	return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received)
}

// EncodeContext is the contextual information required for a Codec to encode a
// value.
type EncodeContext struct {
	*Registry
	MinSize bool
}

// DecodeContext is the contextual information required for a Codec to decode a
// value.
type DecodeContext struct {
	*Registry
	Truncate bool
	// Ancestor is the type of a containing document. This is mainly used to determine what type
	// should be used when decoding an embedded document into an empty interface. For example, if
	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
	// will be decoded into a bson.M.
	Ancestor reflect.Type
}

// ValueCodec is the interface that groups the methods to encode and decode
// values.
type ValueCodec interface {
	ValueEncoder
	ValueDecoder
}

// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
type ValueEncoder interface {
	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
}

// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
// used as a ValueEncoder.
type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error

// EncodeValue implements the ValueEncoder interface.
func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	return fn(ec, vw, val)
}

// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
type ValueDecoder interface {
	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
}

// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
// used as a ValueDecoder.
type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error

// DecodeValue implements the ValueDecoder interface.
func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	return fn(dc, vr, val)
}

// CodecZeroer is the interface implemented by Codecs that can also determine if
// a value of the type that would be encoded is zero.
type CodecZeroer interface {
	IsTypeZero(interface{}) bool
}
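A sketch of how the interfaces above compose: a custom encoder for a hypothetical Celsius type, written as a ValueEncoderFunc and wired into a RegistryBuilder. RegistryBuilder and its RegisterEncoder method are defined elsewhere in this package (visible later in this diff); the Celsius type, package name, and string format are assumptions for illustration only:

package temperature

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// Celsius is a hypothetical domain type we want stored as a BSON string.
type Celsius float64

var tCelsius = reflect.TypeOf(Celsius(0))

// encodeCelsius matches the ValueEncoderFunc signature from bsoncodec.go above.
func encodeCelsius(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tCelsius {
		// ValueEncoderError reports what the encoder accepts versus what it received.
		return bsoncodec.ValueEncoderError{Name: "encodeCelsius", Types: []reflect.Type{tCelsius}, Received: val}
	}
	return vw.WriteString(fmt.Sprintf("%.1fC", val.Float()))
}

// RegisterCelsius wires the encoder into a caller-supplied RegistryBuilder.
func RegisterCelsius(rb *bsoncodec.RegistryBuilder) *bsoncodec.RegistryBuilder {
	return rb.RegisterEncoder(tCelsius, bsoncodec.ValueEncoderFunc(encodeCelsius))
}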
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go (+1014 lines; generated, vendored, executable file)
File diff suppressed because it is too large
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go (+648 lines; generated, vendored, executable file)
@@ -0,0 +1,648 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package bsoncodec

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"net/url"
	"reflect"
	"sync"
	"time"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/bsontype"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

var defaultValueEncoders DefaultValueEncoders

var bvwPool = bsonrw.NewBSONValueWriterPool()

var sliceWriterPool = sync.Pool{
	New: func() interface{} {
		sw := make(bsonrw.SliceWriter, 0, 0)
		return &sw
	},
}

func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error {
	vw, err := dw.WriteDocumentElement(e.Key)
	if err != nil {
		return err
	}

	if e.Value == nil {
		return vw.WriteNull()
	}
	encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value))
	if err != nil {
		return err
	}

	err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value))
	if err != nil {
		return err
	}
	return nil
}

// DefaultValueEncoders is a namespace type for the default ValueEncoders used
// when creating a registry.
type DefaultValueEncoders struct{}

// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
// the provided RegistryBuilder.
func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
	if rb == nil {
		panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
	}
	rb.
		RegisterEncoder(tByteSlice, ValueEncoderFunc(dve.ByteSliceEncodeValue)).
		RegisterEncoder(tTime, ValueEncoderFunc(dve.TimeEncodeValue)).
		RegisterEncoder(tEmpty, ValueEncoderFunc(dve.EmptyInterfaceEncodeValue)).
		RegisterEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
		RegisterEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
		RegisterEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
		RegisterEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
		RegisterEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
		RegisterEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
		RegisterEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)).
		RegisterEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
		RegisterEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
		RegisterEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
		RegisterEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
		RegisterEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
		RegisterEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
		RegisterEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
		RegisterEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
		RegisterEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
		RegisterEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
		RegisterEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
		RegisterEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
		RegisterEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
		RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
		RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
		RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
		RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
		RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
		RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
		RegisterDefaultEncoder(reflect.Uint, ValueEncoderFunc(dve.UintEncodeValue)).
		RegisterDefaultEncoder(reflect.Uint8, ValueEncoderFunc(dve.UintEncodeValue)).
		RegisterDefaultEncoder(reflect.Uint16, ValueEncoderFunc(dve.UintEncodeValue)).
		RegisterDefaultEncoder(reflect.Uint32, ValueEncoderFunc(dve.UintEncodeValue)).
		RegisterDefaultEncoder(reflect.Uint64, ValueEncoderFunc(dve.UintEncodeValue)).
		RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
		RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
		RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
		RegisterDefaultEncoder(reflect.Map, ValueEncoderFunc(dve.MapEncodeValue)).
		RegisterDefaultEncoder(reflect.Slice, ValueEncoderFunc(dve.SliceEncodeValue)).
		RegisterDefaultEncoder(reflect.String, ValueEncoderFunc(dve.StringEncodeValue)).
		RegisterDefaultEncoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
		RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec())
}

// BooleanEncodeValue is the ValueEncoderFunc for bool types.
func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.Bool {
		return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
	}
	return vw.WriteBoolean(val.Bool())
}

func fitsIn32Bits(i int64) bool {
	return math.MinInt32 <= i && i <= math.MaxInt32
}

// IntEncodeValue is the ValueEncoderFunc for int types.
func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	switch val.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32:
		return vw.WriteInt32(int32(val.Int()))
	case reflect.Int:
		i64 := val.Int()
		if fitsIn32Bits(i64) {
			return vw.WriteInt32(int32(i64))
		}
		return vw.WriteInt64(i64)
	case reflect.Int64:
		i64 := val.Int()
		if ec.MinSize && fitsIn32Bits(i64) {
			return vw.WriteInt32(int32(i64))
		}
		return vw.WriteInt64(i64)
	}

	return ValueEncoderError{
		Name:     "IntEncodeValue",
		Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
		Received: val,
	}
}

// UintEncodeValue is the ValueEncoderFunc for uint types.
func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	switch val.Kind() {
	case reflect.Uint8, reflect.Uint16:
		return vw.WriteInt32(int32(val.Uint()))
	case reflect.Uint, reflect.Uint32, reflect.Uint64:
		u64 := val.Uint()
		if ec.MinSize && u64 <= math.MaxInt32 {
			return vw.WriteInt32(int32(u64))
		}
		if u64 > math.MaxInt64 {
			return fmt.Errorf("%d overflows int64", u64)
		}
		return vw.WriteInt64(int64(u64))
	}

	return ValueEncoderError{
		Name:     "UintEncodeValue",
		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
		Received: val,
	}
}

// FloatEncodeValue is the ValueEncoderFunc for float types.
func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	switch val.Kind() {
	case reflect.Float32, reflect.Float64:
		return vw.WriteDouble(val.Float())
	}

	return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
}

// StringEncodeValue is the ValueEncoderFunc for string types.
func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if val.Kind() != reflect.String {
		return ValueEncoderError{
			Name:     "StringEncodeValue",
			Kinds:    []reflect.Kind{reflect.String},
			Received: val,
		}
	}

	return vw.WriteString(val.String())
}

// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tOID {
		return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
	}
	return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
}

// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tDecimal {
		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
	}
	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
}

// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tJSONNumber {
		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
	}
	jsnum := val.Interface().(json.Number)

	// Attempt int first, then float64
	if i64, err := jsnum.Int64(); err == nil {
		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
	}

	f64, err := jsnum.Float64()
	if err != nil {
		return err
	}

	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
}

// URLEncodeValue is the ValueEncoderFunc for url.URL.
func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tURL {
		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
	}
	u := val.Interface().(url.URL)
	return vw.WriteString(u.String())
}

// TimeEncodeValue is the ValueEncoderFunc for time.TIme.
func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tTime {
		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
	}
	tt := val.Interface().(time.Time)
	return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6))
}

// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tByteSlice {
		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
	}
	if val.IsNil() {
		return vw.WriteNull()
	}
	return vw.WriteBinary(val.Interface().([]byte))
}

// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
	}

	if val.IsNil() {
		// If we have a nill map but we can't WriteNull, that means we're probably trying to encode
		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
		// so just continue. The operations on a map reflection value are valid, so we can call
		// MapKeys within mapEncodeValue without a problem.
		err := vw.WriteNull()
		if err == nil {
			return nil
		}
	}

	dw, err := vw.WriteDocument()
	if err != nil {
		return err
	}

	return dve.mapEncodeValue(ec, dw, val, nil)
}

// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
// true if the provided key exists, this is mainly used for inline maps in the
// struct codec.
func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {

	encoder, err := ec.LookupEncoder(val.Type().Elem())
	if err != nil {
		return err
	}

	keys := val.MapKeys()
	for _, key := range keys {
		if collisionFn != nil && collisionFn(key.String()) {
			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
		}
		vw, err := dw.WriteDocumentElement(key.String())
		if err != nil {
			return err
		}

		if enc, ok := encoder.(ValueEncoder); ok {
			err = enc.EncodeValue(ec, vw, val.MapIndex(key))
			if err != nil {
				return err
			}
			continue
		}
		err = encoder.EncodeValue(ec, vw, val.MapIndex(key))
		if err != nil {
			return err
		}
	}

	return dw.WriteDocumentEnd()
}

// ArrayEncodeValue is the ValueEncoderFunc for array types.
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.Array {
		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
	}

	// If we have a []primitive.E we want to treat it as a document instead of as an array.
	if val.Type().Elem() == tE {
		dw, err := vw.WriteDocument()
		if err != nil {
			return err
		}

		for idx := 0; idx < val.Len(); idx++ {
			e := val.Index(idx).Interface().(primitive.E)
			err = encodeElement(ec, dw, e)
			if err != nil {
				return err
			}
		}

		return dw.WriteDocumentEnd()
	}

	aw, err := vw.WriteArray()
	if err != nil {
		return err
	}

	encoder, err := ec.LookupEncoder(val.Type().Elem())
	if err != nil {
		return err
	}

	for idx := 0; idx < val.Len(); idx++ {
		vw, err := aw.WriteArrayElement()
		if err != nil {
			return err
		}

		err = encoder.EncodeValue(ec, vw, val.Index(idx))
		if err != nil {
			return err
		}
	}
	return aw.WriteArrayEnd()
}

// SliceEncodeValue is the ValueEncoderFunc for slice types.
func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.Slice {
		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
	}

	if val.IsNil() {
		return vw.WriteNull()
	}

	// If we have a []primitive.E we want to treat it as a document instead of as an array.
	if val.Type().ConvertibleTo(tD) {
		d := val.Convert(tD).Interface().(primitive.D)

		dw, err := vw.WriteDocument()
		if err != nil {
			return err
		}

		for _, e := range d {
			err = encodeElement(ec, dw, e)
			if err != nil {
				return err
			}
		}

		return dw.WriteDocumentEnd()
	}

	aw, err := vw.WriteArray()
	if err != nil {
		return err
	}

	encoder, err := ec.LookupEncoder(val.Type().Elem())
	if err != nil {
		return err
	}

	for idx := 0; idx < val.Len(); idx++ {
		vw, err := aw.WriteArrayElement()
		if err != nil {
			return err
		}

		err = encoder.EncodeValue(ec, vw, val.Index(idx))
		if err != nil {
			return err
		}
	}
	return aw.WriteArrayEnd()
}

// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tEmpty {
		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
	}

	if val.IsNil() {
		return vw.WriteNull()
	}
	encoder, err := ec.LookupEncoder(val.Elem().Type())
	if err != nil {
		return err
	}

	return encoder.EncodeValue(ec, vw, val.Elem())
}

// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || !val.Type().Implements(tValueMarshaler) {
		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
	}

	fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue")
	returns := fn.Call(nil)
	if !returns[2].IsNil() {
		return returns[2].Interface().(error)
	}
	t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte)
	return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
}

// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || !val.Type().Implements(tMarshaler) {
		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
	}

	fn := val.Convert(tMarshaler).MethodByName("MarshalBSON")
	returns := fn.Call(nil)
	if !returns[1].IsNil() {
		return returns[1].Interface().(error)
	}
	data := returns[0].Interface().([]byte)
	return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
}

// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || !val.Type().Implements(tProxy) {
		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
	}

	fn := val.Convert(tProxy).MethodByName("ProxyBSON")
	returns := fn.Call(nil)
	if !returns[1].IsNil() {
		return returns[1].Interface().(error)
	}
	data := returns[0]
	var encoder ValueEncoder
	var err error
	if data.Elem().IsValid() {
		encoder, err = ec.LookupEncoder(data.Elem().Type())
	} else {
		encoder, err = ec.LookupEncoder(nil)
	}
	if err != nil {
		return err
	}
	return encoder.EncodeValue(ec, vw, data.Elem())
}

// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tJavaScript {
		return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
	}

	return vw.WriteJavascript(val.String())
}

// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tSymbol {
		return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
	}

	return vw.WriteSymbol(val.String())
}

// BinaryEncodeValue is the ValueEncoderFunc for Binary.
func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tBinary {
		return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
	}
	b := val.Interface().(primitive.Binary)

	return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
}

// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tUndefined {
		return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
	}

	return vw.WriteUndefined()
}

// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tDateTime {
		return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
	}

	return vw.WriteDateTime(val.Int())
}

// NullEncodeValue is the ValueEncoderFunc for Null.
func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tNull {
		return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
	}

	return vw.WriteNull()
}

// RegexEncodeValue is the ValueEncoderFunc for Regex.
func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tRegex {
		return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
	}

	regex := val.Interface().(primitive.Regex)

	return vw.WriteRegex(regex.Pattern, regex.Options)
}

// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tDBPointer {
		return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
	}

	dbp := val.Interface().(primitive.DBPointer)

	return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
}

// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tTimestamp {
		return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
	}

	ts := val.Interface().(primitive.Timestamp)

	return vw.WriteTimestamp(ts.T, ts.I)
}

// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tMinKey {
		return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
	}

	return vw.WriteMinKey()
}

// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tMaxKey {
		return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
	}

	return vw.WriteMaxKey()
}

// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tCoreDocument {
		return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
	}

	cdoc := val.Interface().(bsoncore.Document)

	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
}

// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tCodeWithScope {
		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
	}

	cws := val.Interface().(primitive.CodeWithScope)

	dw, err := vw.WriteCodeWithScope(string(cws.Code))
	if err != nil {
		return err
	}

	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
	defer sliceWriterPool.Put(sw)
	*sw = (*sw)[:0]

	scopeVW := bvwPool.Get(sw)
	defer bvwPool.Put(scopeVW)

	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
	if err != nil {
		return err
	}

	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
	if err != nil {
		return err
	}

	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
	if err != nil {
		return err
	}
	return dw.WriteDocumentEnd()
}
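A sketch of driving the default encoders by hand, mirroring the SliceWriter and value-writer-pool pattern that CodeWithScopeEncodeValue uses above. bson.DefaultRegistry is assumed to come from the bson package (it is not part of this diff); setting MinSize on the EncodeContext makes int64 values that fit in 32 bits come out as BSON int32, per IntEncodeValue:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	var dve bsoncodec.DefaultValueEncoders

	// Collect output in a SliceWriter, as the codecs above do internally.
	sw := make(bsonrw.SliceWriter, 0, 256)
	pool := bsonrw.NewBSONValueWriterPool()
	vw := pool.Get(&sw)
	defer pool.Put(vw)

	// MinSize downsizes int64 values that fit in an int32 (see IntEncodeValue).
	ec := bsoncodec.EncodeContext{Registry: bson.DefaultRegistry, MinSize: true}

	doc := map[string]interface{}{"n": int64(42), "name": "gopher"}
	if err := dve.MapEncodeValue(ec, vw, reflect.ValueOf(doc)); err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Printf("encoded %d bytes of BSON\n", len(sw))
}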
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go (+61 lines; generated, vendored, executable file)
@@ -0,0 +1,61 @@
// Package bsoncodec provides a system for encoding values to BSON representations and decoding
// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
// BSON representations. The types in this package enable a flexible system for handling this
// encoding and decoding.
//
// The codec system is composed of two parts:
//
// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
// representations.
//
// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
// retrieving them.
//
// ValueEncoders and ValueDecoders
//
// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
// is provided to allow use of a function with the correct signature as a ValueEncoder. An
// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and
// to provide configuration information.
//
// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
// instance is provided and serves similar functionality to the EncodeContext.
//
// Registry and RegistryBuilder
//
// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. For looking up
// ValueEncoders and Decoders the Registry first attempts to find a ValueEncoder or ValueDecoder for
// the type provided; if one cannot be found it then checks to see if a registered ValueEncoder or
// ValueDecoder exists for an interface the type implements. Finally, the reflect.Kind of the type
// is used to lookup a default ValueEncoder or ValueDecoder for that kind. If no ValueEncoder or
// ValueDecoder can be found, an error is returned.
//
// The Registry also holds a type map. This allows users to retrieve the Go type that should be used
// when decoding a BSON value into an empty interface. This is primarily only used for the empty
// interface ValueDecoder.
//
// A RegistryBuilder is used to construct a Registry. The Register methods are used to associate
// either a reflect.Type or a reflect.Kind with a ValueEncoder or ValueDecoder. A RegistryBuilder
// returned from NewRegistryBuilder contains no registered ValueEncoders nor ValueDecoders and
// contains an empty type map.
//
// The RegisterTypeMapEntry method handles associating a BSON type with a Go type. For example, if
// you want to decode BSON int64 and int32 values into Go int instances, you would do the following:
//
//	var regbuilder *RegistryBuilder = ... intType := reflect.TypeOf(int(0))
//	regbuilder.RegisterTypeMapEntry(bsontype.Int64, intType).RegisterTypeMapEntry(bsontype.Int32,
//	intType)
//
// DefaultValueEncoders and DefaultValueDecoders
//
// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
// ValueDecoders for handling a wide range of Go types, including all of the types within the
// primitive package. To make registering these codecs easier, a helper method on each type is
// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also
// handles registering type map entries for each BSON type.
package bsoncodec
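A runnable sketch of the type-map example from the package comment above. bson.Marshal and bson.UnmarshalWithRegistry are assumed from the bson package (not part of this diff); the intent is that the Registry's type map makes BSON int32/int64 decode into Go int when the target is an empty interface:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// Start from an empty builder and register the default codecs described above.
	rb := bsoncodec.NewRegistryBuilder()
	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)

	// Map BSON int32 and int64 to Go int, overriding the default entries.
	intType := reflect.TypeOf(int(0))
	rb.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
	reg := rb.Build()

	raw, err := bson.Marshal(bson.M{"n": int32(7)})
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["n"]) // expected to print "int" thanks to the type map entries
}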
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go (+65 lines; generated, vendored, executable file)
@@ -0,0 +1,65 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import "fmt"
|
||||
|
||||
type mode int
|
||||
|
||||
const (
|
||||
_ mode = iota
|
||||
mTopLevel
|
||||
mDocument
|
||||
mArray
|
||||
mValue
|
||||
mElement
|
||||
mCodeWithScope
|
||||
mSpacer
|
||||
)
|
||||
|
||||
func (m mode) String() string {
|
||||
var str string
|
||||
|
||||
switch m {
|
||||
case mTopLevel:
|
||||
str = "TopLevel"
|
||||
case mDocument:
|
||||
str = "DocumentMode"
|
||||
case mArray:
|
||||
str = "ArrayMode"
|
||||
case mValue:
|
||||
str = "ValueMode"
|
||||
case mElement:
|
||||
str = "ElementMode"
|
||||
case mCodeWithScope:
|
||||
str = "CodeWithScopeMode"
|
||||
case mSpacer:
|
||||
str = "CodeWithScopeSpacerFrame"
|
||||
default:
|
||||
str = "UnknownMode"
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// TransitionError is an error returned when an invalid transition of a
// ValueReader or ValueWriter state machine occurs.
|
||||
type TransitionError struct {
|
||||
parent mode
|
||||
current mode
|
||||
destination mode
|
||||
}
|
||||
|
||||
func (te TransitionError) Error() string {
|
||||
if te.destination == mode(0) {
|
||||
return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
|
||||
}
|
||||
if te.parent == mode(0) {
|
||||
return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
|
||||
}
|
||||
return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
|
||||
}
|
||||
110
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
generated
vendored
Executable file
@@ -0,0 +1,110 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
var defaultPointerCodec = &PointerCodec{
|
||||
ecache: make(map[reflect.Type]ValueEncoder),
|
||||
dcache: make(map[reflect.Type]ValueDecoder),
|
||||
}
|
||||
|
||||
var _ ValueEncoder = &PointerCodec{}
|
||||
var _ ValueDecoder = &PointerCodec{}
|
||||
|
||||
// PointerCodec is the Codec used for pointers.
|
||||
type PointerCodec struct {
|
||||
ecache map[reflect.Type]ValueEncoder
|
||||
dcache map[reflect.Type]ValueDecoder
|
||||
l sync.RWMutex
|
||||
}
|
||||
|
||||
// NewPointerCodec returns a PointerCodec that has been initialized.
|
||||
func NewPointerCodec() *PointerCodec {
|
||||
return &PointerCodec{
|
||||
ecache: make(map[reflect.Type]ValueEncoder),
|
||||
dcache: make(map[reflect.Type]ValueDecoder),
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
|
||||
// or looking up an encoder for the type of value the pointer points to.
|
||||
func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if val.Kind() != reflect.Ptr {
|
||||
if !val.IsValid() {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
|
||||
}
|
||||
|
||||
if val.IsNil() {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
|
||||
pc.l.RLock()
|
||||
enc, ok := pc.ecache[val.Type()]
|
||||
pc.l.RUnlock()
|
||||
if ok {
|
||||
if enc == nil {
|
||||
return ErrNoEncoder{Type: val.Type()}
|
||||
}
|
||||
return enc.EncodeValue(ec, vw, val.Elem())
|
||||
}
|
||||
|
||||
enc, err := ec.LookupEncoder(val.Type().Elem())
|
||||
pc.l.Lock()
|
||||
pc.ecache[val.Type()] = enc
|
||||
pc.l.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return enc.EncodeValue(ec, vw, val.Elem())
|
||||
}
|
||||
|
||||
// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
|
||||
// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
|
||||
func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Kind() != reflect.Ptr {
|
||||
return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
|
||||
}
|
||||
|
||||
if vr.Type() == bsontype.Null {
|
||||
val.Set(reflect.Zero(val.Type()))
|
||||
return vr.ReadNull()
|
||||
}
|
||||
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.New(val.Type().Elem()))
|
||||
}
|
||||
|
||||
pc.l.RLock()
|
||||
dec, ok := pc.dcache[val.Type()]
|
||||
pc.l.RUnlock()
|
||||
if ok {
|
||||
if dec == nil {
|
||||
return ErrNoDecoder{Type: val.Type()}
|
||||
}
|
||||
return dec.DecodeValue(dc, vr, val.Elem())
|
||||
}
|
||||
|
||||
dec, err := dc.LookupDecoder(val.Type().Elem())
|
||||
pc.l.Lock()
|
||||
pc.dcache[val.Type()] = dec
|
||||
pc.l.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dec.DecodeValue(dc, vr, val.Elem())
|
||||
}
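As the EncodeValue and DecodeValue docs above explain, PointerCodec simply defers to the codec for the pointed-to type. A minimal sketch of wiring it up as the default codec for the reflect.Ptr kind follows; the driver's built-in default registry already does this internally, so this is only needed when assembling a registry by hand, and registerPointerCodec is a hypothetical helper name.

package example

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

// registerPointerCodec registers a PointerCodec for all pointer kinds on rb.
func registerPointerCodec(rb *bsoncodec.RegistryBuilder) *bsoncodec.RegistryBuilder {
	pc := bsoncodec.NewPointerCodec()
	return rb.RegisterDefaultEncoder(reflect.Ptr, pc). // encode *T by encoding T (nil -> BSON Null)
							RegisterDefaultDecoder(reflect.Ptr, pc) // decode into *T, allocating if nil
}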
|
||||
14
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
generated
vendored
Executable file
@@ -0,0 +1,14 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
|
||||
// that implement this interface will have ProxyBSON called during the encoding process and that
|
||||
// value will be encoded in place for the implementer.
|
||||
type Proxy interface {
|
||||
ProxyBSON() (interface{}, error)
|
||||
}
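A minimal sketch of a type implementing Proxy, assuming a hypothetical temperature type that is stored internally in millikelvin but should be encoded as a plain float64 of kelvin:

package example

// temperature is hypothetical; only its ProxyBSON method matters here.
type temperature struct {
	millikelvin int64
}

// ProxyBSON returns the value that is encoded in place of temperature.
func (t temperature) ProxyBSON() (interface{}, error) {
	return float64(t.millikelvin) / 1000.0, nil
}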
|
||||
384
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
generated
vendored
Executable file
@@ -0,0 +1,384 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
|
||||
var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
|
||||
|
||||
// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
|
||||
var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
|
||||
|
||||
// ErrNoEncoder is returned when there wasn't an encoder available for a type.
|
||||
type ErrNoEncoder struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
func (ene ErrNoEncoder) Error() string {
|
||||
if ene.Type == nil {
|
||||
return "no encoder found for <nil>"
|
||||
}
|
||||
return "no encoder found for " + ene.Type.String()
|
||||
}
|
||||
|
||||
// ErrNoDecoder is returned when there wasn't a decoder available for a type.
|
||||
type ErrNoDecoder struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
func (end ErrNoDecoder) Error() string {
|
||||
return "no decoder found for " + end.Type.String()
|
||||
}
|
||||
|
||||
// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
|
||||
type ErrNoTypeMapEntry struct {
|
||||
Type bsontype.Type
|
||||
}
|
||||
|
||||
func (entme ErrNoTypeMapEntry) Error() string {
|
||||
return "no type map entry found for " + entme.Type.String()
|
||||
}
|
||||
|
||||
// ErrNotInterface is returned when the provided type is not an interface.
|
||||
var ErrNotInterface = errors.New("The provided type is not an interface")
|
||||
|
||||
var defaultRegistry *Registry
|
||||
|
||||
func init() {
|
||||
defaultRegistry = buildDefaultRegistry()
|
||||
}
|
||||
|
||||
// A RegistryBuilder is used to build a Registry. This type is not goroutine
|
||||
// safe.
|
||||
type RegistryBuilder struct {
|
||||
typeEncoders map[reflect.Type]ValueEncoder
|
||||
interfaceEncoders []interfaceValueEncoder
|
||||
kindEncoders map[reflect.Kind]ValueEncoder
|
||||
|
||||
typeDecoders map[reflect.Type]ValueDecoder
|
||||
interfaceDecoders []interfaceValueDecoder
|
||||
kindDecoders map[reflect.Kind]ValueDecoder
|
||||
|
||||
typeMap map[bsontype.Type]reflect.Type
|
||||
}
|
||||
|
||||
// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
|
||||
// type passed around, and Encoders and Decoders are constructed from it.
|
||||
type Registry struct {
|
||||
typeEncoders map[reflect.Type]ValueEncoder
|
||||
typeDecoders map[reflect.Type]ValueDecoder
|
||||
|
||||
interfaceEncoders []interfaceValueEncoder
|
||||
interfaceDecoders []interfaceValueDecoder
|
||||
|
||||
kindEncoders map[reflect.Kind]ValueEncoder
|
||||
kindDecoders map[reflect.Kind]ValueDecoder
|
||||
|
||||
typeMap map[bsontype.Type]reflect.Type
|
||||
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewRegistryBuilder creates a new empty RegistryBuilder.
|
||||
func NewRegistryBuilder() *RegistryBuilder {
|
||||
return &RegistryBuilder{
|
||||
typeEncoders: make(map[reflect.Type]ValueEncoder),
|
||||
typeDecoders: make(map[reflect.Type]ValueDecoder),
|
||||
|
||||
interfaceEncoders: make([]interfaceValueEncoder, 0),
|
||||
interfaceDecoders: make([]interfaceValueDecoder, 0),
|
||||
|
||||
kindEncoders: make(map[reflect.Kind]ValueEncoder),
|
||||
kindDecoders: make(map[reflect.Kind]ValueDecoder),
|
||||
|
||||
typeMap: make(map[bsontype.Type]reflect.Type),
|
||||
}
|
||||
}
|
||||
|
||||
func buildDefaultRegistry() *Registry {
|
||||
rb := NewRegistryBuilder()
|
||||
defaultValueEncoders.RegisterDefaultEncoders(rb)
|
||||
defaultValueDecoders.RegisterDefaultDecoders(rb)
|
||||
return rb.Build()
|
||||
}
|
||||
|
||||
// RegisterCodec will register the provided ValueCodec for the provided type.
|
||||
func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
|
||||
rb.RegisterEncoder(t, codec)
|
||||
rb.RegisterDecoder(t, codec)
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterEncoder will register the provided ValueEncoder to the provided type.
|
||||
//
|
||||
// The type registered will be used directly, so an encoder can be registered for a type and a
|
||||
// different encoder can be registered for a pointer to that type.
|
||||
func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
|
||||
if t == tEmpty {
|
||||
rb.typeEncoders[t] = enc
|
||||
return rb
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Interface:
|
||||
for idx, ir := range rb.interfaceEncoders {
|
||||
if ir.i == t {
|
||||
rb.interfaceEncoders[idx].ve = enc
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
|
||||
default:
|
||||
rb.typeEncoders[t] = enc
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDecoder will register the provided ValueDecoder to the provided type.
|
||||
//
|
||||
// The type registered will be used directly, so a decoder can be registered for a type and a
|
||||
// different decoder can be registered for a pointer to that type.
|
||||
func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
|
||||
if t == nil {
|
||||
rb.typeDecoders[nil] = dec
|
||||
return rb
|
||||
}
|
||||
if t == tEmpty {
|
||||
rb.typeDecoders[t] = dec
|
||||
return rb
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Interface:
|
||||
for idx, ir := range rb.interfaceDecoders {
|
||||
if ir.i == t {
|
||||
rb.interfaceDecoders[idx].vd = dec
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
|
||||
default:
|
||||
rb.typeDecoders[t] = dec
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
|
||||
// kind.
|
||||
func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
|
||||
rb.kindEncoders[kind] = enc
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDefaultDecoder will register the provided ValueDecoder to the
|
||||
// provided kind.
|
||||
func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
|
||||
rb.kindDecoders[kind] = dec
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
|
||||
// mapping is decoding situations where an empty interface is used and a default type needs to be
|
||||
// created and decoded into.
|
||||
//
|
||||
// NOTE: It is unlikely that registering a type for BSON Embedded Document is actually desired. By
|
||||
// registering a type map entry for BSON Embedded Document the type registered will be used in any
|
||||
// case where a BSON Embedded Document will be decoded into an empty interface. For example, if you
|
||||
// register primitive.M, the EmptyInterface decoder will always use primitive.M, even if an ancestor
|
||||
// was a primitive.D.
|
||||
func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
|
||||
rb.typeMap[bt] = rt
|
||||
return rb
|
||||
}
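A minimal sketch of RegisterTypeMapEntry usage; registering primitive.M for bsontype.EmbeddedDocument is shown only to illustrate the NOTE above and is usually not what you want, since every embedded document decoded into an empty interface then becomes a primitive.M. The helper name registerTypeMaps is hypothetical.

package example

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsontype"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func registerTypeMaps(rb *bsoncodec.RegistryBuilder) {
	// Decode BSON int32 values into Go int when the target is interface{}.
	rb.RegisterTypeMapEntry(bsontype.Int32, reflect.TypeOf(int(0)))

	// Per the NOTE: this forces primitive.M for every embedded document decoded
	// into interface{}, even when an ancestor was decoded as a primitive.D.
	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(primitive.M{}))
}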
|
||||
|
||||
// Build creates a Registry from the current state of this RegistryBuilder.
|
||||
func (rb *RegistryBuilder) Build() *Registry {
|
||||
registry := new(Registry)
|
||||
|
||||
registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
|
||||
for t, enc := range rb.typeEncoders {
|
||||
registry.typeEncoders[t] = enc
|
||||
}
|
||||
|
||||
registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
|
||||
for t, dec := range rb.typeDecoders {
|
||||
registry.typeDecoders[t] = dec
|
||||
}
|
||||
|
||||
registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
|
||||
copy(registry.interfaceEncoders, rb.interfaceEncoders)
|
||||
|
||||
registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
|
||||
copy(registry.interfaceDecoders, rb.interfaceDecoders)
|
||||
|
||||
registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
|
||||
for kind, enc := range rb.kindEncoders {
|
||||
registry.kindEncoders[kind] = enc
|
||||
}
|
||||
|
||||
registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
|
||||
for kind, dec := range rb.kindDecoders {
|
||||
registry.kindDecoders[kind] = dec
|
||||
}
|
||||
|
||||
registry.typeMap = make(map[bsontype.Type]reflect.Type)
|
||||
for bt, rt := range rb.typeMap {
|
||||
registry.typeMap[bt] = rt
|
||||
}
|
||||
|
||||
return registry
|
||||
}
|
||||
|
||||
// LookupEncoder will inspect the registry for an encoder that satisfies the
|
||||
// type provided. An encoder registered for a specific type will take
|
||||
// precedence over an encoder registered for an interface the type satisfies,
|
||||
// which takes precedence over an encoder for the reflect.Kind of the value. If
|
||||
// no encoder can be found, an error is returned.
|
||||
func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
|
||||
encodererr := ErrNoEncoder{Type: t}
|
||||
r.mu.RLock()
|
||||
enc, found := r.lookupTypeEncoder(t)
|
||||
r.mu.RUnlock()
|
||||
if found {
|
||||
if enc == nil {
|
||||
return nil, ErrNoEncoder{Type: t}
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
enc, found = r.lookupInterfaceEncoder(t)
|
||||
if found {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = enc
|
||||
r.mu.Unlock()
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, encodererr
|
||||
}
|
||||
|
||||
enc, found = r.kindEncoders[t.Kind()]
|
||||
if !found {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, encodererr
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = enc
|
||||
r.mu.Unlock()
|
||||
return enc, nil
|
||||
}
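The comment above describes the lookup precedence: an exact-type encoder wins over an interface encoder, which wins over a kind encoder. A minimal sketch of that precedence, using placeholder encoder values passed in by the caller (lookupExample is a hypothetical helper):

package example

import (
	"fmt"
	"reflect"
	"time"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

func lookupExample(typeEnc, ifaceEnc, kindEnc bsoncodec.ValueEncoder) {
	stringerIface := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()

	reg := bsoncodec.NewRegistryBuilder().
		RegisterEncoder(reflect.TypeOf(time.Duration(0)), typeEnc). // exact type
		RegisterEncoder(stringerIface, ifaceEnc).                   // interface it implements
		RegisterDefaultEncoder(reflect.Int64, kindEnc).             // underlying kind
		Build()

	// time.Duration matches all three registrations; the exact-type encoder wins.
	enc, _ := reg.LookupEncoder(reflect.TypeOf(time.Duration(0)))
	_ = enc // == typeEnc
}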
|
||||
|
||||
func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
|
||||
enc, found := r.typeEncoders[t]
|
||||
return enc, found
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceEncoder(t reflect.Type) (ValueEncoder, bool) {
|
||||
if t == nil {
|
||||
return nil, false
|
||||
}
|
||||
for _, ienc := range r.interfaceEncoders {
|
||||
if !t.Implements(ienc.i) {
|
||||
continue
|
||||
}
|
||||
|
||||
return ienc.ve, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LookupDecoder will inspect the registry for a decoder that satisfies the
|
||||
// type provided. A decoder registered for a specific type will take
|
||||
// precedence over a decoder registered for an interface the type satisfies,
|
||||
// which takes precedence over a decoder for the reflect.Kind of the value. If
|
||||
// no decoder can be found, an error is returned.
|
||||
func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
|
||||
if t == nil {
|
||||
return nil, ErrNilType
|
||||
}
|
||||
decodererr := ErrNoDecoder{Type: t}
|
||||
r.mu.RLock()
|
||||
dec, found := r.lookupTypeDecoder(t)
|
||||
r.mu.RUnlock()
|
||||
if found {
|
||||
if dec == nil {
|
||||
return nil, ErrNoDecoder{Type: t}
|
||||
}
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
dec, found = r.lookupInterfaceDecoder(t)
|
||||
if found {
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = dec
|
||||
r.mu.Unlock()
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
dec, found = r.kindDecoders[t.Kind()]
|
||||
if !found {
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, decodererr
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = dec
|
||||
r.mu.Unlock()
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
|
||||
dec, found := r.typeDecoders[t]
|
||||
return dec, found
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceDecoder(t reflect.Type) (ValueDecoder, bool) {
|
||||
for _, idec := range r.interfaceDecoders {
|
||||
if !t.Implements(idec.i) && !reflect.PtrTo(t).Implements(idec.i) {
|
||||
continue
|
||||
}
|
||||
|
||||
return idec.vd, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
|
||||
// type. If no type is found, ErrNoTypeMapEntry is returned.
|
||||
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
|
||||
t, ok := r.typeMap[bt]
|
||||
if !ok || t == nil {
|
||||
return nil, ErrNoTypeMapEntry{Type: bt}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
type interfaceValueEncoder struct {
|
||||
i reflect.Type
|
||||
ve ValueEncoder
|
||||
}
|
||||
|
||||
type interfaceValueDecoder struct {
|
||||
i reflect.Type
|
||||
vd ValueDecoder
|
||||
}
|
||||
359
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
generated
vendored
Executable file
@@ -0,0 +1,359 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
var defaultStructCodec = &StructCodec{
|
||||
cache: make(map[reflect.Type]*structDescription),
|
||||
parser: DefaultStructTagParser,
|
||||
}
|
||||
|
||||
// Zeroer allows custom struct types to report whether they are the zero value.
// Struct types that do not implement Zeroer, or whose IsZero method returns
// false, are treated as non-zero.
|
||||
type Zeroer interface {
|
||||
IsZero() bool
|
||||
}
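A minimal sketch of implementing Zeroer so that omitempty works for a struct-typed field; the coordinate type is hypothetical.

package example

// coordinate is hypothetical and used only to illustrate Zeroer.
type coordinate struct {
	Lat, Lng float64
}

// IsZero reports the zero state; a field of this type tagged `bson:",omitempty"`
// is omitted during encoding when IsZero returns true.
func (c coordinate) IsZero() bool {
	return c.Lat == 0 && c.Lng == 0
}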
|
||||
|
||||
// StructCodec is the Codec used for struct values.
|
||||
type StructCodec struct {
|
||||
cache map[reflect.Type]*structDescription
|
||||
l sync.RWMutex
|
||||
parser StructTagParser
|
||||
}
|
||||
|
||||
var _ ValueEncoder = &StructCodec{}
|
||||
var _ ValueDecoder = &StructCodec{}
|
||||
|
||||
// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
|
||||
func NewStructCodec(p StructTagParser) (*StructCodec, error) {
|
||||
if p == nil {
|
||||
return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
|
||||
}
|
||||
|
||||
return &StructCodec{
|
||||
cache: make(map[reflect.Type]*structDescription),
|
||||
parser: p,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EncodeValue handles encoding generic struct types.
|
||||
func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Kind() != reflect.Struct {
|
||||
return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
|
||||
}
|
||||
|
||||
sd, err := sc.describeStruct(r.Registry, val.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dw, err := vw.WriteDocument()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var rv reflect.Value
|
||||
for _, desc := range sd.fl {
|
||||
if desc.inline == nil {
|
||||
rv = val.Field(desc.idx)
|
||||
} else {
|
||||
rv = val.FieldByIndex(desc.inline)
|
||||
}
|
||||
|
||||
if desc.encoder == nil {
|
||||
return ErrNoEncoder{Type: rv.Type()}
|
||||
}
|
||||
|
||||
encoder := desc.encoder
|
||||
|
||||
iszero := sc.isZero
|
||||
if iz, ok := encoder.(CodecZeroer); ok {
|
||||
iszero = iz.IsTypeZero
|
||||
}
|
||||
|
||||
if desc.omitEmpty && iszero(rv.Interface()) {
|
||||
continue
|
||||
}
|
||||
|
||||
vw2, err := dw.WriteDocumentElement(desc.name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize}
|
||||
err = encoder.EncodeValue(ectx, vw2, rv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if sd.inlineMap >= 0 {
|
||||
rv := val.Field(sd.inlineMap)
|
||||
collisionFn := func(key string) bool {
|
||||
_, exists := sd.fm[key]
|
||||
return exists
|
||||
}
|
||||
|
||||
return defaultValueEncoders.mapEncodeValue(r, dw, rv, collisionFn)
|
||||
}
|
||||
|
||||
return dw.WriteDocumentEnd()
|
||||
}
|
||||
|
||||
// DecodeValue implements the Codec interface.
|
||||
// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
|
||||
// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
|
||||
func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Kind() != reflect.Struct {
|
||||
return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
|
||||
}
|
||||
|
||||
switch vr.Type() {
|
||||
case bsontype.Type(0), bsontype.EmbeddedDocument:
|
||||
default:
|
||||
return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
|
||||
}
|
||||
|
||||
sd, err := sc.describeStruct(r.Registry, val.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var decoder ValueDecoder
|
||||
var inlineMap reflect.Value
|
||||
if sd.inlineMap >= 0 {
|
||||
inlineMap = val.Field(sd.inlineMap)
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
decoder, err = r.LookupDecoder(inlineMap.Type().Elem())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
dr, err := vr.ReadDocument()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
name, vr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fd, exists := sd.fm[name]
|
||||
if !exists {
|
||||
if sd.inlineMap < 0 {
|
||||
// The encoding/json package offers a flag to return an error for unknown fields.
// Similar functionality seems appropriate for the struct codec.
|
||||
err = vr.Skip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
elem := reflect.New(inlineMap.Type().Elem()).Elem()
|
||||
err = decoder.DecodeValue(r, vr, elem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
|
||||
continue
|
||||
}
|
||||
|
||||
var field reflect.Value
|
||||
if fd.inline == nil {
|
||||
field = val.Field(fd.idx)
|
||||
} else {
|
||||
field = val.FieldByIndex(fd.inline)
|
||||
}
|
||||
|
||||
if !field.CanSet() { // Being settable implies being addressable, so this check covers both.
|
||||
return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field)
|
||||
}
|
||||
if field.Kind() == reflect.Ptr && field.IsNil() {
|
||||
field.Set(reflect.New(field.Type().Elem()))
|
||||
}
|
||||
field = field.Addr()
|
||||
|
||||
dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate}
|
||||
if fd.decoder == nil {
|
||||
return ErrNoDecoder{Type: field.Elem().Type()}
|
||||
}
|
||||
|
||||
if decoder, ok := fd.decoder.(ValueDecoder); ok {
|
||||
err = decoder.DecodeValue(dctx, vr, field.Elem())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
err = fd.decoder.DecodeValue(dctx, vr, field)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *StructCodec) isZero(i interface{}) bool {
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
// check the value validity
|
||||
if !v.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
|
||||
return z.IsZero()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type structDescription struct {
|
||||
fm map[string]fieldDescription
|
||||
fl []fieldDescription
|
||||
inlineMap int
|
||||
}
|
||||
|
||||
type fieldDescription struct {
|
||||
name string
|
||||
idx int
|
||||
omitEmpty bool
|
||||
minSize bool
|
||||
truncate bool
|
||||
inline []int
|
||||
encoder ValueEncoder
|
||||
decoder ValueDecoder
|
||||
}
|
||||
|
||||
func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
|
||||
// We need to analyze the struct, including getting the tags, collecting
|
||||
// information about inlining, and create a map of the field name to the field.
|
||||
sc.l.RLock()
|
||||
ds, exists := sc.cache[t]
|
||||
sc.l.RUnlock()
|
||||
if exists {
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
numFields := t.NumField()
|
||||
sd := &structDescription{
|
||||
fm: make(map[string]fieldDescription, numFields),
|
||||
fl: make([]fieldDescription, 0, numFields),
|
||||
inlineMap: -1,
|
||||
}
|
||||
|
||||
for i := 0; i < numFields; i++ {
|
||||
sf := t.Field(i)
|
||||
if sf.PkgPath != "" {
|
||||
// unexported, ignore
|
||||
continue
|
||||
}
|
||||
|
||||
encoder, err := r.LookupEncoder(sf.Type)
|
||||
if err != nil {
|
||||
encoder = nil
|
||||
}
|
||||
decoder, err := r.LookupDecoder(sf.Type)
|
||||
if err != nil {
|
||||
decoder = nil
|
||||
}
|
||||
|
||||
description := fieldDescription{idx: i, encoder: encoder, decoder: decoder}
|
||||
|
||||
stags, err := sc.parser.ParseStructTags(sf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stags.Skip {
|
||||
continue
|
||||
}
|
||||
description.name = stags.Name
|
||||
description.omitEmpty = stags.OmitEmpty
|
||||
description.minSize = stags.MinSize
|
||||
description.truncate = stags.Truncate
|
||||
|
||||
if stags.Inline {
|
||||
switch sf.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if sd.inlineMap >= 0 {
|
||||
return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
|
||||
}
|
||||
if sf.Type.Key() != tString {
|
||||
return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
|
||||
}
|
||||
sd.inlineMap = description.idx
|
||||
case reflect.Struct:
|
||||
inlinesf, err := sc.describeStruct(r, sf.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, fd := range inlinesf.fl {
|
||||
if _, exists := sd.fm[fd.name]; exists {
|
||||
return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
|
||||
}
|
||||
if fd.inline == nil {
|
||||
fd.inline = []int{i, fd.idx}
|
||||
} else {
|
||||
fd.inline = append([]int{i}, fd.inline...)
|
||||
}
|
||||
sd.fm[fd.name] = fd
|
||||
sd.fl = append(sd.fl, fd)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("(struct %s) inline fields must be either a struct or a map", t.String())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := sd.fm[description.name]; exists {
|
||||
return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name)
|
||||
}
|
||||
|
||||
sd.fm[description.name] = description
|
||||
sd.fl = append(sd.fl, description)
|
||||
}
|
||||
|
||||
sc.l.Lock()
|
||||
sc.cache[t] = sd
|
||||
sc.l.Unlock()
|
||||
|
||||
return sd, nil
|
||||
}
|
||||
119
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
generated
vendored
Executable file
@@ -0,0 +1,119 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StructTagParser returns the struct tags for a given struct field.
|
||||
type StructTagParser interface {
|
||||
ParseStructTags(reflect.StructField) (StructTags, error)
|
||||
}
|
||||
|
||||
// StructTagParserFunc is an adapter that allows a generic function to be used
|
||||
// as a StructTagParser.
|
||||
type StructTagParserFunc func(reflect.StructField) (StructTags, error)
|
||||
|
||||
// ParseStructTags implements the StructTagParser interface.
|
||||
func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
|
||||
return stpf(sf)
|
||||
}
|
||||
|
||||
// StructTags represents the struct tag fields that the StructCodec uses during
|
||||
// the encoding and decoding process.
|
||||
//
|
||||
// In the case of a struct, the lowercased field name is used as the key for each exported
|
||||
// field but this behavior may be changed using a struct tag. The tag may also contain flags to
|
||||
// adjust the marshalling behavior for the field.
|
||||
//
|
||||
// The properties are defined below:
|
||||
//
|
||||
// OmitEmpty Only include the field if it's not set to the zero value for the type or to
|
||||
// empty slices or maps.
|
||||
//
|
||||
// MinSize Marshal an integer of a type larger than 32 bits as an int32, if that's
|
||||
// feasible while preserving the numeric value.
|
||||
//
|
||||
// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within
|
||||
// a float32.
|
||||
//
|
||||
// Inline Inline the field, which must be a struct or a map, causing all of its fields
|
||||
// or keys to be processed as if they were part of the outer struct. For maps,
|
||||
// keys must not conflict with the bson keys of other struct fields.
|
||||
//
|
||||
// Skip This struct field should be skipped. This is usually denoted by parsing a "-"
|
||||
// for the name.
|
||||
//
|
||||
// TODO(skriptble): Add tags for undefined as nil and for null as nil.
|
||||
type StructTags struct {
|
||||
Name string
|
||||
OmitEmpty bool
|
||||
MinSize bool
|
||||
Truncate bool
|
||||
Inline bool
|
||||
Skip bool
|
||||
}
|
||||
|
||||
// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
|
||||
// It will handle the bson struct tag. See the documentation for StructTags to see
|
||||
// what each of the returned fields means.
|
||||
//
|
||||
// If there is no name in the struct tag fields, the struct field name is lowercased.
|
||||
// The tag formats accepted are:
|
||||
//
|
||||
// "[<key>][,<flag1>[,<flag2>]]"
|
||||
//
|
||||
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// An example:
|
||||
//
|
||||
// type T struct {
|
||||
// A bool
|
||||
// B int "myb"
|
||||
// C string "myc,omitempty"
|
||||
// D string `bson:",omitempty" json:"jsonkey"`
|
||||
// E int64 ",minsize"
|
||||
// F int64 "myf,omitempty,minsize"
|
||||
// }
|
||||
//
|
||||
// A struct tag either consisting entirely of '-' or with a bson key with a
|
||||
// value consisting entirely of '-' will return a StructTags with Skip true and
|
||||
// the remaining fields will be their default values.
|
||||
var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
|
||||
key := strings.ToLower(sf.Name)
|
||||
tag, ok := sf.Tag.Lookup("bson")
|
||||
if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
|
||||
tag = string(sf.Tag)
|
||||
}
|
||||
var st StructTags
|
||||
if tag == "-" {
|
||||
st.Skip = true
|
||||
return st, nil
|
||||
}
|
||||
|
||||
for idx, str := range strings.Split(tag, ",") {
|
||||
if idx == 0 && str != "" {
|
||||
key = str
|
||||
}
|
||||
switch str {
|
||||
case "omitempty":
|
||||
st.OmitEmpty = true
|
||||
case "minsize":
|
||||
st.MinSize = true
|
||||
case "truncate":
|
||||
st.Truncate = true
|
||||
case "inline":
|
||||
st.Inline = true
|
||||
}
|
||||
}
|
||||
|
||||
st.Name = key
|
||||
|
||||
return st, nil
|
||||
}
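A minimal sketch of supplying a custom StructTagParser to NewStructCodec. The parser below is hypothetical (as is the name simpleParser): it treats any field tagged `bson:"-"` as skipped and otherwise just lowercases the field name, ignoring all flags.

package example

import (
	"reflect"
	"strings"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

var simpleParser bsoncodec.StructTagParserFunc = func(sf reflect.StructField) (bsoncodec.StructTags, error) {
	if sf.Tag.Get("bson") == "-" {
		return bsoncodec.StructTags{Skip: true}, nil
	}
	return bsoncodec.StructTags{Name: strings.ToLower(sf.Name)}, nil
}

// newSimpleStructCodec builds a StructCodec that uses the parser above.
func newSimpleStructCodec() (*bsoncodec.StructCodec, error) {
	return bsoncodec.NewStructCodec(simpleParser)
}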
|
||||
80
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
generated
vendored
Executable file
@@ -0,0 +1,80 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
var ptBool = reflect.TypeOf((*bool)(nil))
|
||||
var ptInt8 = reflect.TypeOf((*int8)(nil))
|
||||
var ptInt16 = reflect.TypeOf((*int16)(nil))
|
||||
var ptInt32 = reflect.TypeOf((*int32)(nil))
|
||||
var ptInt64 = reflect.TypeOf((*int64)(nil))
|
||||
var ptInt = reflect.TypeOf((*int)(nil))
|
||||
var ptUint8 = reflect.TypeOf((*uint8)(nil))
|
||||
var ptUint16 = reflect.TypeOf((*uint16)(nil))
|
||||
var ptUint32 = reflect.TypeOf((*uint32)(nil))
|
||||
var ptUint64 = reflect.TypeOf((*uint64)(nil))
|
||||
var ptUint = reflect.TypeOf((*uint)(nil))
|
||||
var ptFloat32 = reflect.TypeOf((*float32)(nil))
|
||||
var ptFloat64 = reflect.TypeOf((*float64)(nil))
|
||||
var ptString = reflect.TypeOf((*string)(nil))
|
||||
|
||||
var tBool = reflect.TypeOf(false)
|
||||
var tFloat32 = reflect.TypeOf(float32(0))
|
||||
var tFloat64 = reflect.TypeOf(float64(0))
|
||||
var tInt = reflect.TypeOf(int(0))
|
||||
var tInt8 = reflect.TypeOf(int8(0))
|
||||
var tInt16 = reflect.TypeOf(int16(0))
|
||||
var tInt32 = reflect.TypeOf(int32(0))
|
||||
var tInt64 = reflect.TypeOf(int64(0))
|
||||
var tString = reflect.TypeOf("")
|
||||
var tTime = reflect.TypeOf(time.Time{})
|
||||
var tUint = reflect.TypeOf(uint(0))
|
||||
var tUint8 = reflect.TypeOf(uint8(0))
|
||||
var tUint16 = reflect.TypeOf(uint16(0))
|
||||
var tUint32 = reflect.TypeOf(uint32(0))
|
||||
var tUint64 = reflect.TypeOf(uint64(0))
|
||||
|
||||
var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
var tByteSlice = reflect.TypeOf([]byte(nil))
|
||||
var tByte = reflect.TypeOf(byte(0x00))
|
||||
var tURL = reflect.TypeOf(url.URL{})
|
||||
var tJSONNumber = reflect.TypeOf(json.Number(""))
|
||||
|
||||
var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
|
||||
var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
|
||||
var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
|
||||
|
||||
var tBinary = reflect.TypeOf(primitive.Binary{})
|
||||
var tUndefined = reflect.TypeOf(primitive.Undefined{})
|
||||
var tOID = reflect.TypeOf(primitive.ObjectID{})
|
||||
var tDateTime = reflect.TypeOf(primitive.DateTime(0))
|
||||
var tNull = reflect.TypeOf(primitive.Null{})
|
||||
var tRegex = reflect.TypeOf(primitive.Regex{})
|
||||
var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
|
||||
var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
|
||||
var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
|
||||
var tSymbol = reflect.TypeOf(primitive.Symbol(""))
|
||||
var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
|
||||
var tDecimal = reflect.TypeOf(primitive.Decimal128{})
|
||||
var tMinKey = reflect.TypeOf(primitive.MinKey{})
|
||||
var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
|
||||
var tD = reflect.TypeOf(primitive.D{})
|
||||
var tA = reflect.TypeOf(primitive.A{})
|
||||
var tE = reflect.TypeOf(primitive.E{})
|
||||
|
||||
var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
|
||||
389
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
generated
vendored
Executable file
@@ -0,0 +1,389 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// Copier is a type that allows copying between ValueReaders, ValueWriters, and
|
||||
// []byte values.
|
||||
type Copier struct{}
|
||||
|
||||
// NewCopier creates a new Copier.
|
||||
func NewCopier() Copier {
|
||||
return Copier{}
|
||||
}
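A minimal sketch of using Copier to re-serialize a raw BSON document by streaming it through a ValueReader. It assumes bsonrw.NewBSONDocumentReader, a constructor for a ValueReader over raw bytes that lives elsewhere in this package and is not part of the hunks shown here; copyRaw is a hypothetical helper.

package example

import (
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// copyRaw copies the BSON document in src into a freshly written byte slice.
func copyRaw(src []byte) ([]byte, error) {
	vr := bsonrw.NewBSONDocumentReader(src)
	return bsonrw.Copier{}.CopyDocumentToBytes(vr)
}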
|
||||
|
||||
// CopyDocument handles copying a document from src to dst.
|
||||
func CopyDocument(dst ValueWriter, src ValueReader) error {
|
||||
return Copier{}.CopyDocument(dst, src)
|
||||
}
|
||||
|
||||
// CopyDocument handles copying one document from the src to the dst.
|
||||
func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
|
||||
dr, err := src.ReadDocument()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dw, err := dst.WriteDocument()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.copyDocumentCore(dw, dr)
|
||||
}
|
||||
|
||||
// CopyDocumentFromBytes copies the values from a BSON document represented as a
|
||||
// []byte to a ValueWriter.
|
||||
func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
|
||||
dw, err := dst.WriteDocument()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.CopyBytesToDocumentWriter(dw, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dw.WriteDocumentEnd()
|
||||
}
|
||||
|
||||
// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
|
||||
// DocumentWriter.
|
||||
func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
|
||||
// TODO(skriptble): Create error types here. Anything that's a tag should be a property.
|
||||
length, rem, ok := bsoncore.ReadLength(src)
|
||||
if !ok {
|
||||
return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
|
||||
}
|
||||
if len(src) < int(length) {
|
||||
return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length)
|
||||
}
|
||||
rem = rem[:length-4]
|
||||
|
||||
var t bsontype.Type
|
||||
var key string
|
||||
var val bsoncore.Value
|
||||
for {
|
||||
t, rem, ok = bsoncore.ReadType(rem)
|
||||
if !ok {
|
||||
return io.EOF
|
||||
}
|
||||
if t == bsontype.Type(0) {
|
||||
if len(rem) != 0 {
|
||||
return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
key, rem, ok = bsoncore.ReadKey(rem)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
|
||||
}
|
||||
dvw, err := dst.WriteDocumentElement(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val, rem, ok = bsoncore.ReadValue(rem, t)
|
||||
if !ok {
|
||||
return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
|
||||
}
|
||||
err = c.CopyValueFromBytes(dvw, t, val.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyDocumentToBytes copies an entire document from the ValueReader and
|
||||
// returns it as bytes.
|
||||
func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
|
||||
return c.AppendDocumentBytes(nil, src)
|
||||
}
|
||||
|
||||
// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
|
||||
// append the result to dst.
|
||||
func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
|
||||
if br, ok := src.(BytesReader); ok {
|
||||
_, dst, err := br.ReadValueBytes(dst)
|
||||
return dst, err
|
||||
}
|
||||
|
||||
vw := vwPool.Get().(*valueWriter)
|
||||
defer vwPool.Put(vw)
|
||||
|
||||
vw.reset(dst)
|
||||
|
||||
err := c.CopyDocument(vw, src)
|
||||
dst = vw.buf
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// CopyValueFromBytes will write the value represented by t and src to dst.
|
||||
func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
|
||||
if wvb, ok := dst.(BytesWriter); ok {
|
||||
return wvb.WriteValueBytes(t, src)
|
||||
}
|
||||
|
||||
vr := vrPool.Get().(*valueReader)
|
||||
defer vrPool.Put(vr)
|
||||
|
||||
vr.reset(src)
|
||||
vr.pushElement(t)
|
||||
|
||||
return c.CopyValue(dst, vr)
|
||||
}
|
||||
|
||||
// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
|
||||
// []byte.
|
||||
func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
|
||||
return c.AppendValueBytes(nil, src)
|
||||
}
|
||||
|
||||
// AppendValueBytes functions the same as CopyValueToBytes, but will append the
|
||||
// result to dst.
|
||||
func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
|
||||
if br, ok := src.(BytesReader); ok {
|
||||
return br.ReadValueBytes(dst)
|
||||
}
|
||||
|
||||
vw := vwPool.Get().(*valueWriter)
|
||||
defer vwPool.Put(vw)
|
||||
|
||||
start := len(dst)
|
||||
|
||||
vw.reset(dst)
|
||||
vw.push(mElement)
|
||||
|
||||
err := c.CopyValue(vw, src)
|
||||
if err != nil {
|
||||
return 0, dst, err
|
||||
}
|
||||
|
||||
return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
|
||||
}
|
||||
|
||||
// CopyValue will copy a single value from src to dst.
|
||||
func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
|
||||
var err error
|
||||
switch src.Type() {
|
||||
case bsontype.Double:
|
||||
var f64 float64
|
||||
f64, err = src.ReadDouble()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteDouble(f64)
|
||||
case bsontype.String:
|
||||
var str string
|
||||
str, err = src.ReadString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dst.WriteString(str)
|
||||
case bsontype.EmbeddedDocument:
|
||||
err = c.CopyDocument(dst, src)
|
||||
case bsontype.Array:
|
||||
err = c.copyArray(dst, src)
|
||||
case bsontype.Binary:
|
||||
var data []byte
|
||||
var subtype byte
|
||||
data, subtype, err = src.ReadBinary()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteBinaryWithSubtype(data, subtype)
|
||||
case bsontype.Undefined:
|
||||
err = src.ReadUndefined()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteUndefined()
|
||||
case bsontype.ObjectID:
|
||||
var oid primitive.ObjectID
|
||||
oid, err = src.ReadObjectID()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteObjectID(oid)
|
||||
case bsontype.Boolean:
|
||||
var b bool
|
||||
b, err = src.ReadBoolean()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteBoolean(b)
|
||||
case bsontype.DateTime:
|
||||
var dt int64
|
||||
dt, err = src.ReadDateTime()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteDateTime(dt)
|
||||
case bsontype.Null:
|
||||
err = src.ReadNull()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteNull()
|
||||
case bsontype.Regex:
|
||||
var pattern, options string
|
||||
pattern, options, err = src.ReadRegex()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteRegex(pattern, options)
|
||||
case bsontype.DBPointer:
|
||||
var ns string
|
||||
var pointer primitive.ObjectID
|
||||
ns, pointer, err = src.ReadDBPointer()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteDBPointer(ns, pointer)
|
||||
case bsontype.JavaScript:
|
||||
var js string
|
||||
js, err = src.ReadJavascript()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteJavascript(js)
|
||||
case bsontype.Symbol:
|
||||
var symbol string
|
||||
symbol, err = src.ReadSymbol()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteSymbol(symbol)
|
||||
case bsontype.CodeWithScope:
|
||||
var code string
|
||||
var srcScope DocumentReader
|
||||
code, srcScope, err = src.ReadCodeWithScope()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
var dstScope DocumentWriter
|
||||
dstScope, err = dst.WriteCodeWithScope(code)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = c.copyDocumentCore(dstScope, srcScope)
|
||||
case bsontype.Int32:
|
||||
var i32 int32
|
||||
i32, err = src.ReadInt32()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteInt32(i32)
|
||||
case bsontype.Timestamp:
|
||||
var t, i uint32
|
||||
t, i, err = src.ReadTimestamp()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteTimestamp(t, i)
|
||||
case bsontype.Int64:
|
||||
var i64 int64
|
||||
i64, err = src.ReadInt64()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteInt64(i64)
|
||||
case bsontype.Decimal128:
|
||||
var d128 primitive.Decimal128
|
||||
d128, err = src.ReadDecimal128()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteDecimal128(d128)
|
||||
case bsontype.MinKey:
|
||||
err = src.ReadMinKey()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteMinKey()
|
||||
case bsontype.MaxKey:
|
||||
err = src.ReadMaxKey()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = dst.WriteMaxKey()
|
||||
default:
|
||||
err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
|
||||
ar, err := src.ReadArray()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aw, err := dst.WriteArray()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
vr, err := ar.ReadValue()
|
||||
if err == ErrEOA {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw, err := aw.WriteArrayElement()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.CopyValue(vw, vr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return aw.WriteArrayEnd()
|
||||
}
|
||||
|
||||
func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
|
||||
for {
|
||||
key, vr, err := dr.ReadElement()
|
||||
if err == ErrEOD {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw, err := dw.WriteDocumentElement(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.CopyValue(vw, vr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return dw.WriteDocumentEnd()
|
||||
}
|
||||
9
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
generated
vendored
Executable file
@@ -0,0 +1,9 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// Package bsonrw contains abstractions for reading and writing
|
||||
// BSON and BSON like types from sources.
|
||||
package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
731
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
generated
vendored
Executable file
@@ -0,0 +1,731 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
const maxNestingDepth = 200
|
||||
|
||||
// ErrInvalidJSON indicates the JSON input is invalid
|
||||
var ErrInvalidJSON = errors.New("invalid JSON input")
|
||||
|
||||
type jsonParseState byte
|
||||
|
||||
const (
|
||||
jpsStartState jsonParseState = iota
|
||||
jpsSawBeginObject
|
||||
jpsSawEndObject
|
||||
jpsSawBeginArray
|
||||
jpsSawEndArray
|
||||
jpsSawColon
|
||||
jpsSawComma
|
||||
jpsSawKey
|
||||
jpsSawValue
|
||||
jpsDoneState
|
||||
jpsInvalidState
|
||||
)
|
||||
|
||||
type jsonParseMode byte
|
||||
|
||||
const (
|
||||
jpmInvalidMode jsonParseMode = iota
|
||||
jpmObjectMode
|
||||
jpmArrayMode
|
||||
)
|
||||
|
||||
type extJSONValue struct {
|
||||
t bsontype.Type
|
||||
v interface{}
|
||||
}
|
||||
|
||||
type extJSONObject struct {
|
||||
keys []string
|
||||
values []*extJSONValue
|
||||
}
|
||||
|
||||
type extJSONParser struct {
|
||||
js *jsonScanner
|
||||
s jsonParseState
|
||||
m []jsonParseMode
|
||||
k string
|
||||
v *extJSONValue
|
||||
|
||||
err error
|
||||
canonical bool
|
||||
depth int
|
||||
maxDepth int
|
||||
|
||||
emptyObject bool
|
||||
}
|
||||
|
||||
// newExtJSONParser returns a new extended JSON parser, ready to begin
// parsing from the first character of the provided JSON input. It will not
|
||||
// perform any read-ahead and will therefore not report any errors about
|
||||
// malformed JSON at this point.
|
||||
func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
|
||||
return &extJSONParser{
|
||||
js: &jsonScanner{r: r},
|
||||
s: jpsStartState,
|
||||
m: []jsonParseMode{},
|
||||
canonical: canonical,
|
||||
maxDepth: maxNestingDepth,
|
||||
}
|
||||
}
|
||||
|
||||
// peekType examines the next value and returns its BSON Type
|
||||
func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
|
||||
var t bsontype.Type
|
||||
var err error
|
||||
|
||||
ejp.advanceState()
|
||||
switch ejp.s {
|
||||
case jpsSawValue:
|
||||
t = ejp.v.t
|
||||
case jpsSawBeginArray:
|
||||
t = bsontype.Array
|
||||
case jpsInvalidState:
|
||||
err = ejp.err
|
||||
case jpsSawComma:
|
||||
// in array mode, seeing a comma means we need to progress again to actually observe a type
|
||||
if ejp.peekMode() == jpmArrayMode {
|
||||
return ejp.peekType()
|
||||
}
|
||||
case jpsSawEndArray:
|
||||
// this would only be a valid state if we were in array mode, so return end-of-array error
|
||||
err = ErrEOA
|
||||
case jpsSawBeginObject:
|
||||
// peek key to determine type
|
||||
ejp.advanceState()
|
||||
switch ejp.s {
|
||||
case jpsSawEndObject: // empty embedded document
|
||||
t = bsontype.EmbeddedDocument
|
||||
ejp.emptyObject = true
|
||||
case jpsInvalidState:
|
||||
err = ejp.err
|
||||
case jpsSawKey:
|
||||
t = wrapperKeyBSONType(ejp.k)
|
||||
|
||||
if t == bsontype.JavaScript {
|
||||
// just saw $code, need to check for $scope at same level
|
||||
_, err := ejp.readValue(bsontype.JavaScript)
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
switch ejp.s {
|
||||
case jpsSawEndObject: // type is TypeJavaScript
|
||||
case jpsSawComma:
|
||||
ejp.advanceState()
|
||||
if ejp.s == jpsSawKey && ejp.k == "$scope" {
|
||||
t = bsontype.CodeWithScope
|
||||
} else {
|
||||
err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
|
||||
}
|
||||
case jpsInvalidState:
|
||||
err = ejp.err
|
||||
default:
|
||||
err = ErrInvalidJSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return t, err
|
||||
}
|
||||
|
||||
// readKey parses the next key and its type and returns them
|
||||
func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
|
||||
if ejp.emptyObject {
|
||||
ejp.emptyObject = false
|
||||
return "", 0, ErrEOD
|
||||
}
|
||||
|
||||
// advance to key (or return with error)
|
||||
switch ejp.s {
|
||||
case jpsStartState:
|
||||
ejp.advanceState()
|
||||
if ejp.s == jpsSawBeginObject {
|
||||
ejp.advanceState()
|
||||
}
|
||||
case jpsSawBeginObject:
|
||||
ejp.advanceState()
|
||||
case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
|
||||
ejp.advanceState()
|
||||
switch ejp.s {
|
||||
case jpsSawBeginObject, jpsSawComma:
|
||||
ejp.advanceState()
|
||||
case jpsSawEndObject:
|
||||
return "", 0, ErrEOD
|
||||
case jpsDoneState:
|
||||
return "", 0, io.EOF
|
||||
case jpsInvalidState:
|
||||
return "", 0, ejp.err
|
||||
default:
|
||||
return "", 0, ErrInvalidJSON
|
||||
}
|
||||
case jpsSawKey: // do nothing (key was peeked before)
|
||||
default:
|
||||
return "", 0, invalidRequestError("key")
|
||||
}
|
||||
|
||||
// read key
|
||||
var key string
|
||||
|
||||
switch ejp.s {
|
||||
case jpsSawKey:
|
||||
key = ejp.k
|
||||
case jpsSawEndObject:
|
||||
return "", 0, ErrEOD
|
||||
case jpsInvalidState:
|
||||
return "", 0, ejp.err
|
||||
default:
|
||||
return "", 0, invalidRequestError("key")
|
||||
}
|
||||
|
||||
// check for colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, key); err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
// peek at the value to determine type
|
||||
t, err := ejp.peekType()
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
return key, t, nil
|
||||
}
|
||||
|
||||
// readValue returns the value corresponding to the Type returned by peekType
|
||||
func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
|
||||
if ejp.s == jpsInvalidState {
|
||||
return nil, ejp.err
|
||||
}
|
||||
|
||||
var v *extJSONValue
|
||||
|
||||
switch t {
|
||||
case bsontype.Null, bsontype.Boolean, bsontype.String:
|
||||
if ejp.s != jpsSawValue {
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
v = ejp.v
|
||||
case bsontype.Int32, bsontype.Int64, bsontype.Double:
|
||||
// relaxed version allows these to be literal number values
|
||||
if ejp.s == jpsSawValue {
|
||||
v = ejp.v
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
|
||||
switch ejp.s {
|
||||
case jpsSawKey:
|
||||
// read colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, ejp.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read value
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
|
||||
return nil, invalidJSONErrorForType("value", t)
|
||||
}
|
||||
|
||||
v = ejp.v
|
||||
|
||||
// read end object
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawEndObject {
|
||||
return nil, invalidJSONErrorForType("} after value", t)
|
||||
}
|
||||
default:
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
|
||||
if ejp.s != jpsSawKey {
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
// read colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, ejp.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
if t == bsontype.Binary && ejp.s == jpsSawValue {
|
||||
// convert legacy $binary format
|
||||
base64 := ejp.v
|
||||
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawComma {
|
||||
return nil, invalidJSONErrorForType(",", bsontype.Binary)
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
key, t, err := ejp.readKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if key != "$type" {
|
||||
return nil, invalidJSONErrorForType("$type", bsontype.Binary)
|
||||
}
|
||||
|
||||
subType, err := ejp.readValue(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawEndObject {
|
||||
return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary)
|
||||
}
|
||||
|
||||
v = &extJSONValue{
|
||||
t: bsontype.EmbeddedDocument,
|
||||
v: &extJSONObject{
|
||||
keys: []string{"base64", "subType"},
|
||||
values: []*extJSONValue{base64, subType},
|
||||
},
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// read KV pairs
|
||||
if ejp.s != jpsSawBeginObject {
|
||||
return nil, invalidJSONErrorForType("{", t)
|
||||
}
|
||||
|
||||
keys, vals, err := ejp.readObject(2, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawEndObject {
|
||||
return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
|
||||
}
|
||||
|
||||
v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
|
||||
|
||||
case bsontype.DateTime:
|
||||
switch ejp.s {
|
||||
case jpsSawValue:
|
||||
v = ejp.v
|
||||
case jpsSawKey:
|
||||
// read colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, ejp.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
switch ejp.s {
|
||||
case jpsSawBeginObject:
|
||||
keys, vals, err := ejp.readObject(1, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
|
||||
case jpsSawValue:
|
||||
if ejp.canonical {
|
||||
return nil, invalidJSONError("{")
|
||||
}
|
||||
v = ejp.v
|
||||
default:
|
||||
if ejp.canonical {
|
||||
return nil, invalidJSONErrorForType("object", t)
|
||||
}
|
||||
return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t)
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawEndObject {
|
||||
return nil, invalidJSONErrorForType("value and then }", t)
|
||||
}
|
||||
default:
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
case bsontype.JavaScript:
|
||||
switch ejp.s {
|
||||
case jpsSawKey:
|
||||
// read colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, ejp.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read value
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawValue {
|
||||
return nil, invalidJSONErrorForType("value", t)
|
||||
}
|
||||
v = ejp.v
|
||||
|
||||
// read end object or comma and just return
|
||||
ejp.advanceState()
|
||||
case jpsSawEndObject:
|
||||
v = ejp.v
|
||||
default:
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
case bsontype.CodeWithScope:
|
||||
if ejp.s == jpsSawKey && ejp.k == "$scope" {
|
||||
v = ejp.v // this is the $code string from earlier
|
||||
|
||||
// read colon
|
||||
ejp.advanceState()
|
||||
if err := ensureColon(ejp.s, ejp.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read {
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawBeginObject {
|
||||
return nil, invalidJSONError("$scope to be embedded document")
|
||||
}
|
||||
} else {
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
case bsontype.EmbeddedDocument, bsontype.Array:
|
||||
return nil, invalidRequestError(t.String())
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// readObject is a utility method for reading full objects of known (or expected) size
|
||||
// it is useful for extended JSON types such as binary, datetime, regex, and timestamp
|
||||
func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
|
||||
keys := make([]string, numKeys)
|
||||
vals := make([]*extJSONValue, numKeys)
|
||||
|
||||
if !started {
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawBeginObject {
|
||||
return nil, nil, invalidJSONError("{")
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < numKeys; i++ {
|
||||
key, t, err := ejp.readKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch ejp.s {
|
||||
case jpsSawKey:
|
||||
v, err := ejp.readValue(t)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
keys[i] = key
|
||||
vals[i] = v
|
||||
case jpsSawValue:
|
||||
keys[i] = key
|
||||
vals[i] = ejp.v
|
||||
default:
|
||||
return nil, nil, invalidJSONError("value")
|
||||
}
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
if ejp.s != jpsSawEndObject {
|
||||
return nil, nil, invalidJSONError("}")
|
||||
}
|
||||
|
||||
return keys, vals, nil
|
||||
}
|
||||
|
||||
// advanceState reads the next JSON token from the scanner and transitions
|
||||
// from the current state based on that token's type
|
||||
func (ejp *extJSONParser) advanceState() {
|
||||
if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
|
||||
return
|
||||
}
|
||||
|
||||
jt, err := ejp.js.nextToken()
|
||||
|
||||
if err != nil {
|
||||
ejp.err = err
|
||||
ejp.s = jpsInvalidState
|
||||
return
|
||||
}
|
||||
|
||||
valid := ejp.validateToken(jt.t)
|
||||
if !valid {
|
||||
ejp.err = unexpectedTokenError(jt)
|
||||
ejp.s = jpsInvalidState
|
||||
return
|
||||
}
|
||||
|
||||
switch jt.t {
|
||||
case jttBeginObject:
|
||||
ejp.s = jpsSawBeginObject
|
||||
ejp.pushMode(jpmObjectMode)
|
||||
ejp.depth++
|
||||
|
||||
if ejp.depth > ejp.maxDepth {
|
||||
ejp.err = nestingDepthError(jt.p, ejp.depth)
|
||||
ejp.s = jpsInvalidState
|
||||
}
|
||||
case jttEndObject:
|
||||
ejp.s = jpsSawEndObject
|
||||
ejp.depth--
|
||||
|
||||
if ejp.popMode() != jpmObjectMode {
|
||||
ejp.err = unexpectedTokenError(jt)
|
||||
ejp.s = jpsInvalidState
|
||||
}
|
||||
case jttBeginArray:
|
||||
ejp.s = jpsSawBeginArray
|
||||
ejp.pushMode(jpmArrayMode)
|
||||
case jttEndArray:
|
||||
ejp.s = jpsSawEndArray
|
||||
|
||||
if ejp.popMode() != jpmArrayMode {
|
||||
ejp.err = unexpectedTokenError(jt)
|
||||
ejp.s = jpsInvalidState
|
||||
}
|
||||
case jttColon:
|
||||
ejp.s = jpsSawColon
|
||||
case jttComma:
|
||||
ejp.s = jpsSawComma
|
||||
case jttEOF:
|
||||
ejp.s = jpsDoneState
|
||||
if len(ejp.m) != 0 {
|
||||
ejp.err = unexpectedTokenError(jt)
|
||||
ejp.s = jpsInvalidState
|
||||
}
|
||||
case jttString:
|
||||
switch ejp.s {
|
||||
case jpsSawComma:
|
||||
if ejp.peekMode() == jpmArrayMode {
|
||||
ejp.s = jpsSawValue
|
||||
ejp.v = extendJSONToken(jt)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case jpsSawBeginObject:
|
||||
ejp.s = jpsSawKey
|
||||
ejp.k = jt.v.(string)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
ejp.s = jpsSawValue
|
||||
ejp.v = extendJSONToken(jt)
|
||||
}
|
||||
}
|
||||
|
||||
var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
|
||||
jpsStartState: {
|
||||
jttBeginObject: true,
|
||||
jttBeginArray: true,
|
||||
jttInt32: true,
|
||||
jttInt64: true,
|
||||
jttDouble: true,
|
||||
jttString: true,
|
||||
jttBool: true,
|
||||
jttNull: true,
|
||||
jttEOF: true,
|
||||
},
|
||||
jpsSawBeginObject: {
|
||||
jttEndObject: true,
|
||||
jttString: true,
|
||||
},
|
||||
jpsSawEndObject: {
|
||||
jttEndObject: true,
|
||||
jttEndArray: true,
|
||||
jttComma: true,
|
||||
jttEOF: true,
|
||||
},
|
||||
jpsSawBeginArray: {
|
||||
jttBeginObject: true,
|
||||
jttBeginArray: true,
|
||||
jttEndArray: true,
|
||||
jttInt32: true,
|
||||
jttInt64: true,
|
||||
jttDouble: true,
|
||||
jttString: true,
|
||||
jttBool: true,
|
||||
jttNull: true,
|
||||
},
|
||||
jpsSawEndArray: {
|
||||
jttEndObject: true,
|
||||
jttEndArray: true,
|
||||
jttComma: true,
|
||||
jttEOF: true,
|
||||
},
|
||||
jpsSawColon: {
|
||||
jttBeginObject: true,
|
||||
jttBeginArray: true,
|
||||
jttInt32: true,
|
||||
jttInt64: true,
|
||||
jttDouble: true,
|
||||
jttString: true,
|
||||
jttBool: true,
|
||||
jttNull: true,
|
||||
},
|
||||
jpsSawComma: {
|
||||
jttBeginObject: true,
|
||||
jttBeginArray: true,
|
||||
jttInt32: true,
|
||||
jttInt64: true,
|
||||
jttDouble: true,
|
||||
jttString: true,
|
||||
jttBool: true,
|
||||
jttNull: true,
|
||||
},
|
||||
jpsSawKey: {
|
||||
jttColon: true,
|
||||
},
|
||||
jpsSawValue: {
|
||||
jttEndObject: true,
|
||||
jttEndArray: true,
|
||||
jttComma: true,
|
||||
jttEOF: true,
|
||||
},
|
||||
jpsDoneState: {},
|
||||
jpsInvalidState: {},
|
||||
}
|
||||
|
||||
func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
|
||||
switch ejp.s {
|
||||
case jpsSawEndObject:
|
||||
// if we are at depth zero and the next token is a '{',
|
||||
// we can consider it valid only if we are not in array mode.
|
||||
if jtt == jttBeginObject && ejp.depth == 0 {
|
||||
return ejp.peekMode() != jpmArrayMode
|
||||
}
|
||||
case jpsSawComma:
|
||||
switch ejp.peekMode() {
|
||||
// the only valid next token after a comma inside a document is a string (a key)
|
||||
case jpmObjectMode:
|
||||
return jtt == jttString
|
||||
case jpmInvalidMode:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
_, ok := jpsValidTransitionTokens[ejp.s][jtt]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ensureExtValueType returns true if the current value has the expected
|
||||
// value type for single-key extended JSON types. For example,
|
||||
// {"$numberInt": v} v must be TypeString
|
||||
func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
|
||||
switch t {
|
||||
case bsontype.MinKey, bsontype.MaxKey:
|
||||
return ejp.v.t == bsontype.Int32
|
||||
case bsontype.Undefined:
|
||||
return ejp.v.t == bsontype.Boolean
|
||||
case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
|
||||
return ejp.v.t == bsontype.String
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (ejp *extJSONParser) pushMode(m jsonParseMode) {
|
||||
ejp.m = append(ejp.m, m)
|
||||
}
|
||||
|
||||
func (ejp *extJSONParser) popMode() jsonParseMode {
|
||||
l := len(ejp.m)
|
||||
if l == 0 {
|
||||
return jpmInvalidMode
|
||||
}
|
||||
|
||||
m := ejp.m[l-1]
|
||||
ejp.m = ejp.m[:l-1]
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (ejp *extJSONParser) peekMode() jsonParseMode {
|
||||
l := len(ejp.m)
|
||||
if l == 0 {
|
||||
return jpmInvalidMode
|
||||
}
|
||||
|
||||
return ejp.m[l-1]
|
||||
}
|
||||
|
||||
func extendJSONToken(jt *jsonToken) *extJSONValue {
|
||||
var t bsontype.Type
|
||||
|
||||
switch jt.t {
|
||||
case jttInt32:
|
||||
t = bsontype.Int32
|
||||
case jttInt64:
|
||||
t = bsontype.Int64
|
||||
case jttDouble:
|
||||
t = bsontype.Double
|
||||
case jttString:
|
||||
t = bsontype.String
|
||||
case jttBool:
|
||||
t = bsontype.Boolean
|
||||
case jttNull:
|
||||
t = bsontype.Null
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
return &extJSONValue{t: t, v: jt.v}
|
||||
}
|
||||
|
||||
func ensureColon(s jsonParseState, key string) error {
|
||||
if s != jpsSawColon {
|
||||
return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func invalidRequestError(s string) error {
|
||||
return fmt.Errorf("invalid request to read %s", s)
|
||||
}
|
||||
|
||||
func invalidJSONError(expected string) error {
|
||||
return fmt.Errorf("invalid JSON input; expected %s", expected)
|
||||
}
|
||||
|
||||
func invalidJSONErrorForType(expected string, t bsontype.Type) error {
|
||||
return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
|
||||
}
|
||||
|
||||
func unexpectedTokenError(jt *jsonToken) error {
|
||||
switch jt.t {
|
||||
case jttInt32, jttInt64, jttDouble:
|
||||
return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
|
||||
case jttString:
|
||||
return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
|
||||
case jttBool:
|
||||
return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
|
||||
case jttNull:
|
||||
return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
|
||||
case jttEOF:
|
||||
return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
|
||||
default:
|
||||
return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
|
||||
}
|
||||
}
|
||||
|
||||
func nestingDepthError(p, depth int) error {
|
||||
return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
|
||||
}
|
||||
659
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
generated
vendored
Executable file
@@ -0,0 +1,659 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
|
||||
type ExtJSONValueReaderPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
|
||||
func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
|
||||
return &ExtJSONValueReaderPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(extJSONValueReader)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a ValueReader from the pool and uses r as the underlying ExtJSON source.
|
||||
func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
|
||||
vr := bvrp.pool.Get().(*extJSONValueReader)
|
||||
return vr.reset(r, canonical)
|
||||
}
|
||||
|
||||
// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader, nothing
|
||||
// is inserted into the pool and ok will be false.
|
||||
func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
|
||||
bvr, ok := vr.(*extJSONValueReader)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
bvr, _ = bvr.reset(nil, false)
|
||||
bvrp.pool.Put(bvr)
|
||||
return true
|
||||
}
|
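A minimal caller-side sketch of the pool's Get/Put cycle (assumes the usual "strings" and "log" imports; error handling shortened):
pool := bsonrw.NewExtJSONValueReaderPool()
vr, err := pool.Get(strings.NewReader(`{"a": 1}`), false) // false: treat the input as relaxed extended JSON
if err != nil {
	log.Fatal(err)
}
// ... consume vr as a ValueReader ...
_ = pool.Put(vr) // Put reports false if vr is not an ExtJSON ValueReader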
||||
|
||||
type ejvrState struct {
|
||||
mode mode
|
||||
vType bsontype.Type
|
||||
depth int
|
||||
}
|
||||
|
||||
// extJSONValueReader is for reading extended JSON.
|
||||
type extJSONValueReader struct {
|
||||
p *extJSONParser
|
||||
|
||||
stack []ejvrState
|
||||
frame int
|
||||
}
|
||||
|
||||
// NewExtJSONValueReader creates a new ValueReader from a given io.Reader
|
||||
// It will interpret the JSON of r as canonical or relaxed according to the
|
||||
// given canonical flag
|
||||
func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
|
||||
return newExtJSONValueReader(r, canonical)
|
||||
}
|
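A caller-side sketch of driving the reader by hand (assumes the DocumentReader/ValueReader interfaces from this package and the usual imports; illustrative only):
vr, err := bsonrw.NewExtJSONValueReader(strings.NewReader(`{"name": "mongo"}`), false)
if err != nil {
	log.Fatal(err)
}
dr, err := vr.ReadDocument() // top-level document
if err != nil {
	log.Fatal(err)
}
key, evr, err := dr.ReadElement() // key == "name"
if err != nil {
	log.Fatal(err)
}
val, err := evr.ReadString() // val == "mongo"
if err != nil {
	log.Fatal(err)
}
fmt.Println(key, val)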
||||
|
||||
func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
|
||||
ejvr := new(extJSONValueReader)
|
||||
return ejvr.reset(r, canonical)
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
|
||||
p := newExtJSONParser(r, canonical)
|
||||
typ, err := p.peekType()
|
||||
|
||||
if err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
|
||||
var m mode
|
||||
switch typ {
|
||||
case bsontype.EmbeddedDocument:
|
||||
m = mTopLevel
|
||||
case bsontype.Array:
|
||||
m = mArray
|
||||
default:
|
||||
m = mValue
|
||||
}
|
||||
|
||||
stack := make([]ejvrState, 1, 5)
|
||||
stack[0] = ejvrState{
|
||||
mode: m,
|
||||
vType: typ,
|
||||
}
|
||||
return &extJSONValueReader{
|
||||
p: p,
|
||||
stack: stack,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) advanceFrame() {
|
||||
if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
|
||||
length := len(ejvr.stack)
|
||||
if length+1 >= cap(ejvr.stack) {
|
||||
// double it
|
||||
buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
|
||||
copy(buf, ejvr.stack)
|
||||
ejvr.stack = buf
|
||||
}
|
||||
ejvr.stack = ejvr.stack[:length+1]
|
||||
}
|
||||
ejvr.frame++
|
||||
|
||||
// Clean the stack
|
||||
ejvr.stack[ejvr.frame].mode = 0
|
||||
ejvr.stack[ejvr.frame].vType = 0
|
||||
ejvr.stack[ejvr.frame].depth = 0
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) pushDocument() {
|
||||
ejvr.advanceFrame()
|
||||
|
||||
ejvr.stack[ejvr.frame].mode = mDocument
|
||||
ejvr.stack[ejvr.frame].depth = ejvr.p.depth
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) pushCodeWithScope() {
|
||||
ejvr.advanceFrame()
|
||||
|
||||
ejvr.stack[ejvr.frame].mode = mCodeWithScope
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) pushArray() {
|
||||
ejvr.advanceFrame()
|
||||
|
||||
ejvr.stack[ejvr.frame].mode = mArray
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
|
||||
ejvr.advanceFrame()
|
||||
|
||||
ejvr.stack[ejvr.frame].mode = m
|
||||
ejvr.stack[ejvr.frame].vType = t
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) pop() {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mElement, mValue:
|
||||
ejvr.frame--
|
||||
case mDocument, mArray, mCodeWithScope:
|
||||
ejvr.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
|
||||
}
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) skipDocument() error {
|
||||
// read entire document until ErrEOD (using readKey and readValue)
|
||||
_, typ, err := ejvr.p.readKey()
|
||||
for err == nil {
|
||||
_, err = ejvr.p.readValue(typ)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
_, typ, err = ejvr.p.readKey()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) skipArray() error {
|
||||
// read entire array until ErrEOA (using peekType)
|
||||
_, err := ejvr.p.peekType()
|
||||
for err == nil {
|
||||
_, err = ejvr.p.peekType()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
|
||||
te := TransitionError{
|
||||
name: name,
|
||||
current: ejvr.stack[ejvr.frame].mode,
|
||||
destination: destination,
|
||||
modes: modes,
|
||||
action: "read",
|
||||
}
|
||||
if ejvr.frame != 0 {
|
||||
te.parent = ejvr.stack[ejvr.frame-1].mode
|
||||
}
|
||||
return te
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
|
||||
return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mElement, mValue:
|
||||
if ejvr.stack[ejvr.frame].vType != t {
|
||||
return ejvr.typeError(t)
|
||||
}
|
||||
default:
|
||||
modes := []mode{mElement, mValue}
|
||||
if addModes != nil {
|
||||
modes = append(modes, addModes...)
|
||||
}
|
||||
return ejvr.invalidTransitionErr(destination, callerName, modes)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) Type() bsontype.Type {
|
||||
return ejvr.stack[ejvr.frame].vType
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) Skip() error {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mElement, mValue:
|
||||
default:
|
||||
return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
|
||||
}
|
||||
|
||||
defer ejvr.pop()
|
||||
|
||||
t := ejvr.stack[ejvr.frame].vType
|
||||
switch t {
|
||||
case bsontype.Array:
|
||||
// read entire array until ErrEOA
|
||||
err := ejvr.skipArray()
|
||||
if err != ErrEOA {
|
||||
return err
|
||||
}
|
||||
case bsontype.EmbeddedDocument:
|
||||
// read entire doc until ErrEOD
|
||||
err := ejvr.skipDocument()
|
||||
if err != ErrEOD {
|
||||
return err
|
||||
}
|
||||
case bsontype.CodeWithScope:
|
||||
// read the code portion and set up parser in document mode
|
||||
_, err := ejvr.p.readValue(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// read until ErrEOD
|
||||
err = ejvr.skipDocument()
|
||||
if err != ErrEOD {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := ejvr.p.readValue(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mTopLevel: // allow reading array from top level
|
||||
case mArray:
|
||||
return ejvr, nil
|
||||
default:
|
||||
if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ejvr.pushArray()
|
||||
|
||||
return ejvr, nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Binary)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
b, btype, err = v.parseBinary()
|
||||
|
||||
ejvr.pop()
|
||||
return b, btype, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Boolean)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if v.t != bsontype.Boolean {
|
||||
return false, fmt.Errorf("expected type bool, but got type %s", v.t)
|
||||
}
|
||||
|
||||
ejvr.pop()
|
||||
return v.v.(bool), nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mTopLevel:
|
||||
return ejvr, nil
|
||||
case mElement, mValue:
|
||||
if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
|
||||
return nil, ejvr.typeError(bsontype.EmbeddedDocument)
|
||||
}
|
||||
|
||||
ejvr.pushDocument()
|
||||
return ejvr, nil
|
||||
default:
|
||||
return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
|
||||
}
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.CodeWithScope)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
code, err = v.parseJavascript()
|
||||
|
||||
ejvr.pushCodeWithScope()
|
||||
return code, ejvr, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
|
||||
return "", primitive.NilObjectID, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.DBPointer)
|
||||
if err != nil {
|
||||
return "", primitive.NilObjectID, err
|
||||
}
|
||||
|
||||
ns, oid, err = v.parseDBPointer()
|
||||
|
||||
ejvr.pop()
|
||||
return ns, oid, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.DateTime)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
d, err := v.parseDateTime()
|
||||
|
||||
ejvr.pop()
|
||||
return d, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
|
||||
return primitive.Decimal128{}, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Decimal128)
|
||||
if err != nil {
|
||||
return primitive.Decimal128{}, err
|
||||
}
|
||||
|
||||
d, err := v.parseDecimal128()
|
||||
|
||||
ejvr.pop()
|
||||
return d, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Double)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
d, err := v.parseDouble()
|
||||
|
||||
ejvr.pop()
|
||||
return d, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Int32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := v.parseInt32()
|
||||
|
||||
ejvr.pop()
|
||||
return i, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Int64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := v.parseInt64()
|
||||
|
||||
ejvr.pop()
|
||||
return i, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.JavaScript)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
code, err = v.parseJavascript()
|
||||
|
||||
ejvr.pop()
|
||||
return code, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadMaxKey() error {
|
||||
if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.MaxKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = v.parseMinMaxKey("max")
|
||||
|
||||
ejvr.pop()
|
||||
return err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadMinKey() error {
|
||||
if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.MinKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = v.parseMinMaxKey("min")
|
||||
|
||||
ejvr.pop()
|
||||
return err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadNull() error {
|
||||
if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Null)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if v.t != bsontype.Null {
|
||||
return fmt.Errorf("expected type null but got type %s", v.t)
|
||||
}
|
||||
|
||||
ejvr.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
|
||||
return primitive.ObjectID{}, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.ObjectID)
|
||||
if err != nil {
|
||||
return primitive.ObjectID{}, err
|
||||
}
|
||||
|
||||
oid, err := v.parseObjectID()
|
||||
|
||||
ejvr.pop()
|
||||
return oid, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Regex)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
pattern, options, err = v.parseRegex()
|
||||
|
||||
ejvr.pop()
|
||||
return pattern, options, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadString() (string, error) {
|
||||
if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.String)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if v.t != bsontype.String {
|
||||
return "", fmt.Errorf("expected type string but got type %s", v.t)
|
||||
}
|
||||
|
||||
ejvr.pop()
|
||||
return v.v.(string), nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Symbol)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
symbol, err = v.parseSymbol()
|
||||
|
||||
ejvr.pop()
|
||||
return symbol, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
|
||||
if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Timestamp)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
t, i, err = v.parseTimestamp()
|
||||
|
||||
ejvr.pop()
|
||||
return t, i, err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadUndefined() error {
|
||||
if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, err := ejvr.p.readValue(bsontype.Undefined)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = v.parseUndefined()
|
||||
|
||||
ejvr.pop()
|
||||
return err
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mTopLevel, mDocument, mCodeWithScope:
|
||||
default:
|
||||
return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
|
||||
}
|
||||
|
||||
name, t, err := ejvr.p.readKey()
|
||||
|
||||
if err != nil {
|
||||
if err == ErrEOD {
|
||||
if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
|
||||
_, err := ejvr.p.peekType()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ejvr.pop()
|
||||
}
|
||||
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
ejvr.push(mElement, t)
|
||||
return name, ejvr, nil
|
||||
}
|
||||
|
||||
func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
|
||||
switch ejvr.stack[ejvr.frame].mode {
|
||||
case mArray:
|
||||
default:
|
||||
return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
|
||||
}
|
||||
|
||||
t, err := ejvr.p.peekType()
|
||||
if err != nil {
|
||||
if err == ErrEOA {
|
||||
ejvr.pop()
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejvr.push(mValue, t)
|
||||
return ejvr, nil
|
||||
}
|
||||
223
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
generated
vendored
Executable file
@@ -0,0 +1,223 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Based on github.com/golang/go by The Go Authors
|
||||
// See THIRD-PARTY-NOTICES for original license terms.
|
||||
|
||||
package bsonrw
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// safeSet holds the value true if the ASCII character with the given array
|
||||
// position can be represented inside a JSON string without any further
|
||||
// escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), and the backslash character ("\").
|
||||
var safeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': true,
|
||||
'=': true,
|
||||
'>': true,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
|
||||
// htmlSafeSet holds the value true if the ASCII character with the given
|
||||
// array position can be safely represented inside a JSON string, embedded
|
||||
// inside of HTML <script> tags, without any additional escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), the backslash character ("\"), HTML opening and closing
|
||||
// tags ("<" and ">"), and the ampersand ("&").
|
||||
var htmlSafeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': false,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': false,
|
||||
'=': true,
|
||||
'>': false,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
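An in-package sketch (not part of the vendored file; assumes "fmt") of how the two tables differ for HTML-sensitive characters:
for _, c := range []byte{'<', '>', '&', '"', 'a'} {
	fmt.Printf("%q safe=%v htmlSafe=%v\n", c, safeSet[c], htmlSafeSet[c])
}
// '<', '>' and '&' are safe in plain JSON output but not when embedded in <script> tags;
// '"' must be escaped in both cases, while ordinary characters like 'a' need no escaping.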
||||
481
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
generated
vendored
Executable file
@@ -0,0 +1,481 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
func wrapperKeyBSONType(key string) bsontype.Type {
|
||||
switch key {
|
||||
case "$numberInt":
|
||||
return bsontype.Int32
|
||||
case "$numberLong":
|
||||
return bsontype.Int64
|
||||
case "$oid":
|
||||
return bsontype.ObjectID
|
||||
case "$symbol":
|
||||
return bsontype.Symbol
|
||||
case "$numberDouble":
|
||||
return bsontype.Double
|
||||
case "$numberDecimal":
|
||||
return bsontype.Decimal128
|
||||
case "$binary":
|
||||
return bsontype.Binary
|
||||
case "$code":
|
||||
return bsontype.JavaScript
|
||||
case "$scope":
|
||||
return bsontype.CodeWithScope
|
||||
case "$timestamp":
|
||||
return bsontype.Timestamp
|
||||
case "$regularExpression":
|
||||
return bsontype.Regex
|
||||
case "$dbPointer":
|
||||
return bsontype.DBPointer
|
||||
case "$date":
|
||||
return bsontype.DateTime
|
||||
case "$ref":
|
||||
fallthrough
|
||||
case "$id":
|
||||
fallthrough
|
||||
case "$db":
|
||||
return bsontype.EmbeddedDocument // dbrefs aren't bson types
|
||||
case "$minKey":
|
||||
return bsontype.MinKey
|
||||
case "$maxKey":
|
||||
return bsontype.MaxKey
|
||||
case "$undefined":
|
||||
return bsontype.Undefined
|
||||
}
|
||||
|
||||
return bsontype.EmbeddedDocument
|
||||
}
|
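An in-package illustration of the mapping (hypothetical, not part of the vendored file):
_ = wrapperKeyBSONType("$numberLong") // bsontype.Int64
_ = wrapperKeyBSONType("$oid")        // bsontype.ObjectID
_ = wrapperKeyBSONType("name")        // bsontype.EmbeddedDocument: not a wrapper key, so treated as a plain document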
||||
|
||||
func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) {
|
||||
if ejv.t != bsontype.EmbeddedDocument {
|
||||
return nil, 0, fmt.Errorf("$binary value should be object, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
binObj := ejv.v.(*extJSONObject)
|
||||
bFound := false
|
||||
stFound := false
|
||||
|
||||
for i, key := range binObj.keys {
|
||||
val := binObj.values[i]
|
||||
|
||||
switch key {
|
||||
case "base64":
|
||||
if bFound {
|
||||
return nil, 0, errors.New("duplicate base64 key in $binary")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return nil, 0, fmt.Errorf("$binary base64 value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
base64Bytes, err := base64.StdEncoding.DecodeString(val.v.(string))
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("invalid $binary base64 string: %s", val.v.(string))
|
||||
}
|
||||
|
||||
b = base64Bytes
|
||||
bFound = true
|
||||
case "subType":
|
||||
if stFound {
|
||||
return nil, 0, errors.New("duplicate subType key in $binary")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(val.v.(string), 16, 64)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string))
|
||||
}
|
||||
|
||||
subType = byte(i)
|
||||
stFound = true
|
||||
default:
|
||||
return nil, 0, fmt.Errorf("invalid key in $binary object: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if !bFound {
|
||||
return nil, 0, errors.New("missing base64 field in $binary object")
|
||||
}
|
||||
|
||||
if !stFound {
|
||||
return nil, 0, errors.New("missing subType field in $binary object")
|
||||
|
||||
}
|
||||
|
||||
return b, subType, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseDBPointer() (ns string, oid primitive.ObjectID, err error) {
|
||||
if ejv.t != bsontype.EmbeddedDocument {
|
||||
return "", primitive.NilObjectID, fmt.Errorf("$dbPointer value should be object, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
dbpObj := ejv.v.(*extJSONObject)
|
||||
oidFound := false
|
||||
nsFound := false
|
||||
|
||||
for i, key := range dbpObj.keys {
|
||||
val := dbpObj.values[i]
|
||||
|
||||
switch key {
|
||||
case "$ref":
|
||||
if nsFound {
|
||||
return "", primitive.NilObjectID, errors.New("duplicate $ref key in $dbPointer")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $ref value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
ns = val.v.(string)
|
||||
nsFound = true
|
||||
case "$id":
|
||||
if oidFound {
|
||||
return "", primitive.NilObjectID, errors.New("duplicate $id key in $dbPointer")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $id value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
oid, err = primitive.ObjectIDFromHex(val.v.(string))
|
||||
if err != nil {
|
||||
return "", primitive.NilObjectID, err
|
||||
}
|
||||
|
||||
oidFound = true
|
||||
default:
|
||||
return "", primitive.NilObjectID, fmt.Errorf("invalid key in $dbPointer object: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if !nsFound {
|
||||
return "", oid, errors.New("missing $ref field in $dbPointer object")
|
||||
}
|
||||
|
||||
if !oidFound {
|
||||
return "", oid, errors.New("missing $id field in $dbPointer object")
|
||||
}
|
||||
|
||||
return ns, oid, nil
|
||||
}
|
||||
|
||||
const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
|
||||
|
||||
func (ejv *extJSONValue) parseDateTime() (int64, error) {
|
||||
switch ejv.t {
|
||||
case bsontype.Int32:
|
||||
return int64(ejv.v.(int32)), nil
|
||||
case bsontype.Int64:
|
||||
return ejv.v.(int64), nil
|
||||
case bsontype.String:
|
||||
return parseDatetimeString(ejv.v.(string))
|
||||
case bsontype.EmbeddedDocument:
|
||||
return parseDatetimeObject(ejv.v.(*extJSONObject))
|
||||
default:
|
||||
return 0, fmt.Errorf("$date value should be string or object, but instead is %s", ejv.t)
|
||||
}
|
||||
}
|
||||
|
||||
func parseDatetimeString(data string) (int64, error) {
|
||||
t, err := time.Parse(rfc3339Milli, data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid $date value string: %s", data)
|
||||
}
|
||||
|
||||
return t.Unix()*1e3 + int64(t.Nanosecond())/1e6, nil
|
||||
}
|
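A quick in-package check of the millisecond conversion above (assumes "time" and "fmt"; illustrative only):
t, _ := time.Parse(rfc3339Milli, "2021-01-01T00:00:00.123Z")
ms := t.Unix()*1e3 + int64(t.Nanosecond())/1e6
fmt.Println(ms) // 1609459200123: whole seconds scaled to milliseconds, plus the fractional part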
||||
|
||||
func parseDatetimeObject(data *extJSONObject) (d int64, err error) {
|
||||
dFound := false
|
||||
|
||||
for i, key := range data.keys {
|
||||
val := data.values[i]
|
||||
|
||||
switch key {
|
||||
case "$numberLong":
|
||||
if dFound {
|
||||
return 0, errors.New("duplicate $numberLong key in $date")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return 0, fmt.Errorf("$date $numberLong field should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
d, err = val.parseInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dFound = true
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid key in $date object: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if !dFound {
|
||||
return 0, errors.New("missing $numberLong field in $date object")
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseDecimal128() (primitive.Decimal128, error) {
|
||||
if ejv.t != bsontype.String {
|
||||
return primitive.Decimal128{}, fmt.Errorf("$numberDecimal value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
d, err := primitive.ParseDecimal128(ejv.v.(string))
|
||||
if err != nil {
|
||||
return primitive.Decimal128{}, fmt.Errorf("$invalid $numberDecimal string: %s", ejv.v.(string))
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseDouble() (float64, error) {
|
||||
if ejv.t == bsontype.Double {
|
||||
return ejv.v.(float64), nil
|
||||
}
|
||||
|
||||
if ejv.t != bsontype.String {
|
||||
return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
switch ejv.v.(string) {
|
||||
case "Infinity":
|
||||
return math.Inf(1), nil
|
||||
case "-Infinity":
|
||||
return math.Inf(-1), nil
|
||||
case "NaN":
|
||||
return math.NaN(), nil
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(ejv.v.(string), 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
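An in-package sketch (not part of the vendored file; assumes "fmt") of the special string payloads parseDouble accepts:
for _, s := range []string{"Infinity", "-Infinity", "NaN", "2.5"} {
	f, err := (&extJSONValue{t: bsontype.String, v: s}).parseDouble()
	fmt.Println(f, err) // +Inf, -Inf, NaN and 2.5 respectively, each with a nil error
}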
||||
|
||||
func (ejv *extJSONValue) parseInt32() (int32, error) {
|
||||
if ejv.t == bsontype.Int32 {
|
||||
return ejv.v.(int32), nil
|
||||
}
|
||||
|
||||
if ejv.t != bsontype.String {
|
||||
return 0, fmt.Errorf("$numberInt value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if i < math.MinInt32 || i > math.MaxInt32 {
|
||||
return 0, fmt.Errorf("$numberInt value should be int32 but instead is int64: %d", i)
|
||||
}
|
||||
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseInt64() (int64, error) {
|
||||
if ejv.t == bsontype.Int64 {
|
||||
return ejv.v.(int64), nil
|
||||
}
|
||||
|
||||
if ejv.t != bsontype.String {
|
||||
return 0, fmt.Errorf("$numberLong value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseJavascript() (code string, err error) {
|
||||
if ejv.t != bsontype.String {
|
||||
return "", fmt.Errorf("$code value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
return ejv.v.(string), nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseMinMaxKey(minmax string) error {
|
||||
if ejv.t != bsontype.Int32 {
|
||||
return fmt.Errorf("$%sKey value should be int32, but instead is %s", minmax, ejv.t)
|
||||
}
|
||||
|
||||
if ejv.v.(int32) != 1 {
|
||||
return fmt.Errorf("$%sKey value must be 1, but instead is %d", minmax, ejv.v.(int32))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseObjectID() (primitive.ObjectID, error) {
|
||||
if ejv.t != bsontype.String {
|
||||
return primitive.NilObjectID, fmt.Errorf("$oid value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
return primitive.ObjectIDFromHex(ejv.v.(string))
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) {
|
||||
if ejv.t != bsontype.EmbeddedDocument {
|
||||
return "", "", fmt.Errorf("$regularExpression value should be object, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
regexObj := ejv.v.(*extJSONObject)
|
||||
patFound := false
|
||||
optFound := false
|
||||
|
||||
for i, key := range regexObj.keys {
|
||||
val := regexObj.values[i]
|
||||
|
||||
switch key {
|
||||
case "pattern":
|
||||
if patFound {
|
||||
return "", "", errors.New("duplicate pattern key in $regularExpression")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return "", "", fmt.Errorf("$regularExpression pattern value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
pattern = val.v.(string)
|
||||
patFound = true
|
||||
case "options":
|
||||
if optFound {
|
||||
return "", "", errors.New("duplicate options key in $regularExpression")
|
||||
}
|
||||
|
||||
if val.t != bsontype.String {
|
||||
return "", "", fmt.Errorf("$regularExpression options value should be string, but instead is %s", val.t)
|
||||
}
|
||||
|
||||
options = val.v.(string)
|
||||
optFound = true
|
||||
default:
|
||||
return "", "", fmt.Errorf("invalid key in $regularExpression object: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if !patFound {
|
||||
return "", "", errors.New("missing pattern field in $regularExpression object")
|
||||
}
|
||||
|
||||
if !optFound {
|
||||
return "", "", errors.New("missing options field in $regularExpression object")
|
||||
|
||||
}
|
||||
|
||||
return pattern, options, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseSymbol() (string, error) {
|
||||
if ejv.t != bsontype.String {
|
||||
return "", fmt.Errorf("$symbol value should be string, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
return ejv.v.(string), nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseTimestamp() (t, i uint32, err error) {
|
||||
if ejv.t != bsontype.EmbeddedDocument {
|
||||
return 0, 0, fmt.Errorf("$timestamp value should be object, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
handleKey := func(key string, val *extJSONValue, flag bool) (uint32, error) {
|
||||
if flag {
|
||||
return 0, fmt.Errorf("duplicate %s key in $timestamp", key)
|
||||
}
|
||||
|
||||
switch val.t {
|
||||
case bsontype.Int32:
|
||||
if val.v.(int32) < 0 {
|
||||
return 0, fmt.Errorf("$timestamp %s number should be uint32: %s", key, string(val.v.(int32)))
|
||||
}
|
||||
|
||||
return uint32(val.v.(int32)), nil
|
||||
case bsontype.Int64:
|
||||
if val.v.(int64) < 0 || val.v.(int64) > math.MaxUint32 {
|
||||
return 0, fmt.Errorf("$timestamp %s number should be uint32: %s", key, string(val.v.(int32)))
|
||||
}
|
||||
|
||||
return uint32(val.v.(int64)), nil
|
||||
default:
|
||||
return 0, fmt.Errorf("$timestamp %s value should be uint32, but instead is %s", key, val.t)
|
||||
}
|
||||
}
|
||||
|
||||
tsObj := ejv.v.(*extJSONObject)
|
||||
tFound := false
|
||||
iFound := false
|
||||
|
||||
for j, key := range tsObj.keys {
|
||||
val := tsObj.values[j]
|
||||
|
||||
switch key {
|
||||
case "t":
|
||||
if t, err = handleKey(key, val, tFound); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
tFound = true
|
||||
case "i":
|
||||
if i, err = handleKey(key, val, iFound); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
iFound = true
|
||||
default:
|
||||
return 0, 0, fmt.Errorf("invalid key in $timestamp object: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if !tFound {
|
||||
return 0, 0, errors.New("missing t field in $timestamp object")
|
||||
}
|
||||
|
||||
if !iFound {
|
||||
return 0, 0, errors.New("missing i field in $timestamp object")
|
||||
}
|
||||
|
||||
return t, i, nil
|
||||
}
|
||||
|
||||
func (ejv *extJSONValue) parseUndefined() error {
|
||||
if ejv.t != bsontype.Boolean {
|
||||
return fmt.Errorf("undefined value should be boolean, but instead is %s", ejv.t)
|
||||
}
|
||||
|
||||
if !ejv.v.(bool) {
|
||||
return fmt.Errorf("$undefined balue boolean should be true, but instead is %v", ejv.v.(bool))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
734
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
generated
vendored
Executable file
@@ -0,0 +1,734 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var ejvwPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(extJSONValueWriter)
|
||||
},
|
||||
}
|
||||
|
||||
// ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
|
||||
type ExtJSONValueWriterPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON.
|
||||
func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
|
||||
return &ExtJSONValueWriterPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(extJSONValueWriter)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves an ExtJSON ValueWriter from the pool and resets it to use w as the destination.
|
||||
func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter {
|
||||
vw := bvwp.pool.Get().(*extJSONValueWriter)
|
||||
if writer, ok := w.(*SliceWriter); ok {
|
||||
vw.reset(*writer, canonical, escapeHTML)
|
||||
vw.w = writer
|
||||
return vw
|
||||
}
|
||||
vw.buf = vw.buf[:0]
|
||||
vw.w = w
|
||||
return vw
|
||||
}
|
||||
|
||||
// Put inserts a ValueWriter into the pool. If the ValueWriter is not an ExtJSON ValueWriter, nothing
|
||||
// happens and ok will be false.
|
||||
func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
|
||||
bvw, ok := vw.(*extJSONValueWriter)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, ok := bvw.w.(*SliceWriter); ok {
|
||||
bvw.buf = nil
|
||||
}
|
||||
bvw.w = nil
|
||||
|
||||
bvwp.pool.Put(bvw)
|
||||
return true
|
||||
}
|
||||
|
||||
type ejvwState struct {
|
||||
mode mode
|
||||
}
|
||||
|
||||
type extJSONValueWriter struct {
|
||||
w io.Writer
|
||||
buf []byte
|
||||
|
||||
stack []ejvwState
|
||||
frame int64
|
||||
canonical bool
|
||||
escapeHTML bool
|
||||
}
|
||||
|
||||
// NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w.
|
||||
func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter, error) {
|
||||
if w == nil {
|
||||
return nil, errNilWriter
|
||||
}
|
||||
|
||||
return newExtJSONWriter(w, canonical, escapeHTML), nil
|
||||
}
|
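A caller-side sketch of producing relaxed extended JSON (assumes this package's DocumentWriter methods, which are defined elsewhere in bsonrw, plus the usual "bytes", "fmt" and "log" imports; the output shape in the comment is approximate):
buf := new(bytes.Buffer)
vw, err := bsonrw.NewExtJSONValueWriter(buf, false, false) // relaxed output, no HTML escaping
if err != nil {
	log.Fatal(err)
}
dw, err := vw.WriteDocument()
if err != nil {
	log.Fatal(err)
}
evw, err := dw.WriteDocumentElement("n")
if err != nil {
	log.Fatal(err)
}
if err := evw.WriteInt32(42); err != nil {
	log.Fatal(err)
}
if err := dw.WriteDocumentEnd(); err != nil {
	log.Fatal(err)
}
fmt.Println(buf.String()) // roughly {"n":42} in relaxed mode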
||||
|
||||
func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter {
|
||||
stack := make([]ejvwState, 1, 5)
|
||||
stack[0] = ejvwState{mode: mTopLevel}
|
||||
|
||||
return &extJSONValueWriter{
|
||||
w: w,
|
||||
buf: []byte{},
|
||||
stack: stack,
|
||||
canonical: canonical,
|
||||
escapeHTML: escapeHTML,
|
||||
}
|
||||
}
|
||||
|
||||
func newExtJSONWriterFromSlice(buf []byte, canonical, escapeHTML bool) *extJSONValueWriter {
|
||||
stack := make([]ejvwState, 1, 5)
|
||||
stack[0] = ejvwState{mode: mTopLevel}
|
||||
|
||||
return &extJSONValueWriter{
|
||||
buf: buf,
|
||||
stack: stack,
|
||||
canonical: canonical,
|
||||
escapeHTML: escapeHTML,
|
||||
}
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) reset(buf []byte, canonical, escapeHTML bool) {
|
||||
if ejvw.stack == nil {
|
||||
ejvw.stack = make([]ejvwState, 1, 5)
|
||||
}
|
||||
|
||||
ejvw.stack = ejvw.stack[:1]
|
||||
ejvw.stack[0] = ejvwState{mode: mTopLevel}
|
||||
ejvw.canonical = canonical
|
||||
ejvw.escapeHTML = escapeHTML
|
||||
ejvw.frame = 0
|
||||
ejvw.buf = buf
|
||||
ejvw.w = nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) advanceFrame() {
|
||||
if ejvw.frame+1 >= int64(len(ejvw.stack)) { // We need to grow the stack
|
||||
length := len(ejvw.stack)
|
||||
if length+1 >= cap(ejvw.stack) {
|
||||
// double it
|
||||
buf := make([]ejvwState, 2*cap(ejvw.stack)+1)
|
||||
copy(buf, ejvw.stack)
|
||||
ejvw.stack = buf
|
||||
}
|
||||
ejvw.stack = ejvw.stack[:length+1]
|
||||
}
|
||||
ejvw.frame++
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) push(m mode) {
|
||||
ejvw.advanceFrame()
|
||||
|
||||
ejvw.stack[ejvw.frame].mode = m
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) pop() {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mElement, mValue:
|
||||
ejvw.frame--
|
||||
case mDocument, mArray, mCodeWithScope:
|
||||
ejvw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
|
||||
}
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) invalidTransitionErr(destination mode, name string, modes []mode) error {
|
||||
te := TransitionError{
|
||||
name: name,
|
||||
current: ejvw.stack[ejvw.frame].mode,
|
||||
destination: destination,
|
||||
modes: modes,
|
||||
action: "write",
|
||||
}
|
||||
if ejvw.frame != 0 {
|
||||
te.parent = ejvw.stack[ejvw.frame-1].mode
|
||||
}
|
||||
return te
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) ensureElementValue(destination mode, callerName string, addmodes ...mode) error {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mElement, mValue:
|
||||
default:
|
||||
modes := []mode{mElement, mValue}
|
||||
if addmodes != nil {
|
||||
modes = append(modes, addmodes...)
|
||||
}
|
||||
return ejvw.invalidTransitionErr(destination, callerName, modes)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) writeExtendedSingleValue(key string, value string, quotes bool) {
|
||||
var s string
|
||||
if quotes {
|
||||
s = fmt.Sprintf(`{"$%s":"%s"}`, key, value)
|
||||
} else {
|
||||
s = fmt.Sprintf(`{"$%s":%s}`, key, value)
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, []byte(s)...)
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteArray() (ArrayWriter, error) {
|
||||
if err := ejvw.ensureElementValue(mArray, "WriteArray"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, '[')
|
||||
|
||||
ejvw.push(mArray)
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteBinary(b []byte) error {
|
||||
return ejvw.WriteBinaryWithSubtype(b, 0x00)
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteBinaryWithSubtype"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(`{"$binary":{"base64":"`)
|
||||
buf.WriteString(base64.StdEncoding.EncodeToString(b))
|
||||
buf.WriteString(fmt.Sprintf(`","subType":"%02x"}},`, btype))
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteBoolean(b bool) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteBoolean"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, []byte(strconv.FormatBool(b))...)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
|
||||
if err := ejvw.ensureElementValue(mCodeWithScope, "WriteCodeWithScope"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(`{"$code":`)
|
||||
writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
|
||||
buf.WriteString(`,"$scope":{`)
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
|
||||
ejvw.push(mCodeWithScope)
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteDBPointer"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(`{"$dbPointer":{"$ref":"`)
|
||||
buf.WriteString(ns)
|
||||
buf.WriteString(`","$id":{"$oid":"`)
|
||||
buf.WriteString(oid.Hex())
|
||||
buf.WriteString(`"}}},`)
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDateTime(dt int64) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteDateTime"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t := time.Unix(dt/1e3, dt%1e3*1e6).UTC()
|
||||
|
||||
if ejvw.canonical || t.Year() < 1970 || t.Year() > 9999 {
|
||||
s := fmt.Sprintf(`{"$numberLong":"%d"}`, dt)
|
||||
ejvw.writeExtendedSingleValue("date", s, false)
|
||||
} else {
|
||||
ejvw.writeExtendedSingleValue("date", t.Format(rfc3339Milli), true)
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDecimal128(d primitive.Decimal128) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteDecimal128"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.writeExtendedSingleValue("numberDecimal", d.String(), true)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDocument() (DocumentWriter, error) {
|
||||
if ejvw.stack[ejvw.frame].mode == mTopLevel {
|
||||
ejvw.buf = append(ejvw.buf, '{')
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
if err := ejvw.ensureElementValue(mDocument, "WriteDocument", mTopLevel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, '{')
|
||||
ejvw.push(mDocument)
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDouble(f float64) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteDouble"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := formatDouble(f)
|
||||
|
||||
if ejvw.canonical {
|
||||
ejvw.writeExtendedSingleValue("numberDouble", s, true)
|
||||
} else {
|
||||
switch s {
|
||||
case "Infinity":
|
||||
fallthrough
|
||||
case "-Infinity":
|
||||
fallthrough
|
||||
case "NaN":
|
||||
s = fmt.Sprintf(`{"$numberDouble":"%s"}`, s)
|
||||
}
|
||||
ejvw.buf = append(ejvw.buf, []byte(s)...)
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteInt32(i int32) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteInt32"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := strconv.FormatInt(int64(i), 10)
|
||||
|
||||
if ejvw.canonical {
|
||||
ejvw.writeExtendedSingleValue("numberInt", s, true)
|
||||
} else {
|
||||
ejvw.buf = append(ejvw.buf, []byte(s)...)
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteInt64(i int64) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteInt64"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := strconv.FormatInt(i, 10)
|
||||
|
||||
if ejvw.canonical {
|
||||
ejvw.writeExtendedSingleValue("numberLong", s, true)
|
||||
} else {
|
||||
ejvw.buf = append(ejvw.buf, []byte(s)...)
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteJavascript(code string) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteJavascript"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
|
||||
|
||||
ejvw.writeExtendedSingleValue("code", buf.String(), false)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteMaxKey() error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteMaxKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.writeExtendedSingleValue("maxKey", "1", false)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteMinKey() error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteMinKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.writeExtendedSingleValue("minKey", "1", false)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteNull() error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteNull"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, []byte("null")...)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteObjectID(oid primitive.ObjectID) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteObjectID"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.writeExtendedSingleValue("oid", oid.Hex(), true)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteRegex(pattern string, options string) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteRegex"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(`{"$regularExpression":{"pattern":`)
|
||||
writeStringWithEscapes(pattern, &buf, ejvw.escapeHTML)
|
||||
buf.WriteString(`,"options":"`)
|
||||
buf.WriteString(sortStringAlphebeticAscending(options))
|
||||
buf.WriteString(`"}},`)
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteString(s string) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteString"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writeStringWithEscapes(s, &buf, ejvw.escapeHTML)
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteSymbol(symbol string) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteSymbol"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writeStringWithEscapes(symbol, &buf, ejvw.escapeHTML)
|
||||
|
||||
ejvw.writeExtendedSingleValue("symbol", buf.String(), false)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteTimestamp(t uint32, i uint32) error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteTimestamp"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(`{"$timestamp":{"t":`)
|
||||
buf.WriteString(strconv.FormatUint(uint64(t), 10))
|
||||
buf.WriteString(`,"i":`)
|
||||
buf.WriteString(strconv.FormatUint(uint64(i), 10))
|
||||
buf.WriteString(`}},`)
|
||||
|
||||
ejvw.buf = append(ejvw.buf, buf.Bytes()...)
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteUndefined() error {
|
||||
if err := ejvw.ensureElementValue(mode(0), "WriteUndefined"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ejvw.writeExtendedSingleValue("undefined", "true", false)
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mDocument, mTopLevel, mCodeWithScope:
|
||||
ejvw.buf = append(ejvw.buf, []byte(fmt.Sprintf(`"%s":`, key))...)
|
||||
ejvw.push(mElement)
|
||||
default:
|
||||
return nil, ejvw.invalidTransitionErr(mElement, "WriteDocumentElement", []mode{mDocument, mTopLevel, mCodeWithScope})
|
||||
}
|
||||
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteDocumentEnd() error {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mDocument, mTopLevel, mCodeWithScope:
|
||||
default:
|
||||
return fmt.Errorf("incorrect mode to end document: %s", ejvw.stack[ejvw.frame].mode)
|
||||
}
|
||||
|
||||
// close the document
|
||||
if ejvw.buf[len(ejvw.buf)-1] == ',' {
|
||||
ejvw.buf[len(ejvw.buf)-1] = '}'
|
||||
} else {
|
||||
ejvw.buf = append(ejvw.buf, '}')
|
||||
}
|
||||
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mCodeWithScope:
|
||||
ejvw.buf = append(ejvw.buf, '}')
|
||||
fallthrough
|
||||
case mDocument:
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
case mTopLevel:
|
||||
if ejvw.w != nil {
|
||||
if _, err := ejvw.w.Write(ejvw.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
ejvw.buf = ejvw.buf[:0]
|
||||
}
|
||||
}
|
||||
|
||||
ejvw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteArrayElement() (ValueWriter, error) {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mArray:
|
||||
ejvw.push(mValue)
|
||||
default:
|
||||
return nil, ejvw.invalidTransitionErr(mValue, "WriteArrayElement", []mode{mArray})
|
||||
}
|
||||
|
||||
return ejvw, nil
|
||||
}
|
||||
|
||||
func (ejvw *extJSONValueWriter) WriteArrayEnd() error {
|
||||
switch ejvw.stack[ejvw.frame].mode {
|
||||
case mArray:
|
||||
// close the array
|
||||
if ejvw.buf[len(ejvw.buf)-1] == ',' {
|
||||
ejvw.buf[len(ejvw.buf)-1] = ']'
|
||||
} else {
|
||||
ejvw.buf = append(ejvw.buf, ']')
|
||||
}
|
||||
|
||||
ejvw.buf = append(ejvw.buf, ',')
|
||||
|
||||
ejvw.pop()
|
||||
default:
|
||||
return fmt.Errorf("incorrect mode to end array: %s", ejvw.stack[ejvw.frame].mode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func formatDouble(f float64) string {
|
||||
var s string
|
||||
if math.IsInf(f, 1) {
|
||||
s = "Infinity"
|
||||
} else if math.IsInf(f, -1) {
|
||||
s = "-Infinity"
|
||||
} else if math.IsNaN(f) {
|
||||
s = "NaN"
|
||||
} else {
|
||||
// Print exactly one decimal place for integers; otherwise, print as many as are necessary to
// perfectly represent it.
|
||||
s = strconv.FormatFloat(f, 'G', -1, 64)
|
||||
if !strings.ContainsRune(s, 'E') && !strings.ContainsRune(s, '.') {
|
||||
s += ".0"
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
var hexChars = "0123456789abcdef"
|
||||
|
||||
func writeStringWithEscapes(s string, buf *bytes.Buffer, escapeHTML bool) {
|
||||
buf.WriteByte('"')
|
||||
start := 0
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if start < i {
|
||||
buf.WriteString(s[start:i])
|
||||
}
|
||||
switch b {
|
||||
case '\\', '"':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte(b)
|
||||
case '\n':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('n')
|
||||
case '\r':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('r')
|
||||
case '\t':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('t')
|
||||
case '\b':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('b')
|
||||
case '\f':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('f')
|
||||
default:
|
||||
// This encodes bytes < 0x20 except for \t, \n and \r.
|
||||
// If escapeHTML is set, it also escapes <, >, and &
|
||||
// because they can lead to security holes when
|
||||
// user-controlled strings are rendered into JSON
|
||||
// and served to some browsers.
|
||||
buf.WriteString(`\u00`)
|
||||
buf.WriteByte(hexChars[b>>4])
|
||||
buf.WriteByte(hexChars[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRuneInString(s[i:])
|
||||
if c == utf8.RuneError && size == 1 {
|
||||
if start < i {
|
||||
buf.WriteString(s[start:i])
|
||||
}
|
||||
buf.WriteString(`\ufffd`)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
// U+2028 is LINE SEPARATOR.
|
||||
// U+2029 is PARAGRAPH SEPARATOR.
|
||||
// They are both technically valid characters in JSON strings,
|
||||
// but don't work in JSONP, which has to be evaluated as JavaScript,
|
||||
// and can lead to security holes there. It is valid JSON to
|
||||
// escape them, so we do so unconditionally.
|
||||
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
|
||||
if c == '\u2028' || c == '\u2029' {
|
||||
if start < i {
|
||||
buf.WriteString(s[start:i])
|
||||
}
|
||||
buf.WriteString(`\u202`)
|
||||
buf.WriteByte(hexChars[c&0xF])
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start < len(s) {
|
||||
buf.WriteString(s[start:])
|
||||
}
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
|
||||
type sortableString []rune
|
||||
|
||||
func (ss sortableString) Len() int {
|
||||
return len(ss)
|
||||
}
|
||||
|
||||
func (ss sortableString) Less(i, j int) bool {
|
||||
return ss[i] < ss[j]
|
||||
}
|
||||
|
||||
func (ss sortableString) Swap(i, j int) {
|
||||
oldI := ss[i]
|
||||
ss[i] = ss[j]
|
||||
ss[j] = oldI
|
||||
}
|
||||
|
||||
func sortStringAlphebeticAscending(s string) string {
|
||||
ss := sortableString([]rune(s))
|
||||
sort.Sort(ss)
|
||||
return string([]rune(ss))
|
||||
}
|
||||
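Before the next file, a quick illustration of the writer defined above: NewExtJSONValueWriter is the exported entry point, and the Document/Element calls walk the same mode stack shown in the code. This is a minimal sketch (not part of the vendored file), assuming the bsonrw package exactly as vendored above:

// Minimal sketch: writing {"n": 42} as relaxed Extended JSON with the
// ValueWriter defined in extjson_writer.go.
package main

import (
	"bytes"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	var out bytes.Buffer
	vw, err := bsonrw.NewExtJSONValueWriter(&out, false, false) // relaxed output, no HTML escaping
	if err != nil {
		log.Fatal(err)
	}

	dw, err := vw.WriteDocument() // top-level document: appends '{'
	if err != nil {
		log.Fatal(err)
	}
	ew, err := dw.WriteDocumentElement("n") // appends the key and pushes mElement
	if err != nil {
		log.Fatal(err)
	}
	if err := ew.WriteInt32(42); err != nil { // appends the value and pops back
		log.Fatal(err)
	}
	if err := dw.WriteDocumentEnd(); err != nil { // closes the document and flushes to out
		log.Fatal(err)
	}

	fmt.Println(out.String()) // {"n":42}
}
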
439 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go generated vendored Executable file
@@ -0,0 +1,439 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type jsonTokenType byte
|
||||
|
||||
const (
|
||||
jttBeginObject jsonTokenType = iota
|
||||
jttEndObject
|
||||
jttBeginArray
|
||||
jttEndArray
|
||||
jttColon
|
||||
jttComma
|
||||
jttInt32
|
||||
jttInt64
|
||||
jttDouble
|
||||
jttString
|
||||
jttBool
|
||||
jttNull
|
||||
jttEOF
|
||||
)
|
||||
|
||||
type jsonToken struct {
|
||||
t jsonTokenType
|
||||
v interface{}
|
||||
p int
|
||||
}
|
||||
|
||||
type jsonScanner struct {
|
||||
r io.Reader
|
||||
buf []byte
|
||||
pos int
|
||||
lastReadErr error
|
||||
}
|
||||
|
||||
// nextToken returns the next JSON token if one exists. A token is a character
|
||||
// of the JSON grammar, a number, a string, or a literal.
|
||||
func (js *jsonScanner) nextToken() (*jsonToken, error) {
|
||||
c, err := js.readNextByte()
|
||||
|
||||
// keep reading until a non-space is encountered (break on read error or EOF)
|
||||
for isWhiteSpace(c) && err == nil {
|
||||
c, err = js.readNextByte()
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
return &jsonToken{t: jttEOF}, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// switch on the character
|
||||
switch c {
|
||||
case '{':
|
||||
return &jsonToken{t: jttBeginObject, v: byte('{'), p: js.pos - 1}, nil
|
||||
case '}':
|
||||
return &jsonToken{t: jttEndObject, v: byte('}'), p: js.pos - 1}, nil
|
||||
case '[':
|
||||
return &jsonToken{t: jttBeginArray, v: byte('['), p: js.pos - 1}, nil
|
||||
case ']':
|
||||
return &jsonToken{t: jttEndArray, v: byte(']'), p: js.pos - 1}, nil
|
||||
case ':':
|
||||
return &jsonToken{t: jttColon, v: byte(':'), p: js.pos - 1}, nil
|
||||
case ',':
|
||||
return &jsonToken{t: jttComma, v: byte(','), p: js.pos - 1}, nil
|
||||
case '"': // RFC-8259 only allows for double quotes (") not single (')
|
||||
return js.scanString()
|
||||
default:
|
||||
// check if it's a number
|
||||
if c == '-' || isDigit(c) {
|
||||
return js.scanNumber(c)
|
||||
} else if c == 't' || c == 'f' || c == 'n' {
|
||||
// maybe a literal
|
||||
return js.scanLiteral(c)
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid JSON input. Position: %d. Character: %c", js.pos-1, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readNextByte attempts to read the next byte from the buffer. If the buffer
|
||||
// has been exhausted, this function calls readIntoBuf, thus refilling the
|
||||
// buffer and resetting the read position to 0
|
||||
func (js *jsonScanner) readNextByte() (byte, error) {
|
||||
if js.pos >= len(js.buf) {
|
||||
err := js.readIntoBuf()
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
b := js.buf[js.pos]
|
||||
js.pos++
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// readNNextBytes reads n bytes into dst, starting at offset
|
||||
func (js *jsonScanner) readNNextBytes(dst []byte, n, offset int) error {
|
||||
var err error
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i+offset], err = js.readNextByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readIntoBuf reads up to 512 bytes from the scanner's io.Reader into the buffer
|
||||
func (js *jsonScanner) readIntoBuf() error {
|
||||
if js.lastReadErr != nil {
|
||||
js.buf = js.buf[:0]
|
||||
js.pos = 0
|
||||
return js.lastReadErr
|
||||
}
|
||||
|
||||
if cap(js.buf) == 0 {
|
||||
js.buf = make([]byte, 0, 512)
|
||||
}
|
||||
|
||||
n, err := js.r.Read(js.buf[:cap(js.buf)])
|
||||
if err != nil {
|
||||
js.lastReadErr = err
|
||||
if n > 0 {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
js.buf = js.buf[:n]
|
||||
js.pos = 0
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func isWhiteSpace(c byte) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
|
||||
func isDigit(c byte) bool {
|
||||
return unicode.IsDigit(rune(c))
|
||||
}
|
||||
|
||||
func isValueTerminator(c byte) bool {
|
||||
return c == ',' || c == '}' || c == ']' || isWhiteSpace(c)
|
||||
}
|
||||
|
||||
// scanString reads from an opening '"' to a closing '"' and handles escaped characters
|
||||
func (js *jsonScanner) scanString() (*jsonToken, error) {
|
||||
var b bytes.Buffer
|
||||
var c byte
|
||||
var err error
|
||||
|
||||
p := js.pos - 1
|
||||
|
||||
for {
|
||||
c, err = js.readNextByte()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, errors.New("end of input in JSON string")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '\\':
|
||||
c, err = js.readNextByte()
|
||||
switch c {
|
||||
case '"', '\\', '/', '\'':
|
||||
b.WriteByte(c)
|
||||
case 'b':
|
||||
b.WriteByte('\b')
|
||||
case 'f':
|
||||
b.WriteByte('\f')
|
||||
case 'n':
|
||||
b.WriteByte('\n')
|
||||
case 'r':
|
||||
b.WriteByte('\r')
|
||||
case 't':
|
||||
b.WriteByte('\t')
|
||||
case 'u':
|
||||
us := make([]byte, 4)
|
||||
err = js.readNNextBytes(us, 4, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid unicode sequence in JSON string: %s", us)
|
||||
}
|
||||
|
||||
s := fmt.Sprintf(`\u%s`, us)
|
||||
s, err = strconv.Unquote(strings.Replace(strconv.Quote(s), `\\u`, `\u`, 1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.WriteString(s)
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid escape sequence in JSON string '\\%c'", c)
|
||||
}
|
||||
case '"':
|
||||
return &jsonToken{t: jttString, v: b.String(), p: p}, nil
|
||||
default:
|
||||
b.WriteByte(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanLiteral reads an unquoted sequence of characters and determines if it is one of
|
||||
// three valid JSON literals (true, false, null); if so, it returns the appropriate
|
||||
// jsonToken; otherwise, it returns an error
|
||||
func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) {
|
||||
p := js.pos - 1
|
||||
|
||||
lit := make([]byte, 4)
|
||||
lit[0] = first
|
||||
|
||||
err := js.readNNextBytes(lit, 3, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c5, err := js.readNextByte()
|
||||
|
||||
if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) {
|
||||
js.pos = int(math.Max(0, float64(js.pos-1)))
|
||||
return &jsonToken{t: jttBool, v: true, p: p}, nil
|
||||
} else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) {
|
||||
js.pos = int(math.Max(0, float64(js.pos-1)))
|
||||
return &jsonToken{t: jttNull, v: nil, p: p}, nil
|
||||
} else if bytes.Equal([]byte("fals"), lit) {
|
||||
if c5 == 'e' {
|
||||
c5, err = js.readNextByte()
|
||||
|
||||
if isValueTerminator(c5) || err == io.EOF {
|
||||
js.pos = int(math.Max(0, float64(js.pos-1)))
|
||||
return &jsonToken{t: jttBool, v: false, p: p}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid JSON literal. Position: %d, literal: %s", p, lit)
|
||||
}
|
||||
|
||||
type numberScanState byte
|
||||
|
||||
const (
|
||||
nssSawLeadingMinus numberScanState = iota
|
||||
nssSawLeadingZero
|
||||
nssSawIntegerDigits
|
||||
nssSawDecimalPoint
|
||||
nssSawFractionDigits
|
||||
nssSawExponentLetter
|
||||
nssSawExponentSign
|
||||
nssSawExponentDigits
|
||||
nssDone
|
||||
nssInvalid
|
||||
)
|
||||
|
||||
// scanNumber reads a JSON number (according to RFC-8259)
|
||||
func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
|
||||
var b bytes.Buffer
|
||||
var s numberScanState
|
||||
var c byte
|
||||
var err error
|
||||
|
||||
t := jttInt64 // assume it's an int64 until the type can be determined
|
||||
start := js.pos - 1
|
||||
|
||||
b.WriteByte(first)
|
||||
|
||||
switch first {
|
||||
case '-':
|
||||
s = nssSawLeadingMinus
|
||||
case '0':
|
||||
s = nssSawLeadingZero
|
||||
default:
|
||||
s = nssSawIntegerDigits
|
||||
}
|
||||
|
||||
for {
|
||||
c, err = js.readNextByte()
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch s {
|
||||
case nssSawLeadingMinus:
|
||||
switch c {
|
||||
case '0':
|
||||
s = nssSawLeadingZero
|
||||
b.WriteByte(c)
|
||||
default:
|
||||
if isDigit(c) {
|
||||
s = nssSawIntegerDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
case nssSawLeadingZero:
|
||||
switch c {
|
||||
case '.':
|
||||
s = nssSawDecimalPoint
|
||||
b.WriteByte(c)
|
||||
case 'e', 'E':
|
||||
s = nssSawExponentLetter
|
||||
b.WriteByte(c)
|
||||
case '}', ']', ',':
|
||||
s = nssDone
|
||||
default:
|
||||
if isWhiteSpace(c) || err == io.EOF {
|
||||
s = nssDone
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
case nssSawIntegerDigits:
|
||||
switch c {
|
||||
case '.':
|
||||
s = nssSawDecimalPoint
|
||||
b.WriteByte(c)
|
||||
case 'e', 'E':
|
||||
s = nssSawExponentLetter
|
||||
b.WriteByte(c)
|
||||
case '}', ']', ',':
|
||||
s = nssDone
|
||||
default:
|
||||
if isWhiteSpace(c) || err == io.EOF {
|
||||
s = nssDone
|
||||
} else if isDigit(c) {
|
||||
s = nssSawIntegerDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
case nssSawDecimalPoint:
|
||||
t = jttDouble
|
||||
if isDigit(c) {
|
||||
s = nssSawFractionDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
case nssSawFractionDigits:
|
||||
switch c {
|
||||
case 'e', 'E':
|
||||
s = nssSawExponentLetter
|
||||
b.WriteByte(c)
|
||||
case '}', ']', ',':
|
||||
s = nssDone
|
||||
default:
|
||||
if isWhiteSpace(c) || err == io.EOF {
|
||||
s = nssDone
|
||||
} else if isDigit(c) {
|
||||
s = nssSawFractionDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
case nssSawExponentLetter:
|
||||
t = jttDouble
|
||||
switch c {
|
||||
case '+', '-':
|
||||
s = nssSawExponentSign
|
||||
b.WriteByte(c)
|
||||
default:
|
||||
if isDigit(c) {
|
||||
s = nssSawExponentDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
case nssSawExponentSign:
|
||||
if isDigit(c) {
|
||||
s = nssSawExponentDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
case nssSawExponentDigits:
|
||||
switch c {
|
||||
case '}', ']', ',':
|
||||
s = nssDone
|
||||
default:
|
||||
if isWhiteSpace(c) || err == io.EOF {
|
||||
s = nssDone
|
||||
} else if isDigit(c) {
|
||||
s = nssSawExponentDigits
|
||||
b.WriteByte(c)
|
||||
} else {
|
||||
s = nssInvalid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch s {
|
||||
case nssInvalid:
|
||||
return nil, fmt.Errorf("invalid JSON number. Position: %d", start)
|
||||
case nssDone:
|
||||
js.pos = int(math.Max(0, float64(js.pos-1)))
|
||||
if t != jttDouble {
|
||||
v, err := strconv.ParseInt(b.String(), 10, 64)
|
||||
if err == nil {
|
||||
if v < math.MinInt32 || v > math.MaxInt32 {
|
||||
return &jsonToken{t: jttInt64, v: v, p: start}, nil
|
||||
}
|
||||
|
||||
return &jsonToken{t: jttInt32, v: int32(v), p: start}, nil
|
||||
}
|
||||
}
|
||||
|
||||
v, err := strconv.ParseFloat(b.String(), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &jsonToken{t: jttDouble, v: v, p: start}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
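As a side note on the scanner above: the int32/int64/double split at the end of scanNumber is the standard range check. The standalone sketch below (illustrative only; the classify helper is hypothetical and not part of the scanner's API) reproduces that classification rule:

// Illustrative sketch of scanNumber's classification: integers that fit in an
// int32 become jttInt32 tokens, larger integers jttInt64, and anything with a
// fraction or exponent is parsed as a float64 (jttDouble).
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

func classify(numeric string) string {
	if !strings.ContainsAny(numeric, ".eE") {
		if v, err := strconv.ParseInt(numeric, 10, 64); err == nil {
			if v < math.MinInt32 || v > math.MaxInt32 {
				return fmt.Sprintf("int64(%d)", v)
			}
			return fmt.Sprintf("int32(%d)", int32(v))
		}
	}
	v, err := strconv.ParseFloat(numeric, 64)
	if err != nil {
		return "invalid number"
	}
	return fmt.Sprintf("double(%g)", v)
}

func main() {
	for _, s := range []string{"12", "2147483648", "0.5", "1e3"} {
		fmt.Println(s, "->", classify(s))
	}
}
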
108 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go generated vendored Executable file
@@ -0,0 +1,108 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package bsonrw

import (
"fmt"
)

type mode int

const (
_ mode = iota
mTopLevel
mDocument
mArray
mValue
mElement
mCodeWithScope
mSpacer
)

func (m mode) String() string {
var str string

switch m {
case mTopLevel:
str = "TopLevel"
case mDocument:
str = "DocumentMode"
case mArray:
str = "ArrayMode"
case mValue:
str = "ValueMode"
case mElement:
str = "ElementMode"
case mCodeWithScope:
str = "CodeWithScopeMode"
case mSpacer:
str = "CodeWithScopeSpacerFrame"
default:
str = "UnknownMode"
}

return str
}

func (m mode) TypeString() string {
var str string

switch m {
case mTopLevel:
str = "TopLevel"
case mDocument:
str = "Document"
case mArray:
str = "Array"
case mValue:
str = "Value"
case mElement:
str = "Element"
case mCodeWithScope:
str = "CodeWithScope"
case mSpacer:
str = "CodeWithScopeSpacer"
default:
str = "Unknown"
}

return str
}

// TransitionError is an error returned when an invalid transition in a
// ValueReader or ValueWriter state machine occurs.
// The action field records whether the invalid operation was a read or a write.
type TransitionError struct {
name string
parent mode
current mode
destination mode
modes []mode
action string
}

func (te TransitionError) Error() string {
errString := fmt.Sprintf("%s can only %s", te.name, te.action)
if te.destination != mode(0) {
errString = fmt.Sprintf("%s a %s", errString, te.destination.TypeString())
}
errString = fmt.Sprintf("%s while positioned on a", errString)
for ind, m := range te.modes {
if ind != 0 && len(te.modes) > 2 {
errString = fmt.Sprintf("%s,", errString)
}
if ind == len(te.modes)-1 && len(te.modes) > 1 {
errString = fmt.Sprintf("%s or", errString)
}
errString = fmt.Sprintf("%s %s", errString, m.TypeString())
}
errString = fmt.Sprintf("%s but is positioned on a %s", errString, te.current.TypeString())
if te.parent != mode(0) {
errString = fmt.Sprintf("%s with parent %s", errString, te.parent.TypeString())
}
return errString
}

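TransitionError is what the readers and writers above return on an illegal state-machine transition. A minimal sketch of triggering one with the ExtJSON writer from the earlier file (illustrative, not part of the vendored code):

// Minimal sketch: calling a value method at the top level (before
// WriteDocument) is an invalid transition, so the ExtJSON writer returns a
// bsonrw.TransitionError describing the expected and actual modes.
package main

import (
	"bytes"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	vw, err := bsonrw.NewExtJSONValueWriter(&bytes.Buffer{}, false, false)
	if err != nil {
		log.Fatal(err)
	}

	// WriteBoolean requires mElement or mValue, but the writer starts in mTopLevel.
	if err := vw.WriteBoolean(true); err != nil {
		fmt.Printf("%T: %v\n", err, err) // bsonrw.TransitionError: "WriteBoolean can only write ..."
	}
}
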
63 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go generated vendored Executable file
@@ -0,0 +1,63 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package bsonrw

import (
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
)

// ArrayReader is implemented by types that allow reading values from a BSON
// array.
type ArrayReader interface {
ReadValue() (ValueReader, error)
}

// DocumentReader is implemented by types that allow reading elements from a
// BSON document.
type DocumentReader interface {
ReadElement() (string, ValueReader, error)
}

// ValueReader is a generic interface used to read values from BSON. This type
// is implemented by several types with different underlying representations of
// BSON, such as a bson.Document, raw BSON bytes, or extended JSON.
type ValueReader interface {
Type() bsontype.Type
Skip() error

ReadArray() (ArrayReader, error)
ReadBinary() (b []byte, btype byte, err error)
ReadBoolean() (bool, error)
ReadDocument() (DocumentReader, error)
ReadCodeWithScope() (code string, dr DocumentReader, err error)
ReadDBPointer() (ns string, oid primitive.ObjectID, err error)
ReadDateTime() (int64, error)
ReadDecimal128() (primitive.Decimal128, error)
ReadDouble() (float64, error)
ReadInt32() (int32, error)
ReadInt64() (int64, error)
ReadJavascript() (code string, err error)
ReadMaxKey() error
ReadMinKey() error
ReadNull() error
ReadObjectID() (primitive.ObjectID, error)
ReadRegex() (pattern, options string, err error)
ReadString() (string, error)
ReadSymbol() (symbol string, err error)
ReadTimestamp() (t, i uint32, err error)
ReadUndefined() error
}

// BytesReader is a generic interface used to read BSON bytes from a
// ValueReader. This interface is meant to be a superset of ValueReader, so that
// types that implement ValueReader may also implement this interface.
//
// The bytes of the value will be appended to dst.
type BytesReader interface {
ReadValueBytes(dst []byte) (bsontype.Type, []byte, error)
}

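The value_reader.go file below supplies the BSON-bytes implementation of these reader interfaces. A minimal sketch of walking a raw document with it (the hand-encoded payload is illustrative):

// Minimal sketch: iterating the elements of a raw BSON document with the
// reader interfaces above. The payload is the document {"a": 1} with an
// int32 value, lengths encoded little-endian.
package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	raw := []byte{
		0x0c, 0x00, 0x00, 0x00, // total length: 12 bytes
		0x10, 'a', 0x00, // element: type 0x10 (int32), key "a"
		0x01, 0x00, 0x00, 0x00, // value: 1
		0x00, // document terminator
	}

	vr := bsonrw.NewBSONDocumentReader(raw)
	dr, err := vr.ReadDocument()
	if err != nil {
		log.Fatal(err)
	}

	for {
		key, evr, err := dr.ReadElement()
		if err == bsonrw.ErrEOD { // end of document
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if evr.Type() == bsontype.Int32 {
			v, err := evr.ReadInt32()
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(key, "=", v)
		} else if err := evr.Skip(); err != nil {
			log.Fatal(err)
		}
	}
}
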
882 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go generated vendored Executable file
@@ -0,0 +1,882 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
var _ ValueReader = (*valueReader)(nil)
|
||||
|
||||
var vrPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(valueReader)
|
||||
},
|
||||
}
|
||||
|
||||
// BSONValueReaderPool is a pool for ValueReaders that read BSON.
|
||||
type BSONValueReaderPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// NewBSONValueReaderPool instantiates a new BSONValueReaderPool.
|
||||
func NewBSONValueReaderPool() *BSONValueReaderPool {
|
||||
return &BSONValueReaderPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(valueReader)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a ValueReader from the pool and uses src as the underlying BSON.
|
||||
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
|
||||
vr := bvrp.pool.Get().(*valueReader)
|
||||
vr.reset(src)
|
||||
return vr
|
||||
}
|
||||
|
||||
// Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader, nothing
|
||||
// is inserted into the pool and ok will be false.
|
||||
func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) {
|
||||
bvr, ok := vr.(*valueReader)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
bvr.reset(nil)
|
||||
bvrp.pool.Put(bvr)
|
||||
return true
|
||||
}
|
||||
|
||||
// ErrEOA is the error returned when the end of a BSON array has been reached.
|
||||
var ErrEOA = errors.New("end of array")
|
||||
|
||||
// ErrEOD is the error returned when the end of a BSON document has been reached.
|
||||
var ErrEOD = errors.New("end of document")
|
||||
|
||||
type vrState struct {
|
||||
mode mode
|
||||
vType bsontype.Type
|
||||
end int64
|
||||
}
|
||||
|
||||
// valueReader is for reading BSON values.
|
||||
type valueReader struct {
|
||||
offset int64
|
||||
d []byte
|
||||
|
||||
stack []vrState
|
||||
frame int64
|
||||
}
|
||||
|
||||
// NewBSONDocumentReader returns a ValueReader using b for the underlying BSON
|
||||
// representation. Parameter b must be a BSON Document.
|
||||
//
|
||||
// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes
|
||||
// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes
|
||||
// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a
|
||||
// thing that can return the finished []byte since it might be reallocated when appended to.
|
||||
func NewBSONDocumentReader(b []byte) ValueReader {
|
||||
return newValueReader(b)
|
||||
}
|
||||
|
||||
// NewBSONValueReader returns a ValueReader that starts in the Value mode instead of in top
|
||||
// level document mode. This enables the creation of a ValueReader for a single BSON value.
|
||||
func NewBSONValueReader(t bsontype.Type, val []byte) ValueReader {
|
||||
stack := make([]vrState, 1, 5)
|
||||
stack[0] = vrState{
|
||||
mode: mValue,
|
||||
vType: t,
|
||||
}
|
||||
return &valueReader{
|
||||
d: val,
|
||||
stack: stack,
|
||||
}
|
||||
}
|
||||
|
||||
func newValueReader(b []byte) *valueReader {
|
||||
stack := make([]vrState, 1, 5)
|
||||
stack[0] = vrState{
|
||||
mode: mTopLevel,
|
||||
}
|
||||
return &valueReader{
|
||||
d: b,
|
||||
stack: stack,
|
||||
}
|
||||
}
|
||||
|
||||
func (vr *valueReader) reset(b []byte) {
|
||||
if vr.stack == nil {
|
||||
vr.stack = make([]vrState, 1, 5)
|
||||
}
|
||||
vr.stack = vr.stack[:1]
|
||||
vr.stack[0] = vrState{mode: mTopLevel}
|
||||
vr.d = b
|
||||
vr.offset = 0
|
||||
vr.frame = 0
|
||||
}
|
||||
|
||||
func (vr *valueReader) advanceFrame() {
|
||||
if vr.frame+1 >= int64(len(vr.stack)) { // We need to grow the stack
|
||||
length := len(vr.stack)
|
||||
if length+1 >= cap(vr.stack) {
|
||||
// double it
|
||||
buf := make([]vrState, 2*cap(vr.stack)+1)
|
||||
copy(buf, vr.stack)
|
||||
vr.stack = buf
|
||||
}
|
||||
vr.stack = vr.stack[:length+1]
|
||||
}
|
||||
vr.frame++
|
||||
|
||||
// Clean the stack
|
||||
vr.stack[vr.frame].mode = 0
|
||||
vr.stack[vr.frame].vType = 0
|
||||
vr.stack[vr.frame].end = 0
|
||||
}
|
||||
|
||||
func (vr *valueReader) pushDocument() error {
|
||||
vr.advanceFrame()
|
||||
|
||||
vr.stack[vr.frame].mode = mDocument
|
||||
|
||||
size, err := vr.readLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vr.stack[vr.frame].end = int64(size) + vr.offset - 4
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) pushArray() error {
|
||||
vr.advanceFrame()
|
||||
|
||||
vr.stack[vr.frame].mode = mArray
|
||||
|
||||
size, err := vr.readLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vr.stack[vr.frame].end = int64(size) + vr.offset - 4
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) pushElement(t bsontype.Type) {
|
||||
vr.advanceFrame()
|
||||
|
||||
vr.stack[vr.frame].mode = mElement
|
||||
vr.stack[vr.frame].vType = t
|
||||
}
|
||||
|
||||
func (vr *valueReader) pushValue(t bsontype.Type) {
|
||||
vr.advanceFrame()
|
||||
|
||||
vr.stack[vr.frame].mode = mValue
|
||||
vr.stack[vr.frame].vType = t
|
||||
}
|
||||
|
||||
func (vr *valueReader) pushCodeWithScope() (int64, error) {
|
||||
vr.advanceFrame()
|
||||
|
||||
vr.stack[vr.frame].mode = mCodeWithScope
|
||||
|
||||
size, err := vr.readLength()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
vr.stack[vr.frame].end = int64(size) + vr.offset - 4
|
||||
|
||||
return int64(size), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) pop() {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mElement, mValue:
|
||||
vr.frame--
|
||||
case mDocument, mArray, mCodeWithScope:
|
||||
vr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
|
||||
}
|
||||
}
|
||||
|
||||
func (vr *valueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
|
||||
te := TransitionError{
|
||||
name: name,
|
||||
current: vr.stack[vr.frame].mode,
|
||||
destination: destination,
|
||||
modes: modes,
|
||||
action: "read",
|
||||
}
|
||||
if vr.frame != 0 {
|
||||
te.parent = vr.stack[vr.frame-1].mode
|
||||
}
|
||||
return te
|
||||
}
|
||||
|
||||
func (vr *valueReader) typeError(t bsontype.Type) error {
|
||||
return fmt.Errorf("positioned on %s, but attempted to read %s", vr.stack[vr.frame].vType, t)
|
||||
}
|
||||
|
||||
func (vr *valueReader) invalidDocumentLengthError() error {
|
||||
return fmt.Errorf("document is invalid, end byte is at %d, but null byte found at %d", vr.stack[vr.frame].end, vr.offset)
|
||||
}
|
||||
|
||||
func (vr *valueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string) error {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mElement, mValue:
|
||||
if vr.stack[vr.frame].vType != t {
|
||||
return vr.typeError(t)
|
||||
}
|
||||
default:
|
||||
return vr.invalidTransitionErr(destination, callerName, []mode{mElement, mValue})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) Type() bsontype.Type {
|
||||
return vr.stack[vr.frame].vType
|
||||
}
|
||||
|
||||
func (vr *valueReader) nextElementLength() (int32, error) {
|
||||
var length int32
|
||||
var err error
|
||||
switch vr.stack[vr.frame].vType {
|
||||
case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
|
||||
length, err = vr.peekLength()
|
||||
case bsontype.Binary:
|
||||
length, err = vr.peekLength()
|
||||
length += 4 + 1 // binary length + subtype byte
|
||||
case bsontype.Boolean:
|
||||
length = 1
|
||||
case bsontype.DBPointer:
|
||||
length, err = vr.peekLength()
|
||||
length += 4 + 12 // string length + ObjectID length
|
||||
case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
|
||||
length = 8
|
||||
case bsontype.Decimal128:
|
||||
length = 16
|
||||
case bsontype.Int32:
|
||||
length = 4
|
||||
case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
|
||||
length, err = vr.peekLength()
|
||||
length += 4
|
||||
case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
|
||||
length = 0
|
||||
case bsontype.ObjectID:
|
||||
length = 12
|
||||
case bsontype.Regex:
|
||||
regex := bytes.IndexByte(vr.d[vr.offset:], 0x00)
|
||||
if regex < 0 {
|
||||
err = io.EOF
|
||||
break
|
||||
}
|
||||
pattern := bytes.IndexByte(vr.d[vr.offset+int64(regex)+1:], 0x00)
|
||||
if pattern < 0 {
|
||||
err = io.EOF
|
||||
break
|
||||
}
|
||||
length = int32(int64(regex) + 1 + int64(pattern) + 1)
|
||||
default:
|
||||
return 0, fmt.Errorf("attempted to read bytes of unknown BSON type %v", vr.stack[vr.frame].vType)
|
||||
}
|
||||
|
||||
return length, err
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mTopLevel:
|
||||
length, err := vr.peekLength()
|
||||
if err != nil {
|
||||
return bsontype.Type(0), nil, err
|
||||
}
|
||||
dst, err = vr.appendBytes(dst, length)
|
||||
if err != nil {
|
||||
return bsontype.Type(0), nil, err
|
||||
}
|
||||
return bsontype.Type(0), dst, nil
|
||||
case mElement, mValue:
|
||||
length, err := vr.nextElementLength()
|
||||
if err != nil {
|
||||
return bsontype.Type(0), dst, err
|
||||
}
|
||||
|
||||
dst, err = vr.appendBytes(dst, length)
|
||||
t := vr.stack[vr.frame].vType
|
||||
vr.pop()
|
||||
return t, dst, err
|
||||
default:
|
||||
return bsontype.Type(0), nil, vr.invalidTransitionErr(0, "ReadValueBytes", []mode{mElement, mValue})
|
||||
}
|
||||
}
|
||||
|
||||
func (vr *valueReader) Skip() error {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mElement, mValue:
|
||||
default:
|
||||
return vr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
|
||||
}
|
||||
|
||||
length, err := vr.nextElementLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = vr.skipBytes(length)
|
||||
vr.pop()
|
||||
return err
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadArray() (ArrayReader, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Array, mArray, "ReadArray"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := vr.pushArray()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vr, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
length, err := vr.readLength()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
btype, err = vr.readByte()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if btype == 0x02 {
|
||||
length, err = vr.readLength()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
b, err = vr.readBytes(length)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return b, btype, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadBoolean() (bool, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
b, err := vr.readByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if b > 1 {
|
||||
return false, fmt.Errorf("invalid byte for boolean, %b", b)
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return b == 1, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadDocument() (DocumentReader, error) {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mTopLevel:
|
||||
// read size
|
||||
size, err := vr.readLength()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if int(size) != len(vr.d) {
|
||||
return nil, fmt.Errorf("invalid document length")
|
||||
}
|
||||
vr.stack[vr.frame].end = int64(size) + vr.offset - 4
|
||||
return vr, nil
|
||||
case mElement, mValue:
|
||||
if vr.stack[vr.frame].vType != bsontype.EmbeddedDocument {
|
||||
return nil, vr.typeError(bsontype.EmbeddedDocument)
|
||||
}
|
||||
default:
|
||||
return nil, vr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
|
||||
}
|
||||
|
||||
err := vr.pushDocument()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vr, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
totalLength, err := vr.readLength()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
strLength, err := vr.readLength()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
strBytes, err := vr.readBytes(strLength)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
code = string(strBytes[:len(strBytes)-1])
|
||||
|
||||
size, err := vr.pushCodeWithScope()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// The total length should equal:
|
||||
// 4 (total length) + strLength + 4 (the length of str itself) + (document length)
|
||||
componentsLength := int64(4+strLength+4) + size
|
||||
if int64(totalLength) != componentsLength {
|
||||
return "", nil, fmt.Errorf(
|
||||
"length of CodeWithScope does not match lengths of components; total: %d; components: %d",
|
||||
totalLength, componentsLength,
|
||||
)
|
||||
}
|
||||
return code, vr, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
|
||||
return "", oid, err
|
||||
}
|
||||
|
||||
ns, err = vr.readString()
|
||||
if err != nil {
|
||||
return "", oid, err
|
||||
}
|
||||
|
||||
oidbytes, err := vr.readBytes(12)
|
||||
if err != nil {
|
||||
return "", oid, err
|
||||
}
|
||||
|
||||
copy(oid[:], oidbytes)
|
||||
|
||||
vr.pop()
|
||||
return ns, oid, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadDateTime() (int64, error) {
|
||||
if err := vr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := vr.readi64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadDecimal128() (primitive.Decimal128, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
|
||||
return primitive.Decimal128{}, err
|
||||
}
|
||||
|
||||
b, err := vr.readBytes(16)
|
||||
if err != nil {
|
||||
return primitive.Decimal128{}, err
|
||||
}
|
||||
|
||||
l := binary.LittleEndian.Uint64(b[0:8])
|
||||
h := binary.LittleEndian.Uint64(b[8:16])
|
||||
|
||||
vr.pop()
|
||||
return primitive.NewDecimal128(h, l), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadDouble() (float64, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
u, err := vr.readu64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return math.Float64frombits(u), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadInt32() (int32, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return vr.readi32()
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadInt64() (int64, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return vr.readi64()
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadJavascript() (code string, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return vr.readString()
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadMaxKey() error {
|
||||
if err := vr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadMinKey() error {
|
||||
if err := vr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadNull() error {
|
||||
if err := vr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadObjectID() (primitive.ObjectID, error) {
|
||||
if err := vr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
|
||||
return primitive.ObjectID{}, err
|
||||
}
|
||||
|
||||
oidbytes, err := vr.readBytes(12)
|
||||
if err != nil {
|
||||
return primitive.ObjectID{}, err
|
||||
}
|
||||
|
||||
var oid primitive.ObjectID
|
||||
copy(oid[:], oidbytes)
|
||||
|
||||
vr.pop()
|
||||
return oid, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadRegex() (string, string, error) {
|
||||
if err := vr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
pattern, err := vr.readCString()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
options, err := vr.readCString()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return pattern, options, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadString() (string, error) {
|
||||
if err := vr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return vr.readString()
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadSymbol() (symbol string, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return vr.readString()
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadTimestamp() (t uint32, i uint32, err error) {
|
||||
if err := vr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
i, err = vr.readu32()
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
t, err = vr.readu32()
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return t, i, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadUndefined() error {
|
||||
if err := vr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadElement() (string, ValueReader, error) {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mTopLevel, mDocument, mCodeWithScope:
|
||||
default:
|
||||
return "", nil, vr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
|
||||
}
|
||||
|
||||
t, err := vr.readByte()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if t == 0 {
|
||||
if vr.offset != vr.stack[vr.frame].end {
|
||||
return "", nil, vr.invalidDocumentLengthError()
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return "", nil, ErrEOD
|
||||
}
|
||||
|
||||
name, err := vr.readCString()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
vr.pushElement(bsontype.Type(t))
|
||||
return name, vr, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) ReadValue() (ValueReader, error) {
|
||||
switch vr.stack[vr.frame].mode {
|
||||
case mArray:
|
||||
default:
|
||||
return nil, vr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
|
||||
}
|
||||
|
||||
t, err := vr.readByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t == 0 {
|
||||
if vr.offset != vr.stack[vr.frame].end {
|
||||
return nil, vr.invalidDocumentLengthError()
|
||||
}
|
||||
|
||||
vr.pop()
|
||||
return nil, ErrEOA
|
||||
}
|
||||
|
||||
_, err = vr.readCString()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vr.pushValue(bsontype.Type(t))
|
||||
return vr, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readBytes(length int32) ([]byte, error) {
|
||||
if length < 0 {
|
||||
return nil, fmt.Errorf("invalid length: %d", length)
|
||||
}
|
||||
|
||||
if vr.offset+int64(length) > int64(len(vr.d)) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
start := vr.offset
|
||||
vr.offset += int64(length)
|
||||
return vr.d[start : start+int64(length)], nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) appendBytes(dst []byte, length int32) ([]byte, error) {
|
||||
if vr.offset+int64(length) > int64(len(vr.d)) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
start := vr.offset
|
||||
vr.offset += int64(length)
|
||||
return append(dst, vr.d[start:start+int64(length)]...), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) skipBytes(length int32) error {
|
||||
if vr.offset+int64(length) > int64(len(vr.d)) {
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
vr.offset += int64(length)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readByte() (byte, error) {
|
||||
if vr.offset+1 > int64(len(vr.d)) {
|
||||
return 0x0, io.EOF
|
||||
}
|
||||
|
||||
vr.offset++
|
||||
return vr.d[vr.offset-1], nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readCString() (string, error) {
|
||||
idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
|
||||
if idx < 0 {
|
||||
return "", io.EOF
|
||||
}
|
||||
start := vr.offset
|
||||
// idx does not include the null byte
|
||||
vr.offset += int64(idx) + 1
|
||||
return string(vr.d[start : start+int64(idx)]), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) skipCString() error {
|
||||
idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
|
||||
if idx < 0 {
|
||||
return io.EOF
|
||||
}
|
||||
// idx does not include the null byte
|
||||
vr.offset += int64(idx) + 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readString() (string, error) {
|
||||
length, err := vr.readLength()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if int64(length)+vr.offset > int64(len(vr.d)) {
|
||||
return "", io.EOF
|
||||
}
|
||||
|
||||
if length <= 0 {
|
||||
return "", fmt.Errorf("invalid string length: %d", length)
|
||||
}
|
||||
|
||||
if vr.d[vr.offset+int64(length)-1] != 0x00 {
|
||||
return "", fmt.Errorf("string does not end with null byte, but with %v", vr.d[vr.offset+int64(length)-1])
|
||||
}
|
||||
|
||||
start := vr.offset
|
||||
vr.offset += int64(length)
|
||||
|
||||
if length == 2 {
|
||||
asciiByte := vr.d[start]
|
||||
if asciiByte > unicode.MaxASCII {
|
||||
return "", fmt.Errorf("invalid ascii byte")
|
||||
}
|
||||
}
|
||||
|
||||
return string(vr.d[start : start+int64(length)-1]), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) peekLength() (int32, error) {
|
||||
if vr.offset+4 > int64(len(vr.d)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
idx := vr.offset
|
||||
return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readLength() (int32, error) { return vr.readi32() }
|
||||
|
||||
func (vr *valueReader) readi32() (int32, error) {
|
||||
if vr.offset+4 > int64(len(vr.d)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
idx := vr.offset
|
||||
vr.offset += 4
|
||||
return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readu32() (uint32, error) {
|
||||
if vr.offset+4 > int64(len(vr.d)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
idx := vr.offset
|
||||
vr.offset += 4
|
||||
return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readi64() (int64, error) {
|
||||
if vr.offset+8 > int64(len(vr.d)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
idx := vr.offset
|
||||
vr.offset += 8
|
||||
return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 |
|
||||
int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil
|
||||
}
|
||||
|
||||
func (vr *valueReader) readu64() (uint64, error) {
|
||||
if vr.offset+8 > int64(len(vr.d)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
idx := vr.offset
|
||||
vr.offset += 8
|
||||
return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 |
|
||||
uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil
|
||||
}
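The fixed-width readers above assemble little-endian integers by or-ing shifted bytes together. A minimal standalone sketch of the same decoding, using made-up sample bytes (not part of the vendored file):

package main

import "fmt"

func main() {
	// 0x2A 0x00 0x00 0x00 is the little-endian encoding of int32(42),
	// the same layout readi32 expects for a BSON length prefix.
	d := []byte{0x2A, 0x00, 0x00, 0x00}
	length := int32(d[0]) | int32(d[1])<<8 | int32(d[2])<<16 | int32(d[3])<<24
	fmt.Println(length) // prints 42
}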
|
||||
589
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
generated
vendored
Executable file
@@ -0,0 +1,589 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
var _ ValueWriter = (*valueWriter)(nil)
|
||||
|
||||
var vwPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(valueWriter)
|
||||
},
|
||||
}
|
||||
|
||||
// BSONValueWriterPool is a pool for BSON ValueWriters.
|
||||
type BSONValueWriterPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON.
|
||||
func NewBSONValueWriterPool() *BSONValueWriterPool {
|
||||
return &BSONValueWriterPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(valueWriter)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination.
|
||||
func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
|
||||
vw := bvwp.pool.Get().(*valueWriter)
|
||||
if writer, ok := w.(*SliceWriter); ok {
|
||||
vw.reset(*writer)
|
||||
vw.w = writer
|
||||
return vw
|
||||
}
|
||||
vw.buf = vw.buf[:0]
|
||||
vw.w = w
|
||||
return vw
|
||||
}
|
||||
|
||||
// Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing
|
||||
// happens and ok will be false.
|
||||
func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
|
||||
bvw, ok := vw.(*valueWriter)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, ok := bvw.w.(*SliceWriter); ok {
|
||||
bvw.buf = nil
|
||||
}
|
||||
bvw.w = nil
|
||||
|
||||
bvwp.pool.Put(bvw)
|
||||
return true
|
||||
}
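A hedged usage sketch for the pool above, building one document into a SliceWriter through the exported bsonrw API shown in this file (illustrative, not part of the vendored source):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	pool := bsonrw.NewBSONValueWriterPool()

	// A SliceWriter lets the pooled writer build the document directly
	// into a byte slice.
	sw := new(bsonrw.SliceWriter)
	vw := pool.Get(sw)
	defer pool.Put(vw)

	dw, err := vw.WriteDocument()
	if err != nil {
		panic(err)
	}
	evw, err := dw.WriteDocumentElement("hello")
	if err != nil {
		panic(err)
	}
	if err := evw.WriteString("world"); err != nil {
		panic(err)
	}
	if err := dw.WriteDocumentEnd(); err != nil {
		panic(err)
	}

	fmt.Printf("% x\n", []byte(*sw)) // raw BSON bytes for {"hello": "world"}
}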
|
||||
|
||||
// This is here so that during testing we can change it and not require
|
||||
// allocating a 4GB slice.
|
||||
var maxSize = math.MaxInt32
|
||||
|
||||
var errNilWriter = errors.New("cannot create a ValueWriter from a nil io.Writer")
|
||||
|
||||
type errMaxDocumentSizeExceeded struct {
|
||||
size int64
|
||||
}
|
||||
|
||||
func (mdse errMaxDocumentSizeExceeded) Error() string {
|
||||
return fmt.Sprintf("document size (%d) is larger than the max int32", mdse.size)
|
||||
}
|
||||
|
||||
type vwMode int
|
||||
|
||||
const (
|
||||
_ vwMode = iota
|
||||
vwTopLevel
|
||||
vwDocument
|
||||
vwArray
|
||||
vwValue
|
||||
vwElement
|
||||
vwCodeWithScope
|
||||
)
|
||||
|
||||
func (vm vwMode) String() string {
|
||||
var str string
|
||||
|
||||
switch vm {
|
||||
case vwTopLevel:
|
||||
str = "TopLevel"
|
||||
case vwDocument:
|
||||
str = "DocumentMode"
|
||||
case vwArray:
|
||||
str = "ArrayMode"
|
||||
case vwValue:
|
||||
str = "ValueMode"
|
||||
case vwElement:
|
||||
str = "ElementMode"
|
||||
case vwCodeWithScope:
|
||||
str = "CodeWithScopeMode"
|
||||
default:
|
||||
str = "UnknownMode"
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
type vwState struct {
|
||||
mode mode
|
||||
key string
|
||||
arrkey int
|
||||
start int32
|
||||
}
|
||||
|
||||
type valueWriter struct {
|
||||
w io.Writer
|
||||
buf []byte
|
||||
|
||||
stack []vwState
|
||||
frame int64
|
||||
}
|
||||
|
||||
func (vw *valueWriter) advanceFrame() {
|
||||
if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack
|
||||
length := len(vw.stack)
|
||||
if length+1 >= cap(vw.stack) {
|
||||
// double it
|
||||
buf := make([]vwState, 2*cap(vw.stack)+1)
|
||||
copy(buf, vw.stack)
|
||||
vw.stack = buf
|
||||
}
|
||||
vw.stack = vw.stack[:length+1]
|
||||
}
|
||||
vw.frame++
|
||||
}
|
||||
|
||||
func (vw *valueWriter) push(m mode) {
|
||||
vw.advanceFrame()
|
||||
|
||||
// Clean the stack
|
||||
vw.stack[vw.frame].mode = m
|
||||
vw.stack[vw.frame].key = ""
|
||||
vw.stack[vw.frame].arrkey = 0
|
||||
vw.stack[vw.frame].start = 0
|
||||
|
||||
vw.stack[vw.frame].mode = m
|
||||
switch m {
|
||||
case mDocument, mArray, mCodeWithScope:
|
||||
vw.reserveLength()
|
||||
}
|
||||
}
|
||||
|
||||
func (vw *valueWriter) reserveLength() {
|
||||
vw.stack[vw.frame].start = int32(len(vw.buf))
|
||||
vw.buf = append(vw.buf, 0x00, 0x00, 0x00, 0x00)
|
||||
}
|
||||
|
||||
func (vw *valueWriter) pop() {
|
||||
switch vw.stack[vw.frame].mode {
|
||||
case mElement, mValue:
|
||||
vw.frame--
|
||||
case mDocument, mArray, mCodeWithScope:
|
||||
vw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
|
||||
}
|
||||
}
|
||||
|
||||
// NewBSONValueWriter creates a ValueWriter that writes BSON to w.
|
||||
//
|
||||
// This ValueWriter will only write entire documents to the io.Writer and it
|
||||
// will buffer the document as it is built.
|
||||
func NewBSONValueWriter(w io.Writer) (ValueWriter, error) {
|
||||
if w == nil {
|
||||
return nil, errNilWriter
|
||||
}
|
||||
return newValueWriter(w), nil
|
||||
}
|
||||
|
||||
func newValueWriter(w io.Writer) *valueWriter {
|
||||
vw := new(valueWriter)
|
||||
stack := make([]vwState, 1, 5)
|
||||
stack[0] = vwState{mode: mTopLevel}
|
||||
vw.w = w
|
||||
vw.stack = stack
|
||||
|
||||
return vw
|
||||
}
|
||||
|
||||
func newValueWriterFromSlice(buf []byte) *valueWriter {
|
||||
vw := new(valueWriter)
|
||||
stack := make([]vwState, 1, 5)
|
||||
stack[0] = vwState{mode: mTopLevel}
|
||||
vw.stack = stack
|
||||
vw.buf = buf
|
||||
|
||||
return vw
|
||||
}
|
||||
|
||||
func (vw *valueWriter) reset(buf []byte) {
|
||||
if vw.stack == nil {
|
||||
vw.stack = make([]vwState, 1, 5)
|
||||
}
|
||||
vw.stack = vw.stack[:1]
|
||||
vw.stack[0] = vwState{mode: mTopLevel}
|
||||
vw.buf = buf
|
||||
vw.frame = 0
|
||||
vw.w = nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) invalidTransitionError(destination mode, name string, modes []mode) error {
|
||||
te := TransitionError{
|
||||
name: name,
|
||||
current: vw.stack[vw.frame].mode,
|
||||
destination: destination,
|
||||
modes: modes,
|
||||
action: "write",
|
||||
}
|
||||
if vw.frame != 0 {
|
||||
te.parent = vw.stack[vw.frame-1].mode
|
||||
}
|
||||
return te
|
||||
}
|
||||
|
||||
func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
|
||||
switch vw.stack[vw.frame].mode {
|
||||
case mElement:
|
||||
vw.buf = bsoncore.AppendHeader(vw.buf, t, vw.stack[vw.frame].key)
|
||||
case mValue:
|
||||
// TODO: Do this with a cache of the first 1000 or so array keys.
|
||||
vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey))
|
||||
default:
|
||||
modes := []mode{mElement, mValue}
|
||||
if addmodes != nil {
|
||||
modes = append(modes, addmodes...)
|
||||
}
|
||||
return vw.invalidTransitionError(destination, callerName, modes)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteValueBytes(t bsontype.Type, b []byte) error {
|
||||
if err := vw.writeElementHeader(t, mode(0), "WriteValueBytes"); err != nil {
|
||||
return err
|
||||
}
|
||||
vw.buf = append(vw.buf, b...)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteArray() (ArrayWriter, error) {
|
||||
if err := vw.writeElementHeader(bsontype.Array, mArray, "WriteArray"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vw.push(mArray)
|
||||
|
||||
return vw, nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteBinary(b []byte) error {
|
||||
return vw.WriteBinaryWithSubtype(b, 0x00)
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
|
||||
if err := vw.writeElementHeader(bsontype.Binary, mode(0), "WriteBinaryWithSubtype"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendBinary(vw.buf, btype, b)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteBoolean(b bool) error {
|
||||
if err := vw.writeElementHeader(bsontype.Boolean, mode(0), "WriteBoolean"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendBoolean(vw.buf, b)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
|
||||
if err := vw.writeElementHeader(bsontype.CodeWithScope, mCodeWithScope, "WriteCodeWithScope"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// CodeWithScope is different from other types because we need an extra
// frame on the stack. In the EndDocument code, we write the document
// length, pop, write the code with scope length, and pop. To simplify the
// pop code, we push a spacer frame that we'll always jump over.
|
||||
vw.push(mCodeWithScope)
|
||||
vw.buf = bsoncore.AppendString(vw.buf, code)
|
||||
vw.push(mSpacer)
|
||||
vw.push(mDocument)
|
||||
|
||||
return vw, nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
|
||||
if err := vw.writeElementHeader(bsontype.DBPointer, mode(0), "WriteDBPointer"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendDBPointer(vw.buf, ns, oid)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDateTime(dt int64) error {
|
||||
if err := vw.writeElementHeader(bsontype.DateTime, mode(0), "WriteDateTime"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendDateTime(vw.buf, dt)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDecimal128(d128 primitive.Decimal128) error {
|
||||
if err := vw.writeElementHeader(bsontype.Decimal128, mode(0), "WriteDecimal128"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendDecimal128(vw.buf, d128)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDouble(f float64) error {
|
||||
if err := vw.writeElementHeader(bsontype.Double, mode(0), "WriteDouble"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendDouble(vw.buf, f)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteInt32(i32 int32) error {
|
||||
if err := vw.writeElementHeader(bsontype.Int32, mode(0), "WriteInt32"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendInt32(vw.buf, i32)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteInt64(i64 int64) error {
|
||||
if err := vw.writeElementHeader(bsontype.Int64, mode(0), "WriteInt64"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendInt64(vw.buf, i64)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteJavascript(code string) error {
|
||||
if err := vw.writeElementHeader(bsontype.JavaScript, mode(0), "WriteJavascript"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendJavaScript(vw.buf, code)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteMaxKey() error {
|
||||
if err := vw.writeElementHeader(bsontype.MaxKey, mode(0), "WriteMaxKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteMinKey() error {
|
||||
if err := vw.writeElementHeader(bsontype.MinKey, mode(0), "WriteMinKey"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteNull() error {
|
||||
if err := vw.writeElementHeader(bsontype.Null, mode(0), "WriteNull"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteObjectID(oid primitive.ObjectID) error {
|
||||
if err := vw.writeElementHeader(bsontype.ObjectID, mode(0), "WriteObjectID"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendObjectID(vw.buf, oid)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteRegex(pattern string, options string) error {
|
||||
if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendRegex(vw.buf, pattern, sortStringAlphebeticAscending(options))
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteString(s string) error {
|
||||
if err := vw.writeElementHeader(bsontype.String, mode(0), "WriteString"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendString(vw.buf, s)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDocument() (DocumentWriter, error) {
|
||||
if vw.stack[vw.frame].mode == mTopLevel {
|
||||
vw.reserveLength()
|
||||
return vw, nil
|
||||
}
|
||||
if err := vw.writeElementHeader(bsontype.EmbeddedDocument, mDocument, "WriteDocument", mTopLevel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vw.push(mDocument)
|
||||
return vw, nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteSymbol(symbol string) error {
|
||||
if err := vw.writeElementHeader(bsontype.Symbol, mode(0), "WriteSymbol"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendSymbol(vw.buf, symbol)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteTimestamp(t uint32, i uint32) error {
|
||||
if err := vw.writeElementHeader(bsontype.Timestamp, mode(0), "WriteTimestamp"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.buf = bsoncore.AppendTimestamp(vw.buf, t, i)
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteUndefined() error {
|
||||
if err := vw.writeElementHeader(bsontype.Undefined, mode(0), "WriteUndefined"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
|
||||
switch vw.stack[vw.frame].mode {
|
||||
case mTopLevel, mDocument:
|
||||
default:
|
||||
return nil, vw.invalidTransitionError(mElement, "WriteDocumentElement", []mode{mTopLevel, mDocument})
|
||||
}
|
||||
|
||||
vw.push(mElement)
|
||||
vw.stack[vw.frame].key = key
|
||||
|
||||
return vw, nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteDocumentEnd() error {
|
||||
switch vw.stack[vw.frame].mode {
|
||||
case mTopLevel, mDocument:
|
||||
default:
|
||||
return fmt.Errorf("incorrect mode to end document: %s", vw.stack[vw.frame].mode)
|
||||
}
|
||||
|
||||
vw.buf = append(vw.buf, 0x00)
|
||||
|
||||
err := vw.writeLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if vw.stack[vw.frame].mode == mTopLevel {
|
||||
if vw.w != nil {
|
||||
if sw, ok := vw.w.(*SliceWriter); ok {
|
||||
*sw = vw.buf
|
||||
} else {
|
||||
_, err = vw.w.Write(vw.buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// reset buffer
|
||||
vw.buf = vw.buf[:0]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
|
||||
if vw.stack[vw.frame].mode == mCodeWithScope {
|
||||
// We ignore the error here because of the guarantee of writeLength.
// See the docs for writeLength for more info.
|
||||
_ = vw.writeLength()
|
||||
vw.pop()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteArrayElement() (ValueWriter, error) {
|
||||
if vw.stack[vw.frame].mode != mArray {
|
||||
return nil, vw.invalidTransitionError(mValue, "WriteArrayElement", []mode{mArray})
|
||||
}
|
||||
|
||||
arrkey := vw.stack[vw.frame].arrkey
|
||||
vw.stack[vw.frame].arrkey++
|
||||
|
||||
vw.push(mValue)
|
||||
vw.stack[vw.frame].arrkey = arrkey
|
||||
|
||||
return vw, nil
|
||||
}
|
||||
|
||||
func (vw *valueWriter) WriteArrayEnd() error {
|
||||
if vw.stack[vw.frame].mode != mArray {
|
||||
return fmt.Errorf("incorrect mode to end array: %s", vw.stack[vw.frame].mode)
|
||||
}
|
||||
|
||||
vw.buf = append(vw.buf, 0x00)
|
||||
|
||||
err := vw.writeLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vw.pop()
|
||||
return nil
|
||||
}
|
||||
|
||||
// NOTE: We assume that if writeLength is called more than once within the
// same function without altering vw.buf, it will not return an error. If
// this changes, ensure that the following methods are updated:
//
// - WriteDocumentEnd
|
||||
func (vw *valueWriter) writeLength() error {
|
||||
length := len(vw.buf)
|
||||
if length > maxSize {
|
||||
return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))}
|
||||
}
|
||||
length = length - int(vw.stack[vw.frame].start)
|
||||
start := vw.stack[vw.frame].start
|
||||
|
||||
vw.buf[start+0] = byte(length)
|
||||
vw.buf[start+1] = byte(length >> 8)
|
||||
vw.buf[start+2] = byte(length >> 16)
|
||||
vw.buf[start+3] = byte(length >> 24)
|
||||
return nil
|
||||
}
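The reserveLength/writeLength pair above implements the usual BSON framing pattern: reserve four zero bytes, build the body, then patch the little-endian length in place. A standalone sketch of that pattern with a hand-built single-element document (illustrative only; nothing here is from the vendored file):

package main

import "fmt"

func main() {
	var buf []byte

	// Reserve a 4-byte placeholder for the document length.
	start := len(buf)
	buf = append(buf, 0x00, 0x00, 0x00, 0x00)

	// Body: one boolean element "ok" = true, then the document terminator.
	buf = append(buf, 0x08)    // element type: boolean
	buf = append(buf, "ok"...) // element key
	buf = append(buf, 0x00)    // key terminator
	buf = append(buf, 0x01)    // value: true
	buf = append(buf, 0x00)    // document terminator

	// Patch the length (placeholder included) in little-endian order.
	length := len(buf) - start
	buf[start+0] = byte(length)
	buf[start+1] = byte(length >> 8)
	buf[start+2] = byte(length >> 16)
	buf[start+3] = byte(length >> 24)

	fmt.Printf("% x\n", buf) // 0a 00 00 00 08 6f 6b 00 01 00
}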
|
||||
96
vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
generated
vendored
Executable file
@@ -0,0 +1,96 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonrw
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// ArrayWriter is the interface used to create a BSON or BSON adjacent array.
|
||||
// Callers must ensure they call WriteArrayEnd when they have finished creating
|
||||
// the array.
|
||||
type ArrayWriter interface {
|
||||
WriteArrayElement() (ValueWriter, error)
|
||||
WriteArrayEnd() error
|
||||
}
|
||||
|
||||
// DocumentWriter is the interface used to create a BSON or BSON adjacent
|
||||
// document. Callers must ensure they call WriteDocumentEnd when they have
|
||||
// finished creating the document.
|
||||
type DocumentWriter interface {
|
||||
WriteDocumentElement(string) (ValueWriter, error)
|
||||
WriteDocumentEnd() error
|
||||
}
|
||||
|
||||
// ValueWriter is the interface used to write BSON values. Implementations of
|
||||
// this interface handle creating BSON or BSON adjacent representations of the
|
||||
// values.
|
||||
type ValueWriter interface {
|
||||
WriteArray() (ArrayWriter, error)
|
||||
WriteBinary(b []byte) error
|
||||
WriteBinaryWithSubtype(b []byte, btype byte) error
|
||||
WriteBoolean(bool) error
|
||||
WriteCodeWithScope(code string) (DocumentWriter, error)
|
||||
WriteDBPointer(ns string, oid primitive.ObjectID) error
|
||||
WriteDateTime(dt int64) error
|
||||
WriteDecimal128(primitive.Decimal128) error
|
||||
WriteDouble(float64) error
|
||||
WriteInt32(int32) error
|
||||
WriteInt64(int64) error
|
||||
WriteJavascript(code string) error
|
||||
WriteMaxKey() error
|
||||
WriteMinKey() error
|
||||
WriteNull() error
|
||||
WriteObjectID(primitive.ObjectID) error
|
||||
WriteRegex(pattern, options string) error
|
||||
WriteString(string) error
|
||||
WriteDocument() (DocumentWriter, error)
|
||||
WriteSymbol(symbol string) error
|
||||
WriteTimestamp(t, i uint32) error
|
||||
WriteUndefined() error
|
||||
}
|
||||
|
||||
// BytesWriter is the interface used to write BSON bytes to a ValueWriter.
|
||||
// This interface is meant to be a superset of ValueWriter, so that types that
|
||||
// implement ValueWriter may also implement this interface.
|
||||
type BytesWriter interface {
|
||||
WriteValueBytes(t bsontype.Type, b []byte) error
|
||||
}
|
||||
|
||||
// SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer.
|
||||
type SliceWriter []byte
|
||||
|
||||
func (sw *SliceWriter) Write(p []byte) (int, error) {
|
||||
written := len(p)
|
||||
*sw = append(*sw, p...)
|
||||
return written, nil
|
||||
}
|
||||
|
||||
type writer []byte
|
||||
|
||||
func (w *writer) Write(p []byte) (int, error) {
|
||||
index := len(*w)
|
||||
return w.WriteAt(p, int64(index))
|
||||
}
|
||||
|
||||
func (w *writer) WriteAt(p []byte, off int64) (int, error) {
|
||||
newend := off + int64(len(p))
|
||||
if newend < int64(len(*w)) {
|
||||
newend = int64(len(*w))
|
||||
}
|
||||
|
||||
if newend > int64(cap(*w)) {
|
||||
buf := make([]byte, int64(2*cap(*w))+newend)
|
||||
copy(buf, *w)
|
||||
*w = buf
|
||||
}
|
||||
|
||||
*w = []byte(*w)[:newend]
|
||||
copy([]byte(*w)[off:], p)
|
||||
return len(p), nil
|
||||
}
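A brief sketch of SliceWriter acting as an io.Writer (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	sw := new(bsonrw.SliceWriter)

	// Anything written through the io.Writer interface is appended to the
	// underlying byte slice.
	fmt.Fprintf(sw, "hello, %s", "bson")

	fmt.Println(string(*sw)) // hello, bson
}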
|
||||
87
vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
generated
vendored
Executable file
@@ -0,0 +1,87 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// Package bsontype is a utility package that contains types for each BSON type and
// a stringifier for the Type to enable easier debugging when working with BSON.
|
||||
package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
|
||||
// These constants uniquely refer to each BSON type.
|
||||
const (
	Double           Type = 0x01
	String           Type = 0x02
	EmbeddedDocument Type = 0x03
	Array            Type = 0x04
	Binary           Type = 0x05
	Undefined        Type = 0x06
	ObjectID         Type = 0x07
	Boolean          Type = 0x08
	DateTime         Type = 0x09
	Null             Type = 0x0A
	Regex            Type = 0x0B
	DBPointer        Type = 0x0C
	JavaScript       Type = 0x0D
	Symbol           Type = 0x0E
	CodeWithScope    Type = 0x0F
	Int32            Type = 0x10
	Timestamp        Type = 0x11
	Int64            Type = 0x12
	Decimal128       Type = 0x13
	MinKey           Type = 0xFF
	MaxKey           Type = 0x7F
)
|
||||
|
||||
// Type represents a BSON type.
|
||||
type Type byte
|
||||
|
||||
// String returns the string representation of the BSON type's name.
|
||||
func (bt Type) String() string {
|
||||
switch bt {
|
||||
case '\x01':
|
||||
return "double"
|
||||
case '\x02':
|
||||
return "string"
|
||||
case '\x03':
|
||||
return "embedded document"
|
||||
case '\x04':
|
||||
return "array"
|
||||
case '\x05':
|
||||
return "binary"
|
||||
case '\x06':
|
||||
return "undefined"
|
||||
case '\x07':
|
||||
return "objectID"
|
||||
case '\x08':
|
||||
return "boolean"
|
||||
case '\x09':
|
||||
return "UTC datetime"
|
||||
case '\x0A':
|
||||
return "null"
|
||||
case '\x0B':
|
||||
return "regex"
|
||||
case '\x0C':
|
||||
return "dbPointer"
|
||||
case '\x0D':
|
||||
return "javascript"
|
||||
case '\x0E':
|
||||
return "symbol"
|
||||
case '\x0F':
|
||||
return "code with scope"
|
||||
case '\x10':
|
||||
return "32-bit integer"
|
||||
case '\x11':
|
||||
return "timestamp"
|
||||
case '\x12':
|
||||
return "64-bit integer"
|
||||
case '\x13':
|
||||
return "128-bit decimal"
|
||||
case '\xFF':
|
||||
return "min key"
|
||||
case '\x7F':
|
||||
return "max key"
|
||||
default:
|
||||
return "invalid"
|
||||
}
|
||||
}
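A short usage sketch for the Type constants and their String method (illustrative only):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// The byte values mirror the BSON specification, so a raw element type
	// byte can be converted directly and printed for debugging.
	t := bsontype.Type(0x02)
	fmt.Println(t == bsontype.String, t) // true string
}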
|
||||
112
vendor/go.mongodb.org/mongo-driver/bson/decoder.go
generated
vendored
Executable file
@@ -0,0 +1,112 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
)
|
||||
|
||||
// ErrDecodeToNil is the error returned when trying to decode to a nil value
|
||||
var ErrDecodeToNil = errors.New("cannot Decode to nil value")
|
||||
|
||||
// This pool is used to keep the allocations of Decoders down. This is only used for the Unmarshal*
// methods and is not consumable from outside of this package. The Decoders retrieved from this pool
// must have both Reset and SetRegistry called on them.
|
||||
var decPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(Decoder)
|
||||
},
|
||||
}
|
||||
|
||||
// A Decoder reads and decodes BSON documents from a stream. It reads from a bsonrw.ValueReader as
|
||||
// the source of BSON data.
|
||||
type Decoder struct {
|
||||
dc bsoncodec.DecodeContext
|
||||
vr bsonrw.ValueReader
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
|
||||
func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) {
|
||||
if vr == nil {
|
||||
return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
|
||||
}
|
||||
|
||||
return &Decoder{
|
||||
dc: bsoncodec.DecodeContext{Registry: DefaultRegistry},
|
||||
vr: vr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr.
|
||||
func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) {
|
||||
if dc.Registry == nil {
|
||||
dc.Registry = DefaultRegistry
|
||||
}
|
||||
if vr == nil {
|
||||
return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
|
||||
}
|
||||
|
||||
return &Decoder{
|
||||
dc: dc,
|
||||
vr: vr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Decode reads the next BSON document from the stream and decodes it into the
|
||||
// value pointed to by val.
|
||||
//
|
||||
// The documentation for Unmarshal contains details about the conversion of BSON
// into a Go value.
|
||||
func (d *Decoder) Decode(val interface{}) error {
|
||||
if unmarshaler, ok := val.(Unmarshaler); ok {
|
||||
// TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method.
|
||||
buf, err := bsonrw.Copier{}.CopyDocumentToBytes(d.vr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return unmarshaler.UnmarshalBSON(buf)
|
||||
}
|
||||
|
||||
rval := reflect.ValueOf(val)
|
||||
if rval.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("argument to Decode must be a pointer to a type, but got %v", rval)
|
||||
}
|
||||
if rval.IsNil() {
|
||||
return ErrDecodeToNil
|
||||
}
|
||||
rval = rval.Elem()
|
||||
decoder, err := d.dc.LookupDecoder(rval.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return decoder.DecodeValue(d.dc, d.vr, rval)
|
||||
}
|
||||
|
||||
// Reset will reset the state of the decoder, using the same *DecodeContext used in
|
||||
// the original construction but using vr for reading.
|
||||
func (d *Decoder) Reset(vr bsonrw.ValueReader) error {
|
||||
d.vr = vr
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRegistry replaces the current registry of the decoder with r.
|
||||
func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error {
|
||||
d.dc.Registry = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContext replaces the current DecodeContext of the decoder with dc.
|
||||
func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
|
||||
d.dc = dc
|
||||
return nil
|
||||
}
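A hedged end-to-end sketch of the Decoder above; it assumes bsonrw.NewBSONDocumentReader (defined elsewhere in the bsonrw package) and bson.Marshal are available as in the driver:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	// Build a BSON document to read back.
	data, err := bson.Marshal(bson.D{{"foo", "bar"}})
	if err != nil {
		panic(err)
	}

	// Wrap the bytes in a ValueReader and decode into a struct.
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		panic(err)
	}

	var out struct{ Foo string }
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Foo) // bar
}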
|
||||
42
vendor/go.mongodb.org/mongo-driver/bson/doc.go
generated
vendored
Executable file
@@ -0,0 +1,42 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// Package bson is a library for reading, writing, and manipulating BSON. The
|
||||
// library has two families of types for representing BSON.
|
||||
//
|
||||
// The Raw family of types is used to validate and retrieve elements from a slice of bytes. This
|
||||
// type is most useful when you want to do lookups on BSON bytes without unmarshaling it into another
|
||||
// type.
|
||||
//
|
||||
// Example:
|
||||
// var raw bson.Raw = ... // bytes from somewhere
|
||||
// err := raw.Validate()
|
||||
// if err != nil { return err }
|
||||
// val := raw.Lookup("foo")
|
||||
// i32, ok := val.Int32OK()
|
||||
// // do something with i32...
|
||||
//
|
||||
// The D family of types is used to build concise representations of BSON using native Go types.
|
||||
// These types do not support automatic lookup.
|
||||
//
|
||||
// Example:
|
||||
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
|
||||
//
|
||||
//
|
||||
// Marshaling and Unmarshaling are handled with the Marshal and Unmarshal family of functions. If
|
||||
// you need to write or read BSON from a non-slice source, an Encoder or Decoder can be used with a
|
||||
// bsonrw.ValueWriter or bsonrw.ValueReader.
|
||||
//
|
||||
// Example:
|
||||
// b, err := bson.Marshal(bson.D{{"foo", "bar"}})
|
||||
// if err != nil { return err }
|
||||
// var fooer struct {
|
||||
// Foo string
|
||||
// }
|
||||
// err = bson.Unmarshal(b, &fooer)
|
||||
// if err != nil { return err }
|
||||
// // do something with fooer...
|
||||
package bson
|
||||
99
vendor/go.mongodb.org/mongo-driver/bson/encoder.go
generated
vendored
Executable file
@@ -0,0 +1,99 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
)
|
||||
|
||||
// This pool is used to keep the allocations of Encoders down. This is only used for the Marshal*
|
||||
// methods and is not consumable from outside of this package. The Encoders retrieved from this pool
|
||||
// must have both Reset and SetRegistry called on them.
|
||||
var encPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(Encoder)
|
||||
},
|
||||
}
|
||||
|
||||
// An Encoder writes a serialization format to an output stream. It writes to a bsonrw.ValueWriter
|
||||
// as the destination of BSON data.
|
||||
type Encoder struct {
|
||||
ec bsoncodec.EncodeContext
|
||||
vw bsonrw.ValueWriter
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw.
|
||||
func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
|
||||
if vw == nil {
|
||||
return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
|
||||
}
|
||||
|
||||
return &Encoder{
|
||||
ec: bsoncodec.EncodeContext{Registry: DefaultRegistry},
|
||||
vw: vw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw.
|
||||
func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) {
|
||||
if ec.Registry == nil {
|
||||
ec = bsoncodec.EncodeContext{Registry: DefaultRegistry}
|
||||
}
|
||||
if vw == nil {
|
||||
return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
|
||||
}
|
||||
|
||||
return &Encoder{
|
||||
ec: ec,
|
||||
vw: vw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encode writes the BSON encoding of val to the stream.
|
||||
//
|
||||
// The documentation for Marshal contains details about the conversion of Go
|
||||
// values to BSON.
|
||||
func (e *Encoder) Encode(val interface{}) error {
|
||||
if marshaler, ok := val.(Marshaler); ok {
|
||||
// TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse?
|
||||
buf, err := marshaler.MarshalBSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bsonrw.Copier{}.CopyDocumentFromBytes(e.vw, buf)
|
||||
}
|
||||
|
||||
encoder, err := e.ec.LookupEncoder(reflect.TypeOf(val))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val))
|
||||
}
|
||||
|
||||
// Reset will reset the state of the encoder, using the same *EncodeContext used in
|
||||
// the original construction but using vw.
|
||||
func (e *Encoder) Reset(vw bsonrw.ValueWriter) error {
|
||||
e.vw = vw
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRegistry replaces the current registry of the encoder with r.
|
||||
func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error {
|
||||
e.ec.Registry = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContext replaces the current EncodeContext of the encoder with ec.
|
||||
func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error {
|
||||
e.ec = ec
|
||||
return nil
|
||||
}
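A matching sketch for the Encoder, writing a document through a bsonrw.ValueWriter into an in-memory buffer (illustrative only):

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)

	// NewBSONValueWriter buffers the document and flushes it to buf when the
	// top-level document is ended.
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}

	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	if err := enc.Encode(bson.M{"pi": 3.14159}); err != nil {
		panic(err)
	}

	fmt.Printf("% x\n", buf.Bytes()) // raw BSON for {"pi": 3.14159}
}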
|
||||
156
vendor/go.mongodb.org/mongo-driver/bson/marshal.go
generated
vendored
Executable file
@@ -0,0 +1,156 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
const defaultDstCap = 256
|
||||
|
||||
var bvwPool = bsonrw.NewBSONValueWriterPool()
|
||||
var extjPool = bsonrw.NewExtJSONValueWriterPool()
|
||||
|
||||
// Marshaler is an interface implemented by types that can marshal themselves
|
||||
// into a BSON document represented as bytes. The bytes returned must be a valid
|
||||
// BSON document if the error is nil.
|
||||
type Marshaler interface {
|
||||
MarshalBSON() ([]byte, error)
|
||||
}
|
||||
|
||||
// ValueMarshaler is an interface implemented by types that can marshal
|
||||
// themselves into a BSON value as bytes. The type must be the valid type for
|
||||
// the bytes returned. The bytes and byte type together must be valid if the
|
||||
// error is nil.
|
||||
type ValueMarshaler interface {
|
||||
MarshalBSONValue() (bsontype.Type, []byte, error)
|
||||
}
|
||||
|
||||
// Marshal returns the BSON encoding of val.
|
||||
//
|
||||
// Marshal will use the default registry created by NewRegistry to recursively
|
||||
// marshal val into a []byte. Marshal will inspect struct tags and alter the
|
||||
// marshaling process accordingly.
|
||||
func Marshal(val interface{}) ([]byte, error) {
|
||||
return MarshalWithRegistry(DefaultRegistry, val)
|
||||
}
|
||||
|
||||
// MarshalAppend will append the BSON encoding of val to dst. If dst is not
|
||||
// large enough to hold the BSON encoding of val, dst will be grown.
|
||||
func MarshalAppend(dst []byte, val interface{}) ([]byte, error) {
|
||||
return MarshalAppendWithRegistry(DefaultRegistry, dst, val)
|
||||
}
|
||||
|
||||
// MarshalWithRegistry returns the BSON encoding of val using Registry r.
|
||||
func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) {
|
||||
dst := make([]byte, 0, 256) // TODO: make the default cap a constant
|
||||
return MarshalAppendWithRegistry(r, dst, val)
|
||||
}
|
||||
|
||||
// MarshalWithContext returns the BSON encoding of val using EncodeContext ec.
|
||||
func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) {
|
||||
dst := make([]byte, 0, 256) // TODO: make the default cap a constant
|
||||
return MarshalAppendWithContext(ec, dst, val)
|
||||
}
|
||||
|
||||
// MarshalAppendWithRegistry will append the BSON encoding of val to dst using
|
||||
// Registry r. If dst is not large enough to hold the BSON encoding of val, dst
|
||||
// will be grown.
|
||||
func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) {
|
||||
return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
|
||||
}
|
||||
|
||||
// MarshalAppendWithContext will append the BSON encoding of val to dst using
|
||||
// EncodeContext ec. If dst is not large enough to hold the BSON encoding of val, dst
|
||||
// will be grown.
|
||||
func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) {
|
||||
sw := new(bsonrw.SliceWriter)
|
||||
*sw = dst
|
||||
vw := bvwPool.Get(sw)
|
||||
defer bvwPool.Put(vw)
|
||||
|
||||
enc := encPool.Get().(*Encoder)
|
||||
defer encPool.Put(enc)
|
||||
|
||||
err := enc.Reset(vw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = enc.SetContext(ec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = enc.Encode(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return *sw, nil
|
||||
}
|
||||
|
||||
// MarshalExtJSON returns the extended JSON encoding of val.
|
||||
func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
return MarshalExtJSONWithRegistry(DefaultRegistry, val, canonical, escapeHTML)
|
||||
}
|
||||
|
||||
// MarshalExtJSONAppend will append the extended JSON encoding of val to dst.
|
||||
// If dst is not large enough to hold the extended JSON encoding of val, dst
|
||||
// will be grown.
|
||||
func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML)
|
||||
}
|
||||
|
||||
// MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r.
|
||||
func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
dst := make([]byte, 0, defaultDstCap)
|
||||
return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
|
||||
}
|
||||
|
||||
// MarshalExtJSONWithContext returns the extended JSON encoding of val using EncodeContext ec.
|
||||
func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
dst := make([]byte, 0, defaultDstCap)
|
||||
return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML)
|
||||
}
|
||||
|
||||
// MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of
|
||||
// val to dst using Registry r. If dst is not large enough to hold the BSON
|
||||
// encoding of val, dst will be grown.
|
||||
func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
|
||||
}
|
||||
|
||||
// MarshalExtJSONAppendWithContext will append the extended JSON encoding of
// val to dst using EncodeContext ec. If dst is not large enough to hold the
// extended JSON encoding of val, dst will be grown.
|
||||
func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
|
||||
sw := new(bsonrw.SliceWriter)
|
||||
*sw = dst
|
||||
ejvw := extjPool.Get(sw, canonical, escapeHTML)
|
||||
defer extjPool.Put(ejvw)
|
||||
|
||||
enc := encPool.Get().(*Encoder)
|
||||
defer encPool.Put(enc)
|
||||
|
||||
err := enc.Reset(ejvw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = enc.SetContext(ec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = enc.Encode(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return *sw, nil
|
||||
}
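A short sketch tying the Marshal helpers above together (illustrative only; the extended JSON output shown is for canonical mode):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc := bson.D{{"foo", "bar"}, {"n", int32(7)}}

	raw, err := bson.Marshal(doc)
	if err != nil {
		panic(err)
	}

	// canonical=true, escapeHTML=false
	ext, err := bson.MarshalExtJSON(doc, true, false)
	if err != nil {
		panic(err)
	}

	fmt.Printf("% x\n%s\n", raw, ext) // e.g. {"foo":"bar","n":{"$numberInt":"7"}}
}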
|
||||
307
vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
generated
vendored
Executable file
@@ -0,0 +1,307 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
|
||||
// See THIRD-PARTY-NOTICES for original license terms.
|
||||
|
||||
package primitive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Decimal128 holds decimal128 BSON values.
|
||||
type Decimal128 struct {
|
||||
h, l uint64
|
||||
}
|
||||
|
||||
// NewDecimal128 creates a Decimal128 using the provided high and low uint64s.
|
||||
func NewDecimal128(h, l uint64) Decimal128 {
|
||||
return Decimal128{h: h, l: l}
|
||||
}
|
||||
|
||||
// GetBytes returns the underlying bytes of the BSON decimal value as two uint64 values. The first
// contains the high 8 bytes of the value and the second contains the low 8 bytes.
|
||||
func (d Decimal128) GetBytes() (uint64, uint64) {
|
||||
return d.h, d.l
|
||||
}
|
||||
|
||||
// String returns a string representation of the decimal value.
|
||||
func (d Decimal128) String() string {
|
||||
var pos int // positive sign
|
||||
var e int // exponent
|
||||
var h, l uint64 // significand high/low
|
||||
|
||||
if d.h>>63&1 == 0 {
|
||||
pos = 1
|
||||
}
|
||||
|
||||
switch d.h >> 58 & (1<<5 - 1) {
|
||||
case 0x1F:
|
||||
return "NaN"
|
||||
case 0x1E:
|
||||
return "-Infinity"[pos:]
|
||||
}
|
||||
|
||||
l = d.l
|
||||
if d.h>>61&3 == 3 {
|
||||
// Bits: 1*sign 2*ignored 14*exponent 111*significand.
|
||||
// Implicit 0b100 prefix in significand.
|
||||
e = int(d.h>>47&(1<<14-1)) - 6176
|
||||
//h = 4<<47 | d.h&(1<<47-1)
|
||||
// Spec says all of these values are out of range.
|
||||
h, l = 0, 0
|
||||
} else {
|
||||
// Bits: 1*sign 14*exponent 113*significand
|
||||
e = int(d.h>>49&(1<<14-1)) - 6176
|
||||
h = d.h & (1<<49 - 1)
|
||||
}
|
||||
|
||||
// Would be handled by the logic below, but that's trivial and common.
|
||||
if h == 0 && l == 0 && e == 0 {
|
||||
return "-0"[pos:]
|
||||
}
|
||||
|
||||
var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
|
||||
var last = len(repr)
|
||||
var i = len(repr)
|
||||
var dot = len(repr) + e
|
||||
var rem uint32
|
||||
Loop:
|
||||
for d9 := 0; d9 < 5; d9++ {
|
||||
h, l, rem = divmod(h, l, 1e9)
|
||||
for d1 := 0; d1 < 9; d1++ {
|
||||
// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
|
||||
if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
|
||||
e += len(repr) - i
|
||||
i--
|
||||
repr[i] = '.'
|
||||
last = i - 1
|
||||
dot = len(repr) // Unmark.
|
||||
}
|
||||
c := '0' + byte(rem%10)
|
||||
rem /= 10
|
||||
i--
|
||||
repr[i] = c
|
||||
// Handle "0E+3", "1E+3", etc.
|
||||
if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
|
||||
last = i
|
||||
break Loop
|
||||
}
|
||||
if c != '0' {
|
||||
last = i
|
||||
}
|
||||
// Break early. Works without it, but why.
|
||||
if dot > i && l == 0 && h == 0 && rem == 0 {
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
}
|
||||
repr[last-1] = '-'
|
||||
last--
|
||||
|
||||
if e > 0 {
|
||||
return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
|
||||
}
|
||||
if e < 0 {
|
||||
return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
|
||||
}
|
||||
return string(repr[last+pos:])
|
||||
}
|
||||
|
||||
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
|
||||
div64 := uint64(div)
|
||||
a := h >> 32
|
||||
aq := a / div64
|
||||
ar := a % div64
|
||||
b := ar<<32 + h&(1<<32-1)
|
||||
bq := b / div64
|
||||
br := b % div64
|
||||
c := br<<32 + l>>32
|
||||
cq := c / div64
|
||||
cr := c % div64
|
||||
d := cr<<32 + l&(1<<32-1)
|
||||
dq := d / div64
|
||||
dr := d % div64
|
||||
return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
|
||||
}
|
||||
|
||||
var dNaN = Decimal128{0x1F << 58, 0}
|
||||
var dPosInf = Decimal128{0x1E << 58, 0}
|
||||
var dNegInf = Decimal128{0x3E << 58, 0}
|
||||
|
||||
func dErr(s string) (Decimal128, error) {
|
||||
return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
|
||||
}
|
||||
|
||||
// ParseDecimal128 takes the given string and attempts to parse it into a valid
// Decimal128 value.
|
||||
func ParseDecimal128(s string) (Decimal128, error) {
|
||||
orig := s
|
||||
if s == "" {
|
||||
return dErr(orig)
|
||||
}
|
||||
neg := s[0] == '-'
|
||||
if neg || s[0] == '+' {
|
||||
s = s[1:]
|
||||
}
|
||||
|
||||
if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
|
||||
if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
|
||||
return dNaN, nil
|
||||
}
|
||||
if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
|
||||
if neg {
|
||||
return dNegInf, nil
|
||||
}
|
||||
return dPosInf, nil
|
||||
}
|
||||
return dErr(orig)
|
||||
}
|
||||
|
||||
var h, l uint64
|
||||
var e int
|
||||
|
||||
var add, ovr uint32
|
||||
var mul uint32 = 1
|
||||
var dot = -1
|
||||
var digits = 0
|
||||
var i = 0
|
||||
for i < len(s) {
|
||||
c := s[i]
|
||||
if mul == 1e9 {
|
||||
h, l, ovr = muladd(h, l, mul, add)
|
||||
mul, add = 1, 0
|
||||
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
if c >= '0' && c <= '9' {
|
||||
i++
|
||||
if c > '0' || digits > 0 {
|
||||
digits++
|
||||
}
|
||||
if digits > 34 {
|
||||
if c == '0' {
|
||||
// Exact rounding.
|
||||
e++
|
||||
continue
|
||||
}
|
||||
return dErr(orig)
|
||||
}
|
||||
mul *= 10
|
||||
add *= 10
|
||||
add += uint32(c - '0')
|
||||
continue
|
||||
}
|
||||
if c == '.' {
|
||||
i++
|
||||
if dot >= 0 || i == 1 && len(s) == 1 {
|
||||
return dErr(orig)
|
||||
}
|
||||
if i == len(s) {
|
||||
break
|
||||
}
|
||||
if s[i] < '0' || s[i] > '9' || e > 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
dot = i
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if i == 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
if mul > 1 {
|
||||
h, l, ovr = muladd(h, l, mul, add)
|
||||
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
if dot >= 0 {
|
||||
e += dot - i
|
||||
}
|
||||
if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
|
||||
i++
|
||||
eneg := s[i] == '-'
|
||||
if eneg || s[i] == '+' {
|
||||
i++
|
||||
if i == len(s) {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
n := 0
|
||||
for i < len(s) && n < 1e4 {
|
||||
c := s[i]
|
||||
i++
|
||||
if c < '0' || c > '9' {
|
||||
return dErr(orig)
|
||||
}
|
||||
n *= 10
|
||||
n += int(c - '0')
|
||||
}
|
||||
if eneg {
|
||||
n = -n
|
||||
}
|
||||
e += n
|
||||
for e < -6176 {
|
||||
// Subnormal.
|
||||
var div uint32 = 1
|
||||
for div < 1e9 && e < -6176 {
|
||||
div *= 10
|
||||
e++
|
||||
}
|
||||
var rem uint32
|
||||
h, l, rem = divmod(h, l, div)
|
||||
if rem > 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
for e > 6111 {
|
||||
// Clamped.
|
||||
var mul uint32 = 1
|
||||
for mul < 1e9 && e > 6111 {
|
||||
mul *= 10
|
||||
e--
|
||||
}
|
||||
h, l, ovr = muladd(h, l, mul, 0)
|
||||
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
if e < -6176 || e > 6111 {
|
||||
return dErr(orig)
|
||||
}
|
||||
}
|
||||
|
||||
if i < len(s) {
|
||||
return dErr(orig)
|
||||
}
|
||||
|
||||
h |= uint64(e+6176) & uint64(1<<14-1) << 49
|
||||
if neg {
|
||||
h |= 1 << 63
|
||||
}
|
||||
return Decimal128{h, l}, nil
|
||||
}
|
||||
|
||||
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
|
||||
mul64 := uint64(mul)
|
||||
a := mul64 * (l & (1<<32 - 1))
|
||||
b := a>>32 + mul64*(l>>32)
|
||||
c := b>>32 + mul64*(h&(1<<32-1))
|
||||
d := c>>32 + mul64*(h>>32)
|
||||
|
||||
a = a&(1<<32-1) + uint64(add)
|
||||
b = b&(1<<32-1) + a>>32
|
||||
c = c&(1<<32-1) + b>>32
|
||||
d = d&(1<<32-1) + c>>32
|
||||
|
||||
return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
|
||||
}
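A small round-trip sketch for the Decimal128 parsing and formatting above (illustrative only):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, err := primitive.ParseDecimal128("1.050E+3")
	if err != nil {
		panic(err)
	}

	// String re-derives the exponent and significand from the raw
	// high/low words, so the textual form is normalized.
	h, l := d.GetBytes()
	fmt.Println(d.String(), h, l)
}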
|
||||
165
vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go
generated
vendored
Executable file
@@ -0,0 +1,165 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
|
||||
// See THIRD-PARTY-NOTICES for original license terms.
|
||||
|
||||
package primitive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
|
||||
var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")
|
||||
|
||||
// ObjectID is the BSON ObjectID type.
|
||||
type ObjectID [12]byte
|
||||
|
||||
// NilObjectID is the zero value for ObjectID.
|
||||
var NilObjectID ObjectID
|
||||
|
||||
var objectIDCounter = readRandomUint32()
|
||||
var processUnique = processUniqueBytes()
|
||||
|
||||
// NewObjectID generates a new ObjectID.
|
||||
func NewObjectID() ObjectID {
|
||||
return NewObjectIDFromTimestamp(time.Now())
|
||||
}
|
||||
|
||||
// NewObjectIDFromTimestamp generates a new ObjectID based on the given time.
|
||||
func NewObjectIDFromTimestamp(timestamp time.Time) ObjectID {
|
||||
var b [12]byte
|
||||
|
||||
binary.BigEndian.PutUint32(b[0:4], uint32(timestamp.Unix()))
|
||||
copy(b[4:9], processUnique[:])
|
||||
putUint24(b[9:12], atomic.AddUint32(&objectIDCounter, 1))
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Timestamp extracts the time part of the ObjectId.
|
||||
func (id ObjectID) Timestamp() time.Time {
|
||||
unixSecs := binary.BigEndian.Uint32(id[0:4])
|
||||
return time.Unix(int64(unixSecs), 0).UTC()
|
||||
}
|
||||
|
||||
// Hex returns the hex encoding of the ObjectID as a string.
|
||||
func (id ObjectID) Hex() string {
|
||||
return hex.EncodeToString(id[:])
|
||||
}
|
||||
|
||||
func (id ObjectID) String() string {
|
||||
return fmt.Sprintf("ObjectID(%q)", id.Hex())
|
||||
}
|
||||
|
||||
// IsZero returns true if id is the empty ObjectID.
|
||||
func (id ObjectID) IsZero() bool {
|
||||
return bytes.Equal(id[:], NilObjectID[:])
|
||||
}
|
||||
|
||||
// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
|
||||
// valid ObjectID.
|
||||
func ObjectIDFromHex(s string) (ObjectID, error) {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
return NilObjectID, err
|
||||
}
|
||||
|
||||
if len(b) != 12 {
|
||||
return NilObjectID, ErrInvalidHex
|
||||
}
|
||||
|
||||
var oid [12]byte
|
||||
copy(oid[:], b[:])
|
||||
|
||||
return oid, nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns the ObjectID as a string
|
||||
func (id ObjectID) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(id.Hex())
|
||||
}
|
||||
|
||||
// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 64 bytes long, it
|
||||
// will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes
|
||||
// long, it will be populated with the BSON representation of the ObjectID. Otherwise, it will
|
||||
// return an error.
|
||||
func (id *ObjectID) UnmarshalJSON(b []byte) error {
|
||||
var err error
|
||||
switch len(b) {
|
||||
case 12:
|
||||
copy(id[:], b)
|
||||
default:
|
||||
// Extended JSON
|
||||
var res interface{}
|
||||
err := json.Unmarshal(b, &res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
str, ok := res.(string)
|
||||
if !ok {
|
||||
m, ok := res.(map[string]interface{})
|
||||
if !ok {
|
||||
return errors.New("not an extended JSON ObjectID")
|
||||
}
|
||||
oid, ok := m["$oid"]
|
||||
if !ok {
|
||||
return errors.New("not an extended JSON ObjectID")
|
||||
}
|
||||
str, ok = oid.(string)
|
||||
if !ok {
|
||||
return errors.New("not an extended JSON ObjectID")
|
||||
}
|
||||
}
|
||||
|
||||
if len(str) != 24 {
|
||||
return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 12 but it is %d", len(str))
|
||||
}
|
||||
|
||||
_, err = hex.Decode(id[:], []byte(str))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func processUniqueBytes() [5]byte {
|
||||
var b [5]byte
|
||||
_, err := io.ReadFull(rand.Reader, b[:])
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func readRandomUint32() uint32 {
|
||||
var b [4]byte
|
||||
_, err := io.ReadFull(rand.Reader, b[:])
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
|
||||
}
|
||||
|
||||
return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
|
||||
}
|
||||
|
||||
func putUint24(b []byte, v uint32) {
|
||||
b[0] = byte(v >> 16)
|
||||
b[1] = byte(v >> 8)
|
||||
b[2] = byte(v)
|
||||
}
|
||||
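The ObjectID API above is small enough to show end to end. A minimal usage sketch (not part of the vendored file; the main wrapper and printed values are illustrative):

package main

import (
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
    // Generate a new ObjectID; the first 4 bytes encode the creation time.
    id := primitive.NewObjectID()
    fmt.Println(id.Hex(), id.Timestamp())

    // Round-trip through the 24-character hex form.
    parsed, err := primitive.ObjectIDFromHex(id.Hex())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(parsed == id, parsed.IsZero()) // true false
}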
186
vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go
generated
vendored
Executable file
@@ -0,0 +1,186 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// Package primitive contains types similar to Go primitives for BSON types that do not have direct
|
||||
// Go primitive representations.
|
||||
package primitive // import "go.mongodb.org/mongo-driver/bson/primitive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Binary represents a BSON binary value.
|
||||
type Binary struct {
|
||||
Subtype byte
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Equal compares bp to bp2 and returns true if they are equal.
|
||||
func (bp Binary) Equal(bp2 Binary) bool {
|
||||
if bp.Subtype != bp2.Subtype {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(bp.Data, bp2.Data)
|
||||
}
|
||||
|
||||
// Undefined represents the BSON undefined value type.
|
||||
type Undefined struct{}
|
||||
|
||||
// DateTime represents the BSON datetime value.
|
||||
type DateTime int64
|
||||
|
||||
// MarshalJSON marshals the DateTime as its corresponding time.Time value.
|
||||
func (d DateTime) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(d.Time())
|
||||
}
|
||||
|
||||
// Time returns the date as a time type.
|
||||
func (d DateTime) Time() time.Time {
|
||||
return time.Unix(int64(d)/1000, int64(d)%1000*1000000)
|
||||
}
|
||||
|
||||
// NewDateTimeFromTime creates a new DateTime from a Time.
|
||||
func NewDateTimeFromTime(t time.Time) DateTime {
|
||||
return DateTime(t.UnixNano() / 1000000)
|
||||
}
|
||||
|
||||
// Null represents the BSON null value.
|
||||
type Null struct{}
|
||||
|
||||
// Regex represents a BSON regex value.
|
||||
type Regex struct {
|
||||
Pattern string
|
||||
Options string
|
||||
}
|
||||
|
||||
func (rp Regex) String() string {
|
||||
return fmt.Sprintf(`{"pattern": "%s", "options": "%s"}`, rp.Pattern, rp.Options)
|
||||
}
|
||||
|
||||
// Equal compares rp to rp2 and returns true if they are equal.
|
||||
func (rp Regex) Equal(rp2 Regex) bool {
|
||||
return rp.Pattern == rp2.Pattern && rp.Options == rp.Options
|
||||
}
|
||||
|
||||
// DBPointer represents a BSON dbpointer value.
|
||||
type DBPointer struct {
|
||||
DB string
|
||||
Pointer ObjectID
|
||||
}
|
||||
|
||||
func (d DBPointer) String() string {
|
||||
return fmt.Sprintf(`{"db": "%s", "pointer": "%s"}`, d.DB, d.Pointer)
|
||||
}
|
||||
|
||||
// Equal compares d to d2 and returns true if they are equal.
|
||||
func (d DBPointer) Equal(d2 DBPointer) bool {
|
||||
return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:])
|
||||
}
|
||||
|
||||
// JavaScript represents a BSON JavaScript code value.
|
||||
type JavaScript string
|
||||
|
||||
// Symbol represents a BSON symbol value.
|
||||
type Symbol string
|
||||
|
||||
// CodeWithScope represents a BSON JavaScript code with scope value.
|
||||
type CodeWithScope struct {
|
||||
Code JavaScript
|
||||
Scope interface{}
|
||||
}
|
||||
|
||||
func (cws CodeWithScope) String() string {
|
||||
return fmt.Sprintf(`{"code": "%s", "scope": %v}`, cws.Code, cws.Scope)
|
||||
}
|
||||
|
||||
// Timestamp represents a BSON timestamp value.
|
||||
type Timestamp struct {
|
||||
T uint32
|
||||
I uint32
|
||||
}
|
||||
|
||||
// Equal compares tp to tp2 and returns true if they are equal.
|
||||
func (tp Timestamp) Equal(tp2 Timestamp) bool {
|
||||
return tp.T == tp2.T && tp.I == tp2.I
|
||||
}
|
||||
|
||||
// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I.
|
||||
// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2.
|
||||
func CompareTimestamp(tp, tp2 Timestamp) int {
|
||||
if tp.Equal(tp2) {
|
||||
return 0
|
||||
}
|
||||
|
||||
if tp.T > tp2.T {
|
||||
return 1
|
||||
}
|
||||
if tp.T < tp2.T {
|
||||
return -1
|
||||
}
|
||||
// Compare I values because T values are equal
|
||||
if tp.I > tp2.I {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// MinKey represents the BSON minkey value.
|
||||
type MinKey struct{}
|
||||
|
||||
// MaxKey represents the BSON maxkey value.
|
||||
type MaxKey struct{}
|
||||
|
||||
// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
|
||||
// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
|
||||
// Document types should be used.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
|
||||
//
|
||||
// This type should be used in situations where order matters, such as MongoDB commands. If the
|
||||
// order is not important, a map is more comfortable and concise.
|
||||
type D []E
|
||||
|
||||
// Map creates a map from the elements of the D.
|
||||
func (d D) Map() M {
|
||||
m := make(M, len(d))
|
||||
for _, e := range d {
|
||||
m[e.Key] = e.Value
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// E represents a BSON element for a D. It is usually used inside a D.
|
||||
type E struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// M is an unordered, concise representation of a BSON Document. It should generally be used to
|
||||
// serialize BSON when the order of the elements of a BSON document does not matter. If the element
|
||||
// order matters, use a D instead.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
|
||||
//
|
||||
// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
|
||||
// serialized in an undefined, random order, and the order will be different each time.
|
||||
type M map[string]interface{}
|
||||
|
||||
// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
|
||||
// readable manner. It should generally be used when serializing to BSON. For deserializing, the
|
||||
// RawArray or Array types should be used.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
|
||||
//
|
||||
type A []interface{}
|
||||
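The D, E, M and A types above are the building blocks for hand-written documents. A short sketch, assuming the usual driver import path:

package main

import (
    "fmt"

    "go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
    // Ordered document: field order is preserved, which matters for commands.
    filter := primitive.D{{Key: "status", Value: "active"}, {Key: "age", Value: primitive.M{"$gte": 21}}}

    // Unordered document and array forms.
    update := primitive.M{"$set": primitive.M{"tags": primitive.A{"a", "b"}}}

    fmt.Println(filter.Map()) // map[age:map[$gte:21] status:active]
    fmt.Println(update)
}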
111
vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go
generated
vendored
Executable file
@@ -0,0 +1,111 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
)
|
||||
|
||||
var primitiveCodecs PrimitiveCodecs
|
||||
|
||||
// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
|
||||
// defined in this package.
|
||||
type PrimitiveCodecs struct{}
|
||||
|
||||
// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
|
||||
// with the provided RegistryBuilder. If rb is nil, this method panics.
|
||||
func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
|
||||
if rb == nil {
|
||||
panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
|
||||
}
|
||||
|
||||
rb.
|
||||
RegisterEncoder(tRawValue, bsoncodec.ValueEncoderFunc(pc.RawValueEncodeValue)).
|
||||
RegisterEncoder(tRaw, bsoncodec.ValueEncoderFunc(pc.RawEncodeValue)).
|
||||
RegisterDecoder(tRawValue, bsoncodec.ValueDecoderFunc(pc.RawValueDecodeValue)).
|
||||
RegisterDecoder(tRaw, bsoncodec.ValueDecoderFunc(pc.RawDecodeValue))
|
||||
}
|
||||
|
||||
// RawValueEncodeValue is the ValueEncoderFunc for RawValue.
|
||||
func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tRawValue {
|
||||
return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val}
|
||||
}
|
||||
|
||||
rawvalue := val.Interface().(RawValue)
|
||||
|
||||
return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value)
|
||||
}
|
||||
|
||||
// RawValueDecodeValue is the ValueDecoderFunc for RawValue.
|
||||
func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tRawValue {
|
||||
return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val}
|
||||
}
|
||||
|
||||
t, value, err := bsonrw.Copier{}.CopyValueToBytes(vr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
val.Set(reflect.ValueOf(RawValue{Type: t, Value: value}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// RawEncodeValue is the ValueEncoderFunc for Raw.
|
||||
func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tRaw {
|
||||
return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val}
|
||||
}
|
||||
|
||||
rdr := val.Interface().(Raw)
|
||||
|
||||
return bsonrw.Copier{}.CopyDocumentFromBytes(vw, rdr)
|
||||
}
|
||||
|
||||
// RawDecodeValue is the ValueDecoderFunc for Raw.
|
||||
func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tRaw {
|
||||
return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val}
|
||||
}
|
||||
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.MakeSlice(val.Type(), 0, 0))
|
||||
}
|
||||
|
||||
val.SetLen(0)
|
||||
|
||||
rdr, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(Raw), vr)
|
||||
val.Set(reflect.ValueOf(rdr))
|
||||
return err
|
||||
}
|
||||
|
||||
func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error {
|
||||
var copier bsonrw.Copier
|
||||
elems, err := raw.Elements()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, elem := range elems {
|
||||
dvw, err := dw.WriteDocumentElement(elem.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
val := elem.Value()
|
||||
err = copier.CopyValueFromBytes(dvw, val.Type, val.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return dw.WriteDocumentEnd()
|
||||
}
|
||||
92
vendor/go.mongodb.org/mongo-driver/bson/raw.go
generated
vendored
Executable file
@@ -0,0 +1,92 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// ErrNilReader indicates that an operation was attempted on a nil bson.Reader.
|
||||
var ErrNilReader = errors.New("nil reader")
|
||||
var errValidateDone = errors.New("validation loop complete")
|
||||
|
||||
// Raw is a wrapper around a byte slice. It will interpret the slice as a
|
||||
// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the
|
||||
// methods on this type and associated types come from the bsoncore package.
|
||||
type Raw []byte
|
||||
|
||||
// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from
|
||||
// it.
|
||||
func NewFromIOReader(r io.Reader) (Raw, error) {
|
||||
doc, err := bsoncore.NewDocumentFromReader(r)
|
||||
return Raw(doc), err
|
||||
}
|
||||
|
||||
// Validate validates the document. This method only validates the first document in
|
||||
// the slice; to validate other documents, the slice must be resliced.
|
||||
func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() }
|
||||
|
||||
// Lookup searches the document, potentially recursively, for the given key. If
|
||||
// there are multiple keys provided, this method will recurse down, as long as
|
||||
// the top and intermediate nodes are either documents or arrays. If an error
|
||||
// occurs or if the value doesn't exist, an empty RawValue is returned.
|
||||
func (r Raw) Lookup(key ...string) RawValue {
|
||||
return convertFromCoreValue(bsoncore.Document(r).Lookup(key...))
|
||||
}
|
||||
|
||||
// LookupErr searches the document and potentially subdocuments or arrays for the
|
||||
// provided key. Each key provided to this method represents a layer of depth.
|
||||
func (r Raw) LookupErr(key ...string) (RawValue, error) {
|
||||
val, err := bsoncore.Document(r).LookupErr(key...)
|
||||
return convertFromCoreValue(val), err
|
||||
}
|
||||
|
||||
// Elements returns this document as a slice of elements. The returned slice will contain valid
|
||||
// elements. If the document is not valid, the elements up to the invalid point will be returned
|
||||
// along with an error.
|
||||
func (r Raw) Elements() ([]RawElement, error) {
|
||||
elems, err := bsoncore.Document(r).Elements()
|
||||
relems := make([]RawElement, 0, len(elems))
|
||||
for _, elem := range elems {
|
||||
relems = append(relems, RawElement(elem))
|
||||
}
|
||||
return relems, err
|
||||
}
|
||||
|
||||
// Values returns this document as a slice of values. The returned slice will contain valid values.
|
||||
// If the document is not valid, the values up to the invalid point will be returned along with an
|
||||
// error.
|
||||
func (r Raw) Values() ([]RawValue, error) {
|
||||
vals, err := bsoncore.Document(r).Values()
|
||||
rvals := make([]RawValue, 0, len(vals))
|
||||
for _, val := range vals {
|
||||
rvals = append(rvals, convertFromCoreValue(val))
|
||||
}
|
||||
return rvals, err
|
||||
}
|
||||
|
||||
// Index searches for and retrieves the element at the given index. This method will panic if
|
||||
// the document is invalid or if the index is out of bounds.
|
||||
func (r Raw) Index(index uint) RawElement { return RawElement(bsoncore.Document(r).Index(index)) }
|
||||
|
||||
// IndexErr searches for and retrieves the element at the given index.
|
||||
func (r Raw) IndexErr(index uint) (RawElement, error) {
|
||||
elem, err := bsoncore.Document(r).IndexErr(index)
|
||||
return RawElement(elem), err
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer interface.
|
||||
func (r Raw) String() string { return bsoncore.Document(r).String() }
|
||||
|
||||
// readi32 is a helper function for reading an int32 from slice of bytes.
|
||||
func readi32(b []byte) int32 {
|
||||
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
|
||||
}
|
||||
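A brief sketch of working with Raw, assuming bson.Marshal and the bson.D alias defined elsewhere in this package:

package main

import (
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson"
)

func main() {
    // Marshal a document to raw BSON bytes.
    data, err := bson.Marshal(bson.D{{Key: "name", Value: "ada"}, {Key: "n", Value: int32(3)}})
    if err != nil {
        log.Fatal(err)
    }

    raw := bson.Raw(data)
    if err := raw.Validate(); err != nil {
        log.Fatal(err)
    }

    // Lookup returns an empty RawValue when the key is missing.
    fmt.Println(raw.Lookup("name").StringValue()) // ada
    fmt.Println(raw.Lookup("n").Int32())          // 3

    // Iterate all elements of the document.
    elems, _ := raw.Elements()
    for _, e := range elems {
        fmt.Println(e.Key(), e.Value())
    }
}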
51
vendor/go.mongodb.org/mongo-driver/bson/raw_element.go
generated
vendored
Executable file
@@ -0,0 +1,51 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// RawElement represents a BSON element in byte form. This type provides a simple way to
|
||||
// transform a slice of bytes into a BSON element and extract information from it.
|
||||
//
|
||||
// RawElement is a thin wrapper around a bsoncore.Element.
|
||||
type RawElement []byte
|
||||
|
||||
// Key returns the key for this element. If the element is not valid, this method returns an empty
|
||||
// string. If knowing if the element is valid is important, use KeyErr.
|
||||
func (re RawElement) Key() string { return bsoncore.Element(re).Key() }
|
||||
|
||||
// KeyErr returns the key for this element, returning an error if the element is not valid.
|
||||
func (re RawElement) KeyErr() (string, error) { return bsoncore.Element(re).KeyErr() }
|
||||
|
||||
// Value returns the value of this element. If the element is not valid, this method returns an
|
||||
// empty Value. If knowing if the element is valid is important, use ValueErr.
|
||||
func (re RawElement) Value() RawValue { return convertFromCoreValue(bsoncore.Element(re).Value()) }
|
||||
|
||||
// ValueErr returns the value for this element, returning an error if the element is not valid.
|
||||
func (re RawElement) ValueErr() (RawValue, error) {
|
||||
val, err := bsoncore.Element(re).ValueErr()
|
||||
return convertFromCoreValue(val), err
|
||||
}
|
||||
|
||||
// Validate ensures re is a valid BSON element.
|
||||
func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() }
|
||||
|
||||
// String implements the fmt.Stringer interface. The output will be in extended JSON format.
|
||||
func (re RawElement) String() string {
|
||||
doc := bsoncore.BuildDocument(nil, re)
|
||||
j, err := MarshalExtJSON(Raw(doc), true, false)
|
||||
if err != nil {
|
||||
return "<malformed>"
|
||||
}
|
||||
return string(j)
|
||||
}
|
||||
|
||||
// DebugString outputs a human readable version of RawElement. It will attempt to stringify the
|
||||
// valid components of the element even if the entire element is not valid.
|
||||
func (re RawElement) DebugString() string { return bsoncore.Element(re).DebugString() }
|
||||
287
vendor/go.mongodb.org/mongo-driver/bson/raw_value.go
generated
vendored
Executable file
@@ -0,0 +1,287 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// ErrNilContext is returned when the provided DecodeContext is nil.
|
||||
var ErrNilContext = errors.New("DecodeContext cannot be nil")
|
||||
|
||||
// ErrNilRegistry is returned when the provided registry is nil.
|
||||
var ErrNilRegistry = errors.New("Registry cannot be nil")
|
||||
|
||||
// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to
|
||||
// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that
|
||||
// represent the element.
|
||||
//
|
||||
// This type wraps bsoncore.Value for most of its functionality.
|
||||
type RawValue struct {
|
||||
Type bsontype.Type
|
||||
Value []byte
|
||||
|
||||
r *bsoncodec.Registry
|
||||
}
|
||||
|
||||
// Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an
|
||||
// error is returned. This method will use the registry used to create the RawValue, if the RawValue
|
||||
// was created from partial BSON processing, or it will use the default registry. Users wishing to
|
||||
// specify the registry to use should use UnmarshalWithRegistry.
|
||||
func (rv RawValue) Unmarshal(val interface{}) error {
|
||||
reg := rv.r
|
||||
if reg == nil {
|
||||
reg = DefaultRegistry
|
||||
}
|
||||
return rv.UnmarshalWithRegistry(reg, val)
|
||||
}
|
||||
|
||||
// Equal compares rv and rv2 and returns true if they are equal.
|
||||
func (rv RawValue) Equal(rv2 RawValue) bool {
|
||||
if rv.Type != rv2.Type {
|
||||
return false
|
||||
}
|
||||
|
||||
if !bytes.Equal(rv.Value, rv2.Value) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// UnmarshalWithRegistry performs the same unmarshalling as Unmarshal but uses the provided registry
|
||||
// instead of the one attached or the default registry.
|
||||
func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) error {
|
||||
if r == nil {
|
||||
return ErrNilRegistry
|
||||
}
|
||||
|
||||
vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
|
||||
rval := reflect.ValueOf(val)
|
||||
if rval.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
|
||||
}
|
||||
rval = rval.Elem()
|
||||
dec, err := r.LookupDecoder(rval.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval)
|
||||
}
|
||||
|
||||
// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext
|
||||
// instead of the one attached or the default registry.
|
||||
func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error {
|
||||
if dc == nil {
|
||||
return ErrNilContext
|
||||
}
|
||||
|
||||
vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
|
||||
rval := reflect.ValueOf(val)
|
||||
if rval.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
|
||||
}
|
||||
rval = rval.Elem()
|
||||
dec, err := dc.LookupDecoder(rval.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dec.DecodeValue(*dc, vr, rval)
|
||||
}
|
||||
|
||||
func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} }
|
||||
func convertToCoreValue(v RawValue) bsoncore.Value { return bsoncore.Value{Type: v.Type, Data: v.Value} }
|
||||
|
||||
// Validate ensures the value is a valid BSON value.
|
||||
func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() }
|
||||
|
||||
// IsNumber returns true if the type of v is a numeric BSON type.
|
||||
func (rv RawValue) IsNumber() bool { return convertToCoreValue(rv).IsNumber() }
|
||||
|
||||
// String implements the fmt.Stringer interface. This method will return values in extended JSON
|
||||
// format. If the value is not valid, this returns an empty string
|
||||
func (rv RawValue) String() string { return convertToCoreValue(rv).String() }
|
||||
|
||||
// DebugString outputs a human readable version of Document. It will attempt to stringify the
|
||||
// valid components of the document even if the entire document is not valid.
|
||||
func (rv RawValue) DebugString() string { return convertToCoreValue(rv).DebugString() }
|
||||
|
||||
// Double returns the float64 value for this element.
|
||||
// It panics if e's BSON type is not bsontype.Double.
|
||||
func (rv RawValue) Double() float64 { return convertToCoreValue(rv).Double() }
|
||||
|
||||
// DoubleOK is the same as Double, but returns a boolean instead of panicking.
|
||||
func (rv RawValue) DoubleOK() (float64, bool) { return convertToCoreValue(rv).DoubleOK() }
|
||||
|
||||
// StringValue returns the string value for this element.
|
||||
// It panics if e's BSON type is not bsontype.String.
|
||||
//
|
||||
// NOTE: This method is called StringValue to avoid a collision with the String method which
|
||||
// implements the fmt.Stringer interface.
|
||||
func (rv RawValue) StringValue() string { return convertToCoreValue(rv).StringValue() }
|
||||
|
||||
// StringValueOK is the same as StringValue, but returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) StringValueOK() (string, bool) { return convertToCoreValue(rv).StringValueOK() }
|
||||
|
||||
// Document returns the BSON document the Value represents as a Document. It panics if the
|
||||
// value is a BSON type other than document.
|
||||
func (rv RawValue) Document() Raw { return Raw(convertToCoreValue(rv).Document()) }
|
||||
|
||||
// DocumentOK is the same as Document, except it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) DocumentOK() (Raw, bool) {
|
||||
doc, ok := convertToCoreValue(rv).DocumentOK()
|
||||
return Raw(doc), ok
|
||||
}
|
||||
|
||||
// Array returns the BSON array the Value represents as an Array. It panics if the
|
||||
// value is a BSON type other than array.
|
||||
func (rv RawValue) Array() Raw { return Raw(convertToCoreValue(rv).Array()) }
|
||||
|
||||
// ArrayOK is the same as Array, except it returns a boolean instead
|
||||
// of panicking.
|
||||
func (rv RawValue) ArrayOK() (Raw, bool) {
|
||||
doc, ok := convertToCoreValue(rv).ArrayOK()
|
||||
return Raw(doc), ok
|
||||
}
|
||||
|
||||
// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
|
||||
// other than binary.
|
||||
func (rv RawValue) Binary() (subtype byte, data []byte) { return convertToCoreValue(rv).Binary() }
|
||||
|
||||
// BinaryOK is the same as Binary, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) BinaryOK() (subtype byte, data []byte, ok bool) {
|
||||
return convertToCoreValue(rv).BinaryOK()
|
||||
}
|
||||
|
||||
// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
|
||||
// type other than objectid.
|
||||
func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv).ObjectID() }
|
||||
|
||||
// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { return convertToCoreValue(rv).ObjectIDOK() }
|
||||
|
||||
// Boolean returns the boolean value the Value represents. It panics if the
|
||||
// value is a BSON type other than boolean.
|
||||
func (rv RawValue) Boolean() bool { return convertToCoreValue(rv).Boolean() }
|
||||
|
||||
// BooleanOK is the same as Boolean, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) BooleanOK() (bool, bool) { return convertToCoreValue(rv).BooleanOK() }
|
||||
|
||||
// DateTime returns the BSON datetime value the Value represents as a
|
||||
// unix timestamp. It panics if the value is a BSON type other than datetime.
|
||||
func (rv RawValue) DateTime() int64 { return convertToCoreValue(rv).DateTime() }
|
||||
|
||||
// DateTimeOK is the same as DateTime, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) DateTimeOK() (int64, bool) { return convertToCoreValue(rv).DateTimeOK() }
|
||||
|
||||
// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
|
||||
// type other than datetime.
|
||||
func (rv RawValue) Time() time.Time { return convertToCoreValue(rv).Time() }
|
||||
|
||||
// TimeOK is the same as Time, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) TimeOK() (time.Time, bool) { return convertToCoreValue(rv).TimeOK() }
|
||||
|
||||
// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
|
||||
// type other than regex.
|
||||
func (rv RawValue) Regex() (pattern, options string) { return convertToCoreValue(rv).Regex() }
|
||||
|
||||
// RegexOK is the same as Regex, except it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) RegexOK() (pattern, options string, ok bool) {
|
||||
return convertToCoreValue(rv).RegexOK()
|
||||
}
|
||||
|
||||
// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
|
||||
// type other than DBPointer.
|
||||
func (rv RawValue) DBPointer() (string, primitive.ObjectID) { return convertToCoreValue(rv).DBPointer() }
|
||||
|
||||
// DBPointerOK is the same as DBPointer, except that it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) DBPointerOK() (string, primitive.ObjectID, bool) {
|
||||
return convertToCoreValue(rv).DBPointerOK()
|
||||
}
|
||||
|
||||
// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
|
||||
// a BSON type other than JavaScript code.
|
||||
func (rv RawValue) JavaScript() string { return convertToCoreValue(rv).JavaScript() }
|
||||
|
||||
// JavaScriptOK is the same as JavaScript, except that it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) JavaScriptOK() (string, bool) { return convertToCoreValue(rv).JavaScriptOK() }
|
||||
|
||||
// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
|
||||
// type other than symbol.
|
||||
func (rv RawValue) Symbol() string { return convertToCoreValue(rv).Symbol() }
|
||||
|
||||
// SymbolOK is the same as Symbol, except that it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) SymbolOK() (string, bool) { return convertToCoreValue(rv).SymbolOK() }
|
||||
|
||||
// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
|
||||
// It panics if the value is a BSON type other than JavaScript code with scope.
|
||||
func (rv RawValue) CodeWithScope() (string, Raw) {
|
||||
code, scope := convertToCoreValue(rv).CodeWithScope()
|
||||
return code, Raw(scope)
|
||||
}
|
||||
|
||||
// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) CodeWithScopeOK() (string, Raw, bool) {
|
||||
code, scope, ok := convertToCoreValue(rv).CodeWithScopeOK()
|
||||
return code, Raw(scope), ok
|
||||
}
|
||||
|
||||
// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
|
||||
// int32.
|
||||
func (rv RawValue) Int32() int32 { return convertToCoreValue(rv).Int32() }
|
||||
|
||||
// Int32OK is the same as Int32, except that it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() }
|
||||
|
||||
// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
|
||||
// BSON type other than timestamp.
|
||||
func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() }
|
||||
|
||||
// TimestampOK is the same as Timestamp, except that it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) TimestampOK() (t, i uint32, ok bool) { return convertToCoreValue(rv).TimestampOK() }
|
||||
|
||||
// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
|
||||
// int64.
|
||||
func (rv RawValue) Int64() int64 { return convertToCoreValue(rv).Int64() }
|
||||
|
||||
// Int64OK is the same as Int64, except that it returns a boolean instead of
|
||||
// panicking.
|
||||
func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() }
|
||||
|
||||
// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
|
||||
// decimal.
|
||||
func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() }
|
||||
|
||||
// Decimal128OK is the same as Decimal128, except that it returns a boolean
|
||||
// instead of panicking.
|
||||
func (rv RawValue) Decimal128OK() (primitive.Decimal128, bool) {
|
||||
return convertToCoreValue(rv).Decimal128OK()
|
||||
}
|
||||
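RawValue defers decoding until a value is actually needed. A sketch, assuming bson.Marshal and the bson.M alias from elsewhere in this package:

package main

import (
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson"
)

func main() {
    data, err := bson.Marshal(bson.M{"count": int32(42)})
    if err != nil {
        log.Fatal(err)
    }

    // Pull out a single value without decoding the whole document.
    rv := bson.Raw(data).Lookup("count")
    fmt.Println(rv.Type, rv.IsNumber()) // the BSON type and true

    // Unmarshal the RawValue into a Go int using the default registry.
    var n int
    if err := rv.Unmarshal(&n); err != nil {
        log.Fatal(err)
    }
    fmt.Println(n) // 42
}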
24
vendor/go.mongodb.org/mongo-driver/bson/registry.go
generated
vendored
Executable file
@@ -0,0 +1,24 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import "go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
|
||||
// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
|
||||
// primitive codecs.
|
||||
var DefaultRegistry = NewRegistryBuilder().Build()
|
||||
|
||||
// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
|
||||
// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
|
||||
// PrimitiveCodecs type in this package.
|
||||
func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
|
||||
rb := bsoncodec.NewRegistryBuilder()
|
||||
bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
|
||||
bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
|
||||
primitiveCodecs.RegisterPrimitiveCodecs(rb)
|
||||
return rb
|
||||
}
|
||||
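NewRegistryBuilder above is the hook for installing custom codecs. A minimal sketch that simply rebuilds the default registry and hands it to UnmarshalWithRegistry (defined later in this package); the document content is illustrative:

package main

import (
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson"
)

func main() {
    // Equivalent to bson.DefaultRegistry; custom codecs could be registered on
    // the builder before Build is called.
    reg := bson.NewRegistryBuilder().Build()

    data, err := bson.Marshal(bson.M{"greeting": "hello"})
    if err != nil {
        log.Fatal(err)
    }

    var out bson.M
    if err := bson.UnmarshalWithRegistry(reg, data, &out); err != nil {
        log.Fatal(err)
    }
    fmt.Println(out["greeting"]) // hello
}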
85
vendor/go.mongodb.org/mongo-driver/bson/types.go
generated
vendored
Executable file
@@ -0,0 +1,85 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// These constants uniquely refer to each BSON type.
|
||||
const (
|
||||
TypeDouble = bsontype.Double
|
||||
TypeString = bsontype.String
|
||||
TypeEmbeddedDocument = bsontype.EmbeddedDocument
|
||||
TypeArray = bsontype.Array
|
||||
TypeBinary = bsontype.Binary
|
||||
TypeUndefined = bsontype.Undefined
|
||||
TypeObjectID = bsontype.ObjectID
|
||||
TypeBoolean = bsontype.Boolean
|
||||
TypeDateTime = bsontype.DateTime
|
||||
TypeNull = bsontype.Null
|
||||
TypeRegex = bsontype.Regex
|
||||
TypeDBPointer = bsontype.DBPointer
|
||||
TypeJavaScript = bsontype.JavaScript
|
||||
TypeSymbol = bsontype.Symbol
|
||||
TypeCodeWithScope = bsontype.CodeWithScope
|
||||
TypeInt32 = bsontype.Int32
|
||||
TypeTimestamp = bsontype.Timestamp
|
||||
TypeInt64 = bsontype.Int64
|
||||
TypeDecimal128 = bsontype.Decimal128
|
||||
TypeMinKey = bsontype.MinKey
|
||||
TypeMaxKey = bsontype.MaxKey
|
||||
)
|
||||
|
||||
var tBinary = reflect.TypeOf(primitive.Binary{})
|
||||
var tBool = reflect.TypeOf(false)
|
||||
var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
|
||||
var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
|
||||
var tDecimal = reflect.TypeOf(primitive.Decimal128{})
|
||||
var tD = reflect.TypeOf(D{})
|
||||
var tA = reflect.TypeOf(A{})
|
||||
var tDateTime = reflect.TypeOf(primitive.DateTime(0))
|
||||
var tUndefined = reflect.TypeOf(primitive.Undefined{})
|
||||
var tNull = reflect.TypeOf(primitive.Null{})
|
||||
var tRawValue = reflect.TypeOf(RawValue{})
|
||||
var tFloat32 = reflect.TypeOf(float32(0))
|
||||
var tFloat64 = reflect.TypeOf(float64(0))
|
||||
var tInt = reflect.TypeOf(int(0))
|
||||
var tInt8 = reflect.TypeOf(int8(0))
|
||||
var tInt16 = reflect.TypeOf(int16(0))
|
||||
var tInt32 = reflect.TypeOf(int32(0))
|
||||
var tInt64 = reflect.TypeOf(int64(0))
|
||||
var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
|
||||
var tOID = reflect.TypeOf(primitive.ObjectID{})
|
||||
var tRaw = reflect.TypeOf(Raw(nil))
|
||||
var tRegex = reflect.TypeOf(primitive.Regex{})
|
||||
var tString = reflect.TypeOf("")
|
||||
var tSymbol = reflect.TypeOf(primitive.Symbol(""))
|
||||
var tTime = reflect.TypeOf(time.Time{})
|
||||
var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
|
||||
var tUint = reflect.TypeOf(uint(0))
|
||||
var tUint8 = reflect.TypeOf(uint8(0))
|
||||
var tUint16 = reflect.TypeOf(uint16(0))
|
||||
var tUint32 = reflect.TypeOf(uint32(0))
|
||||
var tUint64 = reflect.TypeOf(uint64(0))
|
||||
var tMinKey = reflect.TypeOf(primitive.MinKey{})
|
||||
var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
|
||||
|
||||
var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
var tEmptySlice = reflect.TypeOf([]interface{}(nil))
|
||||
|
||||
var zeroVal reflect.Value
|
||||
|
||||
// this references the quantity of milliseconds between zero time and
|
||||
// the unix epoch. useful for making sure that we convert time.Time
|
||||
// objects correctly to match the legacy bson library's handling of
|
||||
// time.Time values.
|
||||
const zeroEpochMs = int64(62135596800000)
|
||||
101
vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
generated
vendored
Executable file
@@ -0,0 +1,101 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
// Unmarshaler is an interface implemented by types that can unmarshal a BSON
|
||||
// document representation of themselves. The BSON bytes can be assumed to be
|
||||
// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
|
||||
// after returning.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalBSON([]byte) error
|
||||
}
|
||||
|
||||
// ValueUnmarshaler is an interface implemented by types that can unmarshal a
|
||||
// BSON value representation of themselves. The BSON bytes and type can be
|
||||
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
|
||||
// wishes to retain the data after returning.
|
||||
type ValueUnmarshaler interface {
|
||||
UnmarshalBSONValue(bsontype.Type, []byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the BSON-encoded data and stores the result in the value
|
||||
// pointed to by val. If val is nil or not a pointer, Unmarshal returns
|
||||
// InvalidUnmarshalError.
|
||||
func Unmarshal(data []byte, val interface{}) error {
|
||||
return UnmarshalWithRegistry(DefaultRegistry, data, val)
|
||||
}
|
||||
|
||||
// UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
|
||||
// stores the result in the value pointed to by val. If val is nil or not
|
||||
// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
|
||||
func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error {
|
||||
vr := bsonrw.NewBSONDocumentReader(data)
|
||||
return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
|
||||
}
|
||||
|
||||
// UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and
|
||||
// stores the result in the value pointed to by val. If val is nil or not
|
||||
// a pointer, UnmarshalWithContext returns InvalidUnmarshalError.
|
||||
func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error {
|
||||
vr := bsonrw.NewBSONDocumentReader(data)
|
||||
return unmarshalFromReader(dc, vr, val)
|
||||
}
|
||||
|
||||
// UnmarshalExtJSON parses the extended JSON-encoded data and stores the result
|
||||
// in the value pointed to by val. If val is nil or not a pointer, Unmarshal
|
||||
// returns InvalidUnmarshalError.
|
||||
func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error {
|
||||
return UnmarshalExtJSONWithRegistry(DefaultRegistry, data, canonical, val)
|
||||
}
|
||||
|
||||
// UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using
|
||||
// Registry r and stores the result in the value pointed to by val. If val is
|
||||
// nil or not a pointer, UnmarshalExtJSONWithRegistry returns InvalidUnmarshalError.
|
||||
func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error {
|
||||
ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, ejvr, val)
|
||||
}
|
||||
|
||||
// UnmarshalExtJSONWithContext parses the extended JSON-encoded data using
|
||||
// DecodeContext dc and stores the result in the value pointed to by val. If val is
|
||||
// nil or not a pointer, UnmarshalExtJSONWithContext returns InvalidUnmarshalError.
|
||||
func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
|
||||
ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return unmarshalFromReader(dc, ejvr, val)
|
||||
}
|
||||
|
||||
func unmarshalFromReader(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val interface{}) error {
|
||||
dec := decPool.Get().(*Decoder)
|
||||
defer decPool.Put(dec)
|
||||
|
||||
err := dec.Reset(vr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dec.SetContext(dc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dec.Decode(val)
|
||||
}
|
||||
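A short sketch of Unmarshal and UnmarshalExtJSON with struct tags; the user type and sample JSON are illustrative:

package main

import (
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson"
)

type user struct {
    Name string `bson:"name"`
    Age  int32  `bson:"age,omitempty"`
}

func main() {
    data, err := bson.Marshal(user{Name: "ada", Age: 36})
    if err != nil {
        log.Fatal(err)
    }

    // BSON bytes -> struct via the default registry.
    var u user
    if err := bson.Unmarshal(data, &u); err != nil {
        log.Fatal(err)
    }

    // Extended JSON -> struct; the canonical flag selects canonical extended JSON parsing.
    var v user
    if err := bson.UnmarshalExtJSON([]byte(`{"name": "lin", "age": 7}`), false, &v); err != nil {
        log.Fatal(err)
    }
    fmt.Println(u, v)
}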
49
vendor/go.mongodb.org/mongo-driver/event/monitoring.go
generated
vendored
Executable file
@@ -0,0 +1,49 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package event // import "go.mongodb.org/mongo-driver/event"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
)
|
||||
|
||||
// CommandStartedEvent represents an event generated when a command is sent to a server.
|
||||
type CommandStartedEvent struct {
|
||||
Command bson.Raw
|
||||
DatabaseName string
|
||||
CommandName string
|
||||
RequestID int64
|
||||
ConnectionID string
|
||||
}
|
||||
|
||||
// CommandFinishedEvent represents a generic command finishing.
|
||||
type CommandFinishedEvent struct {
|
||||
DurationNanos int64
|
||||
CommandName string
|
||||
RequestID int64
|
||||
ConnectionID string
|
||||
}
|
||||
|
||||
// CommandSucceededEvent represents an event generated when a command's execution succeeds.
|
||||
type CommandSucceededEvent struct {
|
||||
CommandFinishedEvent
|
||||
Reply bson.Raw
|
||||
}
|
||||
|
||||
// CommandFailedEvent represents an event generated when a command's execution fails.
|
||||
type CommandFailedEvent struct {
|
||||
CommandFinishedEvent
|
||||
Failure string
|
||||
}
|
||||
|
||||
// CommandMonitor represents a monitor that is triggered for different events.
|
||||
type CommandMonitor struct {
|
||||
Started func(context.Context, *CommandStartedEvent)
|
||||
Succeeded func(context.Context, *CommandSucceededEvent)
|
||||
Failed func(context.Context, *CommandFailedEvent)
|
||||
}
|
||||
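CommandMonitor is a plain struct of callbacks. A sketch of wiring one up; attaching it to a client (for example via options.Client().SetMonitor) is assumed and not shown:

package main

import (
    "context"
    "fmt"

    "go.mongodb.org/mongo-driver/event"
)

func main() {
    // A monitor that logs command starts and failures.
    monitor := &event.CommandMonitor{
        Started: func(_ context.Context, e *event.CommandStartedEvent) {
            fmt.Printf("started %s on %s: %s\n", e.CommandName, e.DatabaseName, e.Command)
        },
        Failed: func(_ context.Context, e *event.CommandFailedEvent) {
            fmt.Printf("failed %s: %s\n", e.CommandName, e.Failure)
        },
    }
    _ = monitor // would be passed to the client options in real use
}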
74
vendor/go.mongodb.org/mongo-driver/internal/channel_connection.go
generated
vendored
Executable file
@@ -0,0 +1,74 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx"
|
||||
"go.mongodb.org/mongo-driver/x/network/wiremessage"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ChannelConn implements the connection.Connection interface by reading and writing wire messages
|
||||
// to a channel
|
||||
type ChannelConn struct {
|
||||
WriteErr error
|
||||
Written chan wiremessage.WireMessage
|
||||
ReadResp chan wiremessage.WireMessage
|
||||
ReadErr chan error
|
||||
}
|
||||
|
||||
func (c *ChannelConn) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
|
||||
select {
|
||||
case c.Written <- wm:
|
||||
default:
|
||||
c.WriteErr = errors.New("could not write wiremessage to written channel")
|
||||
}
|
||||
return c.WriteErr
|
||||
}
|
||||
|
||||
func (c *ChannelConn) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
|
||||
var wm wiremessage.WireMessage
|
||||
var err error
|
||||
select {
|
||||
case wm = <-c.ReadResp:
|
||||
case err = <-c.ReadErr:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return wm, err
|
||||
}
|
||||
|
||||
func (c *ChannelConn) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ChannelConn) Expired() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *ChannelConn) Alive() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *ChannelConn) ID() string {
|
||||
return "faked"
|
||||
}
|
||||
|
||||
// MakeReply creates an OP_REPLY wiremessage from a BSON document.
|
||||
func MakeReply(doc bsonx.Doc) (wiremessage.WireMessage, error) {
|
||||
rdr, err := doc.MarshalBSON()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create document: %v", err)
|
||||
}
|
||||
return wiremessage.Reply{
|
||||
NumberReturned: 1,
|
||||
Documents: []bson.Raw{rdr},
|
||||
}, nil
|
||||
}
|
||||
10
vendor/go.mongodb.org/mongo-driver/internal/const.go
generated
vendored
Executable file
@@ -0,0 +1,10 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package internal // import "go.mongodb.org/mongo-driver/internal"
|
||||
|
||||
// Version is the current version of the driver.
|
||||
var Version = "local build"
|
||||
119
vendor/go.mongodb.org/mongo-driver/internal/error.go
generated
vendored
Executable file
@@ -0,0 +1,119 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// WrappedError represents an error that contains another error.
|
||||
type WrappedError interface {
|
||||
// Message gets the basic message of the error.
|
||||
Message() string
|
||||
// Inner gets the inner error if one exists.
|
||||
Inner() error
|
||||
}
|
||||
|
||||
// RolledUpErrorMessage gets a flattened error message.
|
||||
func RolledUpErrorMessage(err error) string {
|
||||
if wrappedErr, ok := err.(WrappedError); ok {
|
||||
inner := wrappedErr.Inner()
|
||||
if inner != nil {
|
||||
return fmt.Sprintf("%s: %s", wrappedErr.Message(), RolledUpErrorMessage(inner))
|
||||
}
|
||||
|
||||
return wrappedErr.Message()
|
||||
}
|
||||
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
// UnwrapError attempts to unwrap the error down to its root cause.
|
||||
func UnwrapError(err error) error {
|
||||
|
||||
switch tErr := err.(type) {
|
||||
case WrappedError:
|
||||
return UnwrapError(tErr.Inner())
|
||||
case *multiError:
|
||||
return UnwrapError(tErr.errors[0])
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// WrapError wraps an error with a message.
|
||||
func WrapError(inner error, message string) error {
|
||||
return &wrappedError{message, inner}
|
||||
}
|
||||
|
||||
// WrapErrorf wraps an error with a message.
|
||||
func WrapErrorf(inner error, format string, args ...interface{}) error {
|
||||
return &wrappedError{fmt.Sprintf(format, args...), inner}
|
||||
}
|
||||
|
||||
// MultiError combines multiple errors into a single error. If there are no errors,
|
||||
// nil is returned. If there is 1 error, it is returned. Otherwise, they are combined.
|
||||
func MultiError(errors ...error) error {
|
||||
|
||||
// remove nils from the error list
|
||||
var nonNils []error
|
||||
for _, e := range errors {
|
||||
if e != nil {
|
||||
nonNils = append(nonNils, e)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(nonNils) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return nonNils[0]
|
||||
default:
|
||||
return &multiError{
|
||||
message: "multiple errors encountered",
|
||||
errors: nonNils,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type multiError struct {
|
||||
message string
|
||||
errors []error
|
||||
}
|
||||
|
||||
func (e *multiError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e *multiError) Error() string {
|
||||
result := e.message
|
||||
for _, e := range e.errors {
|
||||
result += fmt.Sprintf("\n %s", e)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (e *multiError) Errors() []error {
|
||||
return e.errors
|
||||
}
|
||||
|
||||
type wrappedError struct {
|
||||
message string
|
||||
inner error
|
||||
}
|
||||
|
||||
func (e *wrappedError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e *wrappedError) Error() string {
|
||||
return RolledUpErrorMessage(e)
|
||||
}
|
||||
|
||||
func (e *wrappedError) Inner() error {
|
||||
return e.inner
|
||||
}
|
||||
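Because the internal package cannot be imported from outside the module, the following sketch is written as a hypothetical test file next to error.go:

package internal

import (
    "errors"
    "fmt"
    "testing"
)

func TestErrorWrappingSketch(t *testing.T) {
    root := errors.New("connection reset")

    // Wrap with context, then flatten and unwrap again.
    wrapped := WrapErrorf(root, "handshake with %s failed", "localhost:27017")
    if got := wrapped.Error(); got != "handshake with localhost:27017 failed: connection reset" {
        t.Fatalf("unexpected message: %s", got)
    }
    if UnwrapError(wrapped) != root {
        t.Fatal("expected the root error back")
    }

    // MultiError drops nils and combines whatever is left.
    fmt.Println(MultiError(nil, root, wrapped))
}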
54
vendor/go.mongodb.org/mongo-driver/internal/results.go
generated
vendored
Executable file
@@ -0,0 +1,54 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"time"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// IsMasterResult is the result of executing the
|
||||
// ismaster command.
|
||||
type IsMasterResult struct {
|
||||
Arbiters []string `bson:"arbiters,omitempty"`
|
||||
ArbiterOnly bool `bson:"arbiterOnly,omitempty"`
|
||||
ElectionID primitive.ObjectID `bson:"electionId,omitempty"`
|
||||
Hidden bool `bson:"hidden,omitempty"`
|
||||
Hosts []string `bson:"hosts,omitempty"`
|
||||
IsMaster bool `bson:"ismaster,omitempty"`
|
||||
IsReplicaSet bool `bson:"isreplicaset,omitempty"`
|
||||
LastWriteTimestamp time.Time `bson:"lastWriteDate,omitempty"`
|
||||
MaxBSONObjectSize uint32 `bson:"maxBsonObjectSize,omitempty"`
|
||||
MaxMessageSizeBytes uint32 `bson:"maxMessageSizeBytes,omitempty"`
|
||||
MaxWriteBatchSize uint16 `bson:"maxWriteBatchSize,omitempty"`
|
||||
Me string `bson:"me,omitempty"`
|
||||
MaxWireVersion int32 `bson:"maxWireVersion,omitempty"`
|
||||
MinWireVersion int32 `bson:"minWireVersion,omitempty"`
|
||||
Msg string `bson:"msg,omitempty"`
|
||||
OK int32 `bson:"ok"`
|
||||
Passives []string `bson:"passives,omitempty"`
|
||||
ReadOnly bool `bson:"readOnly,omitempty"`
|
||||
Secondary bool `bson:"secondary,omitempty"`
|
||||
SetName string `bson:"setName,omitempty"`
|
||||
SetVersion uint32 `bson:"setVersion,omitempty"`
|
||||
Tags map[string]string `bson:"tags,omitempty"`
|
||||
}
|
||||
|
||||
// BuildInfoResult is the result of executing the
|
||||
// buildInfo command.
|
||||
type BuildInfoResult struct {
|
||||
OK bool `bson:"ok"`
|
||||
GitVersion string `bson:"gitVersion,omitempty"`
|
||||
Version string `bson:"version,omitempty"`
|
||||
VersionArray []uint8 `bson:"versionArray,omitempty"`
|
||||
}
|
||||
|
||||
// GetLastErrorResult is the result of executing the
|
||||
// getLastError command.
|
||||
type GetLastErrorResult struct {
|
||||
ConnectionID uint32 `bson:"connectionId"`
|
||||
}
|
||||
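A hypothetical test next to results.go showing how the bson struct tags above drive decoding of a trimmed-down ismaster reply (field values are illustrative):

package internal

import (
    "testing"

    "go.mongodb.org/mongo-driver/bson"
)

func TestIsMasterResultSketch(t *testing.T) {
    // Field names must match the bson struct tags on IsMasterResult.
    reply, err := bson.Marshal(bson.M{
        "ismaster":       true,
        "maxWireVersion": int32(7),
        "ok":             int32(1),
    })
    if err != nil {
        t.Fatal(err)
    }

    var res IsMasterResult
    if err := bson.Unmarshal(reply, &res); err != nil {
        t.Fatal(err)
    }
    if !res.IsMaster || res.MaxWireVersion != 7 || res.OK != 1 {
        t.Fatalf("unexpected result: %+v", res)
    }
}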
57
vendor/go.mongodb.org/mongo-driver/internal/semaphore.go
generated
vendored
Executable file
@@ -0,0 +1,57 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// NewSemaphore creates a new semaphore.
|
||||
func NewSemaphore(slots uint64) *Semaphore {
|
||||
ch := make(chan struct{}, slots)
|
||||
for i := uint64(0); i < slots; i++ {
|
||||
ch <- struct{}{}
|
||||
}
|
||||
|
||||
return &Semaphore{
|
||||
permits: ch,
|
||||
}
|
||||
}
|
||||
|
||||
// Semaphore is a synchronization primitive that controls access
|
||||
// to a common resource.
|
||||
type Semaphore struct {
|
||||
permits chan struct{}
|
||||
}
|
||||
|
||||
// Len gets the number of permits available.
|
||||
func (s *Semaphore) Len() uint64 {
|
||||
return uint64(len(s.permits))
|
||||
}
|
||||
|
||||
// Wait waits until a resource is available or until the context
|
||||
// is done.
|
||||
func (s *Semaphore) Wait(ctx context.Context) error {
|
||||
select {
|
||||
case <-s.permits:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Release releases a resource back into the pool.
|
||||
func (s *Semaphore) Release() error {
|
||||
select {
|
||||
case s.permits <- struct{}{}:
|
||||
default:
|
||||
return errors.New("internal.Semaphore.Release: attempt to release more resources than are available")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
42
vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
generated
vendored
Executable file
42
vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
generated
vendored
Executable file
@@ -0,0 +1,42 @@
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
)
|
||||
|
||||
// batchCursor is the interface implemented by types that can provide batches of document results.
|
||||
// The Cursor type is built on top of this type.
|
||||
type batchCursor interface {
|
||||
// ID returns the ID of the cursor.
|
||||
ID() int64
|
||||
|
||||
// Next returns true if there is a batch available.
|
||||
Next(context.Context) bool
|
||||
|
||||
// Batch will return a DocumentSequence for the current batch of documents. The returned
|
||||
// DocumentSequence is only valid until the next call to Next or Close.
|
||||
Batch() *bsoncore.DocumentSequence
|
||||
|
||||
// Server returns a pointer to the cursor's server.
|
||||
Server() driver.Server
|
||||
|
||||
// Err returns the last error encountered.
|
||||
Err() error
|
||||
|
||||
// Close closes the cursor.
|
||||
Close(context.Context) error
|
||||
}
|
||||
|
||||
// changeStreamCursor is the interface implemented by batch cursors that also provide the functionality for retrieving
|
||||
// a postBatchResumeToken from commands and allows for the cursor to be killed rather than closed
|
||||
type changeStreamCursor interface {
|
||||
batchCursor
|
||||
// PostBatchResumeToken returns the latest seen post batch resume token.
|
||||
PostBatchResumeToken() bsoncore.Document
|
||||
|
||||
// KillCursor kills cursor on server without closing batch cursor
|
||||
KillCursor(context.Context) error
|
||||
}
|
||||
341
vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
generated
vendored
Executable file
341
vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
generated
vendored
Executable file
@@ -0,0 +1,341 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driverlegacy"
|
||||
)
|
||||
|
||||
// WriteModel is the interface satisfied by all models for bulk writes.
|
||||
type WriteModel interface {
|
||||
convertModel() driverlegacy.WriteModel
|
||||
}
|
||||
|
||||
// InsertOneModel is the write model for insert operations.
|
||||
type InsertOneModel struct {
|
||||
Document interface{}
|
||||
}
|
||||
|
||||
// NewInsertOneModel creates a new InsertOneModel.
|
||||
func NewInsertOneModel() *InsertOneModel {
|
||||
return &InsertOneModel{}
|
||||
}
|
||||
|
||||
// SetDocument sets the BSON document for the InsertOneModel.
|
||||
func (iom *InsertOneModel) SetDocument(doc interface{}) *InsertOneModel {
|
||||
iom.Document = doc
|
||||
return iom
|
||||
}
|
||||
|
||||
func (iom *InsertOneModel) convertModel() driverlegacy.WriteModel {
|
||||
return driverlegacy.InsertOneModel{
|
||||
Document: iom.Document,
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteOneModel is the write model for delete operations.
|
||||
type DeleteOneModel struct {
|
||||
Filter interface{}
|
||||
Collation *options.Collation
|
||||
}
|
||||
|
||||
// NewDeleteOneModel creates a new DeleteOneModel.
|
||||
func NewDeleteOneModel() *DeleteOneModel {
|
||||
return &DeleteOneModel{}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter for the DeleteOneModel.
|
||||
func (dom *DeleteOneModel) SetFilter(filter interface{}) *DeleteOneModel {
|
||||
dom.Filter = filter
|
||||
return dom
|
||||
}
|
||||
|
||||
// SetCollation sets the collation for the DeleteOneModel.
|
||||
func (dom *DeleteOneModel) SetCollation(collation *options.Collation) *DeleteOneModel {
|
||||
dom.Collation = collation
|
||||
return dom
|
||||
}
|
||||
|
||||
func (dom *DeleteOneModel) convertModel() driverlegacy.WriteModel {
|
||||
return driverlegacy.DeleteOneModel{
|
||||
Collation: dom.Collation,
|
||||
Filter: dom.Filter,
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteManyModel is the write model for deleteMany operations.
|
||||
type DeleteManyModel struct {
|
||||
Filter interface{}
|
||||
Collation *options.Collation
|
||||
}
|
||||
|
||||
// NewDeleteManyModel creates a new DeleteManyModel.
|
||||
func NewDeleteManyModel() *DeleteManyModel {
|
||||
return &DeleteManyModel{}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter for the DeleteManyModel.
|
||||
func (dmm *DeleteManyModel) SetFilter(filter interface{}) *DeleteManyModel {
|
||||
dmm.Filter = filter
|
||||
return dmm
|
||||
}
|
||||
|
||||
// SetCollation sets the collation for the DeleteManyModel.
|
||||
func (dmm *DeleteManyModel) SetCollation(collation *options.Collation) *DeleteManyModel {
|
||||
dmm.Collation = collation
|
||||
return dmm
|
||||
}
|
||||
|
||||
func (dmm *DeleteManyModel) convertModel() driverlegacy.WriteModel {
|
||||
return driverlegacy.DeleteManyModel{
|
||||
Collation: dmm.Collation,
|
||||
Filter: dmm.Filter,
|
||||
}
|
||||
}
|
||||
|
||||
// ReplaceOneModel is the write model for replace operations.
|
||||
type ReplaceOneModel struct {
|
||||
Collation *options.Collation
|
||||
Upsert *bool
|
||||
Filter interface{}
|
||||
Replacement interface{}
|
||||
}
|
||||
|
||||
// NewReplaceOneModel creates a new ReplaceOneModel.
|
||||
func NewReplaceOneModel() *ReplaceOneModel {
|
||||
return &ReplaceOneModel{}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter for the ReplaceOneModel.
|
||||
func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel {
|
||||
rom.Filter = filter
|
||||
return rom
|
||||
}
|
||||
|
||||
// SetReplacement sets the replacement document for the ReplaceOneModel.
|
||||
func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel {
|
||||
rom.Replacement = rep
|
||||
return rom
|
||||
}
|
||||
|
||||
// SetCollation sets the collation for the ReplaceOneModel.
|
||||
func (rom *ReplaceOneModel) SetCollation(collation *options.Collation) *ReplaceOneModel {
|
||||
rom.Collation = collation
|
||||
return rom
|
||||
}
|
||||
|
||||
// SetUpsert specifies if a new document should be created if no document matches the query.
|
||||
func (rom *ReplaceOneModel) SetUpsert(upsert bool) *ReplaceOneModel {
|
||||
rom.Upsert = &upsert
|
||||
return rom
|
||||
}
|
||||
|
||||
func (rom *ReplaceOneModel) convertModel() driverlegacy.WriteModel {
|
||||
um := driverlegacy.UpdateModel{
|
||||
Collation: rom.Collation,
|
||||
}
|
||||
if rom.Upsert != nil {
|
||||
um.Upsert = *rom.Upsert
|
||||
um.UpsertSet = true
|
||||
}
|
||||
|
||||
return driverlegacy.ReplaceOneModel{
|
||||
UpdateModel: um,
|
||||
Filter: rom.Filter,
|
||||
Replacement: rom.Replacement,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateOneModel is the write model for update operations.
|
||||
type UpdateOneModel struct {
|
||||
Collation *options.Collation
|
||||
Upsert *bool
|
||||
Filter interface{}
|
||||
Update interface{}
|
||||
ArrayFilters *options.ArrayFilters
|
||||
}
|
||||
|
||||
// NewUpdateOneModel creates a new UpdateOneModel.
|
||||
func NewUpdateOneModel() *UpdateOneModel {
|
||||
return &UpdateOneModel{}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter for the UpdateOneModel.
|
||||
func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel {
|
||||
uom.Filter = filter
|
||||
return uom
|
||||
}
|
||||
|
||||
// SetUpdate sets the update document for the UpdateOneModel.
|
||||
func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel {
|
||||
uom.Update = update
|
||||
return uom
|
||||
}
|
||||
|
||||
// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
|
||||
func (uom *UpdateOneModel) SetArrayFilters(filters options.ArrayFilters) *UpdateOneModel {
|
||||
uom.ArrayFilters = &filters
|
||||
return uom
|
||||
}
|
||||
|
||||
// SetCollation sets the collation for the UpdateOneModel.
|
||||
func (uom *UpdateOneModel) SetCollation(collation *options.Collation) *UpdateOneModel {
|
||||
uom.Collation = collation
|
||||
return uom
|
||||
}
|
||||
|
||||
// SetUpsert specifies if a new document should be created if no document matches the query.
|
||||
func (uom *UpdateOneModel) SetUpsert(upsert bool) *UpdateOneModel {
|
||||
uom.Upsert = &upsert
|
||||
return uom
|
||||
}
|
||||
|
||||
func (uom *UpdateOneModel) convertModel() driverlegacy.WriteModel {
|
||||
um := driverlegacy.UpdateModel{
|
||||
Collation: uom.Collation,
|
||||
}
|
||||
if uom.Upsert != nil {
|
||||
um.Upsert = *uom.Upsert
|
||||
um.UpsertSet = true
|
||||
}
|
||||
|
||||
converted := driverlegacy.UpdateOneModel{
|
||||
UpdateModel: um,
|
||||
Filter: uom.Filter,
|
||||
Update: uom.Update,
|
||||
}
|
||||
if uom.ArrayFilters != nil {
|
||||
converted.ArrayFilters = *uom.ArrayFilters
|
||||
converted.ArrayFiltersSet = true
|
||||
}
|
||||
|
||||
return converted
|
||||
}
|
||||
|
||||
// UpdateManyModel is the write model for updateMany operations.
|
||||
type UpdateManyModel struct {
|
||||
Collation *options.Collation
|
||||
Upsert *bool
|
||||
Filter interface{}
|
||||
Update interface{}
|
||||
ArrayFilters *options.ArrayFilters
|
||||
}
|
||||
|
||||
// NewUpdateManyModel creates a new UpdateManyModel.
|
||||
func NewUpdateManyModel() *UpdateManyModel {
|
||||
return &UpdateManyModel{}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter for the UpdateManyModel.
|
||||
func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel {
|
||||
umm.Filter = filter
|
||||
return umm
|
||||
}
|
||||
|
||||
// SetUpdate sets the update document for the UpdateManyModel.
|
||||
func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel {
|
||||
umm.Update = update
|
||||
return umm
|
||||
}
|
||||
|
||||
// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
|
||||
func (umm *UpdateManyModel) SetArrayFilters(filters options.ArrayFilters) *UpdateManyModel {
|
||||
umm.ArrayFilters = &filters
|
||||
return umm
|
||||
}
|
||||
|
||||
// SetCollation sets the collation for the UpdateManyModel.
|
||||
func (umm *UpdateManyModel) SetCollation(collation *options.Collation) *UpdateManyModel {
|
||||
umm.Collation = collation
|
||||
return umm
|
||||
}
|
||||
|
||||
// SetUpsert specifies if a new document should be created if no document matches the query.
|
||||
func (umm *UpdateManyModel) SetUpsert(upsert bool) *UpdateManyModel {
|
||||
umm.Upsert = &upsert
|
||||
return umm
|
||||
}
|
||||
|
||||
func (umm *UpdateManyModel) convertModel() driverlegacy.WriteModel {
|
||||
um := driverlegacy.UpdateModel{
|
||||
Collation: umm.Collation,
|
||||
}
|
||||
if umm.Upsert != nil {
|
||||
um.Upsert = *umm.Upsert
|
||||
um.UpsertSet = true
|
||||
}
|
||||
|
||||
converted := driverlegacy.UpdateManyModel{
|
||||
UpdateModel: um,
|
||||
Filter: umm.Filter,
|
||||
Update: umm.Update,
|
||||
}
|
||||
if umm.ArrayFilters != nil {
|
||||
converted.ArrayFilters = *umm.ArrayFilters
|
||||
converted.ArrayFiltersSet = true
|
||||
}
|
||||
|
||||
return converted
|
||||
}
|
||||
|
||||
func dispatchToMongoModel(model driverlegacy.WriteModel) WriteModel {
|
||||
switch conv := model.(type) {
|
||||
case driverlegacy.InsertOneModel:
|
||||
return &InsertOneModel{
|
||||
Document: conv.Document,
|
||||
}
|
||||
case driverlegacy.DeleteOneModel:
|
||||
return &DeleteOneModel{
|
||||
Filter: conv.Filter,
|
||||
Collation: conv.Collation,
|
||||
}
|
||||
case driverlegacy.DeleteManyModel:
|
||||
return &DeleteManyModel{
|
||||
Filter: conv.Filter,
|
||||
Collation: conv.Collation,
|
||||
}
|
||||
case driverlegacy.ReplaceOneModel:
|
||||
rom := &ReplaceOneModel{
|
||||
Filter: conv.Filter,
|
||||
Replacement: conv.Replacement,
|
||||
Collation: conv.Collation,
|
||||
}
|
||||
if conv.UpsertSet {
|
||||
rom.Upsert = &conv.Upsert
|
||||
}
|
||||
return rom
|
||||
case driverlegacy.UpdateOneModel:
|
||||
uom := &UpdateOneModel{
|
||||
Filter: conv.Filter,
|
||||
Update: conv.Update,
|
||||
Collation: conv.Collation,
|
||||
}
|
||||
if conv.UpsertSet {
|
||||
uom.Upsert = &conv.Upsert
|
||||
}
|
||||
if conv.ArrayFiltersSet {
|
||||
uom.ArrayFilters = &conv.ArrayFilters
|
||||
}
|
||||
return uom
|
||||
case driverlegacy.UpdateManyModel:
|
||||
umm := &UpdateManyModel{
|
||||
Filter: conv.Filter,
|
||||
Update: conv.Update,
|
||||
Collation: conv.Collation,
|
||||
}
|
||||
if conv.UpsertSet {
|
||||
umm.Upsert = &conv.Upsert
|
||||
}
|
||||
if conv.ArrayFiltersSet {
|
||||
umm.ArrayFilters = &conv.ArrayFilters
|
||||
}
|
||||
return umm
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
489
vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go
generated
vendored
Executable file
489
vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go
generated
vendored
Executable file
@@ -0,0 +1,489 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
)
|
||||
|
||||
const errorInterrupted int32 = 11601
|
||||
const errorCappedPositionLost int32 = 136
|
||||
const errorCursorKilled int32 = 237
|
||||
|
||||
// ErrMissingResumeToken indicates that a change stream notification from the server did not
|
||||
// contain a resume token.
|
||||
var ErrMissingResumeToken = errors.New("cannot provide resume functionality when the resume token is missing")
|
||||
|
||||
// ErrNilCursor indicates that the cursor for the change stream is nil.
|
||||
var ErrNilCursor = errors.New("cursor is nil")
|
||||
|
||||
// ChangeStream instances iterate a stream of change documents. Each document can be decoded via the
|
||||
// Decode method. Resume tokens should be retrieved via the ResumeToken method and can be stored to
|
||||
// resume the change stream at a specific point in time.
|
||||
//
|
||||
// A typical usage of the ChangeStream type would be:
|
||||
type ChangeStream struct {
|
||||
Current bson.Raw
|
||||
|
||||
aggregate *operation.Aggregate
|
||||
pipelineSlice []bsoncore.Document
|
||||
cursor changeStreamCursor
|
||||
cursorOptions driver.CursorOptions
|
||||
batch []bsoncore.Document
|
||||
resumeToken bson.Raw
|
||||
err error
|
||||
sess *session.Client
|
||||
client *Client
|
||||
registry *bsoncodec.Registry
|
||||
streamType StreamType
|
||||
options *options.ChangeStreamOptions
|
||||
selector description.ServerSelector
|
||||
operationTime *primitive.Timestamp
|
||||
}
|
||||
|
||||
type changeStreamConfig struct {
|
||||
readConcern *readconcern.ReadConcern
|
||||
readPreference *readpref.ReadPref
|
||||
client *Client
|
||||
registry *bsoncodec.Registry
|
||||
streamType StreamType
|
||||
collectionName string
|
||||
databaseName string
|
||||
}
|
||||
|
||||
func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline interface{},
|
||||
opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
cs := &ChangeStream{
|
||||
client: config.client,
|
||||
registry: config.registry,
|
||||
streamType: config.streamType,
|
||||
options: options.MergeChangeStreamOptions(opts...),
|
||||
selector: description.ReadPrefSelector(config.readPreference),
|
||||
}
|
||||
|
||||
cs.sess = sessionFromContext(ctx)
|
||||
if cs.sess == nil && cs.client.topology.SessionPool != nil {
|
||||
cs.sess, cs.err = session.NewClientSession(cs.client.topology.SessionPool, cs.client.id, session.Implicit)
|
||||
if cs.err != nil {
|
||||
return nil, cs.Err()
|
||||
}
|
||||
}
|
||||
if cs.err = cs.client.validSession(cs.sess); cs.err != nil {
|
||||
closeImplicitSession(cs.sess)
|
||||
return nil, cs.Err()
|
||||
}
|
||||
|
||||
cs.aggregate = operation.NewAggregate(nil).
|
||||
ReadPreference(config.readPreference).ReadConcern(config.readConcern).
|
||||
Deployment(cs.client.topology).ClusterClock(cs.client.clock).
|
||||
CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector)
|
||||
|
||||
if cs.options.Collation != nil {
|
||||
cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument()))
|
||||
}
|
||||
if cs.options.BatchSize != nil {
|
||||
cs.cursorOptions.BatchSize = *cs.options.BatchSize
|
||||
}
|
||||
if cs.options.MaxAwaitTime != nil {
|
||||
cs.cursorOptions.MaxTimeMS = int64(time.Duration(*cs.options.MaxAwaitTime) / time.Millisecond)
|
||||
}
|
||||
cs.cursorOptions.CommandMonitor = cs.client.monitor
|
||||
|
||||
switch cs.streamType {
|
||||
case ClientStream:
|
||||
cs.aggregate.Database("admin")
|
||||
case DatabaseStream:
|
||||
cs.aggregate.Database(config.databaseName)
|
||||
case CollectionStream:
|
||||
cs.aggregate.Collection(config.collectionName).Database(config.databaseName)
|
||||
default:
|
||||
closeImplicitSession(cs.sess)
|
||||
return nil, fmt.Errorf("must supply a valid StreamType in config, instead of %v", cs.streamType)
|
||||
}
|
||||
|
||||
// When starting a change stream, cache startAfter as the first resume token if it is set. If not, cache
|
||||
// resumeAfter. If neither is set, do not cache a resume token.
|
||||
resumeToken := cs.options.StartAfter
|
||||
if resumeToken == nil {
|
||||
resumeToken = cs.options.ResumeAfter
|
||||
}
|
||||
var marshaledToken bson.Raw
|
||||
if resumeToken != nil {
|
||||
if marshaledToken, cs.err = bson.Marshal(resumeToken); cs.err != nil {
|
||||
closeImplicitSession(cs.sess)
|
||||
return nil, cs.Err()
|
||||
}
|
||||
}
|
||||
cs.resumeToken = marshaledToken
|
||||
|
||||
if cs.err = cs.buildPipelineSlice(pipeline); cs.err != nil {
|
||||
closeImplicitSession(cs.sess)
|
||||
return nil, cs.Err()
|
||||
}
|
||||
var pipelineArr bsoncore.Document
|
||||
pipelineArr, cs.err = cs.pipelineToBSON()
|
||||
cs.aggregate.Pipeline(pipelineArr)
|
||||
|
||||
if cs.err = cs.executeOperation(ctx, false); cs.err != nil {
|
||||
closeImplicitSession(cs.sess)
|
||||
return nil, cs.Err()
|
||||
}
|
||||
|
||||
return cs, cs.Err()
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) error {
|
||||
var server driver.Server
|
||||
var conn driver.Connection
|
||||
if server, cs.err = cs.client.topology.SelectServer(ctx, cs.selector); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
if conn, cs.err = server.Connection(ctx); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
cs.aggregate.Deployment(driver.SingleConnectionDeployment{
|
||||
C: conn,
|
||||
})
|
||||
|
||||
if resuming {
|
||||
cs.replaceOptions(ctx, conn.Description().WireVersion) // pass wire version
|
||||
|
||||
csOptDoc := cs.createPipelineOptionsDoc()
|
||||
pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil)
|
||||
pipDoc = bsoncore.AppendDocumentElement(pipDoc, "$changeStream", csOptDoc)
|
||||
if pipDoc, cs.err = bsoncore.AppendDocumentEnd(pipDoc, pipIdx); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
cs.pipelineSlice[0] = pipDoc
|
||||
|
||||
var plArr bsoncore.Document
|
||||
if plArr, cs.err = cs.pipelineToBSON(); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
cs.aggregate.Pipeline(plArr)
|
||||
}
|
||||
|
||||
if cs.err = replaceErrors(cs.aggregate.Execute(ctx)); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
|
||||
cr := cs.aggregate.ResultCursorResponse()
|
||||
cr.Server = server
|
||||
|
||||
cs.cursor, cs.err = driver.NewBatchCursor(cr, cs.sess, cs.client.clock, cs.cursorOptions)
|
||||
if cs.err = replaceErrors(cs.err); cs.err != nil {
|
||||
return cs.Err()
|
||||
}
|
||||
|
||||
cs.updatePbrtFromCommand()
|
||||
if cs.options.StartAtOperationTime == nil && cs.options.ResumeAfter == nil &&
|
||||
cs.options.StartAfter == nil && conn.Description().WireVersion.Max >= 7 &&
|
||||
cs.emptyBatch() && cs.resumeToken == nil {
|
||||
cs.operationTime = cs.sess.OperationTime
|
||||
}
|
||||
|
||||
return cs.Err()
|
||||
}
|
||||
|
||||
// Updates the post batch resume token after a successful aggregate or getMore operation.
|
||||
func (cs *ChangeStream) updatePbrtFromCommand() {
|
||||
// Only cache the pbrt if an empty batch was returned and a pbrt was included
|
||||
if pbrt := cs.cursor.PostBatchResumeToken(); cs.emptyBatch() && pbrt != nil {
|
||||
cs.resumeToken = bson.Raw(pbrt)
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) storeResumeToken() error {
|
||||
// If cs.Current is the last document in the batch and a pbrt is included, cache the pbrt
|
||||
// Otherwise, cache the _id of the document
|
||||
var tokenDoc bson.Raw
|
||||
if len(cs.batch) == 0 {
|
||||
if pbrt := cs.cursor.PostBatchResumeToken(); pbrt != nil {
|
||||
tokenDoc = bson.Raw(pbrt)
|
||||
}
|
||||
}
|
||||
|
||||
if tokenDoc == nil {
|
||||
var ok bool
|
||||
tokenDoc, ok = cs.Current.Lookup("_id").DocumentOK()
|
||||
if !ok {
|
||||
_ = cs.Close(context.Background())
|
||||
return ErrMissingResumeToken
|
||||
}
|
||||
}
|
||||
|
||||
cs.resumeToken = tokenDoc
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error {
|
||||
val := reflect.ValueOf(pipeline)
|
||||
if !val.IsValid() || !(val.Kind() == reflect.Slice) {
|
||||
cs.err = errors.New("can only transform slices and arrays into aggregation pipelines, but got invalid")
|
||||
return cs.err
|
||||
}
|
||||
|
||||
cs.pipelineSlice = make([]bsoncore.Document, 0, val.Len()+1)
|
||||
|
||||
csIdx, csDoc := bsoncore.AppendDocumentStart(nil)
|
||||
csDocTemp := cs.createPipelineOptionsDoc()
|
||||
if cs.err != nil {
|
||||
return cs.err
|
||||
}
|
||||
csDoc = bsoncore.AppendDocumentElement(csDoc, "$changeStream", csDocTemp)
|
||||
csDoc, cs.err = bsoncore.AppendDocumentEnd(csDoc, csIdx)
|
||||
if cs.err != nil {
|
||||
return cs.err
|
||||
}
|
||||
cs.pipelineSlice = append(cs.pipelineSlice, csDoc)
|
||||
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
var elem []byte
|
||||
elem, cs.err = transformBsoncoreDocument(cs.registry, val.Index(i).Interface())
|
||||
if cs.err != nil {
|
||||
return cs.err
|
||||
}
|
||||
|
||||
cs.pipelineSlice = append(cs.pipelineSlice, elem)
|
||||
}
|
||||
|
||||
return cs.err
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document {
|
||||
plDocIdx, plDoc := bsoncore.AppendDocumentStart(nil)
|
||||
|
||||
if cs.streamType == ClientStream {
|
||||
plDoc = bsoncore.AppendBooleanElement(plDoc, "allChangesForCluster", true)
|
||||
}
|
||||
|
||||
if cs.options.FullDocument != nil {
|
||||
plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument))
|
||||
}
|
||||
|
||||
if cs.options.ResumeAfter != nil {
|
||||
var raDoc bsoncore.Document
|
||||
raDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.ResumeAfter)
|
||||
if cs.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc)
|
||||
}
|
||||
|
||||
if cs.options.StartAfter != nil {
|
||||
var saDoc bsoncore.Document
|
||||
saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter)
|
||||
if cs.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
plDoc = bsoncore.AppendDocumentElement(plDoc, "startAfter", saDoc)
|
||||
}
|
||||
|
||||
if cs.options.StartAtOperationTime != nil {
|
||||
plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I)
|
||||
}
|
||||
|
||||
if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return plDoc
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) {
|
||||
pipelineDocIdx, pipelineArr := bsoncore.AppendArrayStart(nil)
|
||||
for i, doc := range cs.pipelineSlice {
|
||||
pipelineArr = bsoncore.AppendDocumentElement(pipelineArr, strconv.Itoa(i), doc)
|
||||
}
|
||||
if pipelineArr, cs.err = bsoncore.AppendArrayEnd(pipelineArr, pipelineDocIdx); cs.err != nil {
|
||||
return nil, cs.err
|
||||
}
|
||||
return pipelineArr, cs.err
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) replaceOptions(ctx context.Context, wireVersion *description.VersionRange) {
|
||||
// Cached resume token: use the resume token as the resumeAfter option and set no other resume options
|
||||
if cs.resumeToken != nil {
|
||||
cs.options.SetResumeAfter(cs.resumeToken)
|
||||
cs.options.SetStartAfter(nil)
|
||||
cs.options.SetStartAtOperationTime(nil)
|
||||
return
|
||||
}
|
||||
|
||||
// No cached resume token but cached operation time: use the operation time as the startAtOperationTime option and
|
||||
// set no other resume options
|
||||
if (cs.sess.OperationTime != nil || cs.options.StartAtOperationTime != nil) && wireVersion.Max >= 7 {
|
||||
opTime := cs.options.StartAtOperationTime
|
||||
if cs.operationTime != nil {
|
||||
opTime = cs.sess.OperationTime
|
||||
}
|
||||
|
||||
cs.options.SetStartAtOperationTime(opTime)
|
||||
cs.options.SetResumeAfter(nil)
|
||||
cs.options.SetStartAfter(nil)
|
||||
return
|
||||
}
|
||||
|
||||
// No cached resume token or operation time: set none of the resume options
|
||||
cs.options.SetResumeAfter(nil)
|
||||
cs.options.SetStartAfter(nil)
|
||||
cs.options.SetStartAtOperationTime(nil)
|
||||
}
|
||||
|
||||
// ID returns the cursor ID for this change stream.
|
||||
func (cs *ChangeStream) ID() int64 {
|
||||
if cs.cursor == nil {
|
||||
return 0
|
||||
}
|
||||
return cs.cursor.ID()
|
||||
}
|
||||
|
||||
// Decode will decode the current document into val.
|
||||
func (cs *ChangeStream) Decode(val interface{}) error {
|
||||
if cs.cursor == nil {
|
||||
return ErrNilCursor
|
||||
}
|
||||
|
||||
return bson.UnmarshalWithRegistry(cs.registry, cs.Current, val)
|
||||
}
|
||||
|
||||
// Err returns the current error.
|
||||
func (cs *ChangeStream) Err() error {
|
||||
if cs.err != nil {
|
||||
return replaceErrors(cs.err)
|
||||
}
|
||||
if cs.cursor == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return replaceErrors(cs.cursor.Err())
|
||||
}
|
||||
|
||||
// Close closes this cursor.
|
||||
func (cs *ChangeStream) Close(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
closeImplicitSession(cs.sess)
|
||||
|
||||
if cs.cursor == nil {
|
||||
return nil // cursor is already closed
|
||||
}
|
||||
|
||||
cs.err = replaceErrors(cs.cursor.Close(ctx))
|
||||
cs.cursor = nil
|
||||
return cs.Err()
|
||||
}
|
||||
|
||||
// ResumeToken returns the last cached resume token for this change stream.
|
||||
func (cs *ChangeStream) ResumeToken() bson.Raw {
|
||||
return cs.resumeToken
|
||||
}
|
||||
|
||||
// Next gets the next result from this change stream. Returns true if there were no errors and the next
|
||||
// result is available for decoding.
|
||||
func (cs *ChangeStream) Next(ctx context.Context) bool {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
if len(cs.batch) == 0 {
|
||||
cs.loopNext(ctx)
|
||||
if cs.err != nil || len(cs.batch) == 0 {
|
||||
cs.err = replaceErrors(cs.err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
cs.Current = bson.Raw(cs.batch[0])
|
||||
cs.batch = cs.batch[1:]
|
||||
if cs.err = cs.storeResumeToken(); cs.err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (cs *ChangeStream) loopNext(ctx context.Context) {
|
||||
for {
|
||||
if cs.cursor == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if cs.cursor.Next(ctx) {
|
||||
// If this is the first batch, the batch cursor will return true, but the batch could be empty.
|
||||
if cs.batch, cs.err = cs.cursor.Batch().Documents(); cs.err != nil || len(cs.batch) > 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// no error but empty batch
|
||||
cs.updatePbrtFromCommand()
|
||||
continue
|
||||
}
|
||||
|
||||
cs.err = replaceErrors(cs.cursor.Err())
|
||||
if cs.err == nil {
|
||||
// If a getMore was done but the batch was empty, the batch cursor will return false with no error
|
||||
if len(cs.batch) == 0 {
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch t := cs.err.(type) {
|
||||
case CommandError:
|
||||
if t.Code == errorInterrupted || t.Code == errorCappedPositionLost || t.Code == errorCursorKilled || t.HasErrorLabel("NonResumableChangeStreamError") {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ignore error from cursor close because if the cursor is deleted or errors we tried to close it and will remake and try to get next batch
|
||||
_ = cs.cursor.Close(ctx)
|
||||
if cs.err = cs.executeOperation(ctx, true); cs.err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the underlying cursor's batch is empty
|
||||
func (cs *ChangeStream) emptyBatch() bool {
|
||||
return len(cs.cursor.Batch().Data) == 5 // empty BSON array
|
||||
}
|
||||
|
||||
// StreamType represents the type of a change stream.
|
||||
type StreamType uint8
|
||||
|
||||
// These constants represent valid change stream types. A change stream can be initialized over a collection, all
|
||||
// collections in a database, or over a whole client.
|
||||
const (
|
||||
CollectionStream StreamType = iota
|
||||
DatabaseStream
|
||||
ClientStream
|
||||
)
|
||||
564
vendor/go.mongodb.org/mongo-driver/mongo/client.go
generated
vendored
Executable file
564
vendor/go.mongodb.org/mongo-driver/mongo/client.go
generated
vendored
Executable file
@@ -0,0 +1,564 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/event"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/auth"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/uuid"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driverlegacy"
|
||||
"go.mongodb.org/mongo-driver/x/network/command"
|
||||
)
|
||||
|
||||
const defaultLocalThreshold = 15 * time.Millisecond
|
||||
|
||||
// Client performs operations on a given topology.
|
||||
type Client struct {
|
||||
id uuid.UUID
|
||||
topologyOptions []topology.Option
|
||||
topology *topology.Topology
|
||||
connString connstring.ConnString
|
||||
localThreshold time.Duration
|
||||
retryWrites bool
|
||||
clock *session.ClusterClock
|
||||
readPreference *readpref.ReadPref
|
||||
readConcern *readconcern.ReadConcern
|
||||
writeConcern *writeconcern.WriteConcern
|
||||
registry *bsoncodec.Registry
|
||||
marshaller BSONAppender
|
||||
monitor *event.CommandMonitor
|
||||
}
|
||||
|
||||
// Connect creates a new Client and then initializes it using the Connect method.
|
||||
func Connect(ctx context.Context, opts ...*options.ClientOptions) (*Client, error) {
|
||||
c, err := NewClient(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.Connect(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewClient creates a new client to connect to a cluster specified by the uri.
|
||||
//
|
||||
// When creating an options.ClientOptions, the order the methods are called matters. Later Set*
|
||||
// methods will overwrite the values from previous Set* method invocations. This includes the
|
||||
// ApplyURI method. This allows callers to determine the order of precedence for option
|
||||
// application. For instance, if ApplyURI is called before SetAuth, the Credential from
|
||||
// SetAuth will overwrite the values from the connection string. If ApplyURI is called
|
||||
// after SetAuth, then its values will overwrite those from SetAuth.
|
||||
//
|
||||
// The opts parameter is processed using options.MergeClientOptions, which will overwrite entire
|
||||
// option fields of previous options, there is no partial overwriting. For example, if Username is
|
||||
// set in the Auth field for the first option, and Password is set for the second but with no
|
||||
// Username, after the merge the Username field will be empty.
|
||||
func NewClient(opts ...*options.ClientOptions) (*Client, error) {
|
||||
clientOpt := options.MergeClientOptions(opts...)
|
||||
|
||||
id, err := uuid.New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := &Client{id: id}
|
||||
|
||||
err = client.configure(clientOpt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client.topology, err = topology.New(client.topologyOptions...)
|
||||
if err != nil {
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Connect initializes the Client by starting background monitoring goroutines.
|
||||
// This method must be called before a Client can be used.
|
||||
func (c *Client) Connect(ctx context.Context) error {
|
||||
err := c.topology.Connect()
|
||||
if err != nil {
|
||||
return replaceErrors(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Disconnect closes sockets to the topology referenced by this Client. It will
|
||||
// shut down any monitoring goroutines, close the idle connection pool, and will
|
||||
// wait until all the in use connections have been returned to the connection
|
||||
// pool and closed before returning. If the context expires via cancellation,
|
||||
// deadline, or timeout before the in use connections have returned, the in use
|
||||
// connections will be closed, resulting in the failure of any in flight read
|
||||
// or write operations. If this method returns with no errors, all connections
|
||||
// associated with this Client have been closed.
|
||||
func (c *Client) Disconnect(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
c.endSessions(ctx)
|
||||
return replaceErrors(c.topology.Disconnect(ctx))
|
||||
}
|
||||
|
||||
// Ping verifies that the client can connect to the topology.
|
||||
// If readPreference is nil then will use the client's default read
|
||||
// preference.
|
||||
func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
if rp == nil {
|
||||
rp = c.readPreference
|
||||
}
|
||||
|
||||
db := c.Database("admin")
|
||||
res := db.RunCommand(ctx, bson.D{
|
||||
{"ping", 1},
|
||||
})
|
||||
|
||||
return replaceErrors(res.Err())
|
||||
}
|
||||
|
||||
// StartSession starts a new session.
|
||||
func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) {
|
||||
if c.topology.SessionPool == nil {
|
||||
return nil, ErrClientDisconnected
|
||||
}
|
||||
|
||||
sopts := options.MergeSessionOptions(opts...)
|
||||
coreOpts := &session.ClientOptions{
|
||||
DefaultReadConcern: c.readConcern,
|
||||
DefaultReadPreference: c.readPreference,
|
||||
DefaultWriteConcern: c.writeConcern,
|
||||
}
|
||||
if sopts.CausalConsistency != nil {
|
||||
coreOpts.CausalConsistency = sopts.CausalConsistency
|
||||
}
|
||||
if sopts.DefaultReadConcern != nil {
|
||||
coreOpts.DefaultReadConcern = sopts.DefaultReadConcern
|
||||
}
|
||||
if sopts.DefaultWriteConcern != nil {
|
||||
coreOpts.DefaultWriteConcern = sopts.DefaultWriteConcern
|
||||
}
|
||||
if sopts.DefaultReadPreference != nil {
|
||||
coreOpts.DefaultReadPreference = sopts.DefaultReadPreference
|
||||
}
|
||||
|
||||
sess, err := session.NewClientSession(c.topology.SessionPool, c.id, session.Explicit, coreOpts)
|
||||
if err != nil {
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
|
||||
sess.RetryWrite = c.retryWrites
|
||||
|
||||
return &sessionImpl{
|
||||
clientSession: sess,
|
||||
client: c,
|
||||
topo: c.topology,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) endSessions(ctx context.Context) {
|
||||
if c.topology.SessionPool == nil {
|
||||
return
|
||||
}
|
||||
cmd := command.EndSessions{
|
||||
Clock: c.clock,
|
||||
SessionIDs: c.topology.SessionPool.IDSlice(),
|
||||
}
|
||||
|
||||
_, _ = driverlegacy.EndSessions(ctx, cmd, c.topology, description.ReadPrefSelector(readpref.PrimaryPreferred()))
|
||||
}
|
||||
|
||||
func (c *Client) configure(opts *options.ClientOptions) error {
|
||||
if err := opts.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var connOpts []topology.ConnectionOption
|
||||
var serverOpts []topology.ServerOption
|
||||
var topologyOpts []topology.Option
|
||||
|
||||
// TODO(GODRIVER-814): Add tests for topology, server, and connection related options.
|
||||
|
||||
// AppName
|
||||
var appName string
|
||||
if opts.AppName != nil {
|
||||
appName = *opts.AppName
|
||||
}
|
||||
// Compressors & ZlibLevel
|
||||
var comps []string
|
||||
if len(opts.Compressors) > 0 {
|
||||
comps = opts.Compressors
|
||||
|
||||
connOpts = append(connOpts, topology.WithCompressors(
|
||||
func(compressors []string) []string {
|
||||
return append(compressors, comps...)
|
||||
},
|
||||
))
|
||||
|
||||
for _, comp := range comps {
|
||||
if comp == "zlib" {
|
||||
connOpts = append(connOpts, topology.WithZlibLevel(func(level *int) *int {
|
||||
return opts.ZlibLevel
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
serverOpts = append(serverOpts, topology.WithCompressionOptions(
|
||||
func(opts ...string) []string { return append(opts, comps...) },
|
||||
))
|
||||
}
|
||||
// Handshaker
|
||||
var handshaker = func(driver.Handshaker) driver.Handshaker {
|
||||
return operation.NewIsMaster().AppName(appName).Compressors(comps)
|
||||
}
|
||||
// Auth & Database & Password & Username
|
||||
if opts.Auth != nil {
|
||||
cred := &auth.Cred{
|
||||
Username: opts.Auth.Username,
|
||||
Password: opts.Auth.Password,
|
||||
PasswordSet: opts.Auth.PasswordSet,
|
||||
Props: opts.Auth.AuthMechanismProperties,
|
||||
Source: opts.Auth.AuthSource,
|
||||
}
|
||||
mechanism := opts.Auth.AuthMechanism
|
||||
|
||||
if len(cred.Source) == 0 {
|
||||
switch strings.ToUpper(mechanism) {
|
||||
case auth.MongoDBX509, auth.GSSAPI, auth.PLAIN:
|
||||
cred.Source = "$external"
|
||||
default:
|
||||
cred.Source = "admin"
|
||||
}
|
||||
}
|
||||
|
||||
authenticator, err := auth.CreateAuthenticator(mechanism, cred)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handshakeOpts := &auth.HandshakeOptions{
|
||||
AppName: appName,
|
||||
Authenticator: authenticator,
|
||||
Compressors: comps,
|
||||
}
|
||||
if mechanism == "" {
|
||||
// Required for SASL mechanism negotiation during handshake
|
||||
handshakeOpts.DBUser = cred.Source + "." + cred.Username
|
||||
}
|
||||
if opts.AuthenticateToAnything != nil && *opts.AuthenticateToAnything {
|
||||
// Authenticate arbiters
|
||||
handshakeOpts.PerformAuthentication = func(serv description.Server) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
handshaker = func(driver.Handshaker) driver.Handshaker {
|
||||
return auth.Handshaker(nil, handshakeOpts)
|
||||
}
|
||||
}
|
||||
connOpts = append(connOpts, topology.WithHandshaker(handshaker))
|
||||
// ConnectTimeout
|
||||
if opts.ConnectTimeout != nil {
|
||||
serverOpts = append(serverOpts, topology.WithHeartbeatTimeout(
|
||||
func(time.Duration) time.Duration { return *opts.ConnectTimeout },
|
||||
))
|
||||
connOpts = append(connOpts, topology.WithConnectTimeout(
|
||||
func(time.Duration) time.Duration { return *opts.ConnectTimeout },
|
||||
))
|
||||
}
|
||||
// Dialer
|
||||
if opts.Dialer != nil {
|
||||
connOpts = append(connOpts, topology.WithDialer(
|
||||
func(topology.Dialer) topology.Dialer { return opts.Dialer },
|
||||
))
|
||||
}
|
||||
// Direct
|
||||
if opts.Direct != nil && *opts.Direct {
|
||||
topologyOpts = append(topologyOpts, topology.WithMode(
|
||||
func(topology.MonitorMode) topology.MonitorMode { return topology.SingleMode },
|
||||
))
|
||||
}
|
||||
// HeartbeatInterval
|
||||
if opts.HeartbeatInterval != nil {
|
||||
serverOpts = append(serverOpts, topology.WithHeartbeatInterval(
|
||||
func(time.Duration) time.Duration { return *opts.HeartbeatInterval },
|
||||
))
|
||||
}
|
||||
// Hosts
|
||||
hosts := []string{"localhost:27017"} // default host
|
||||
if len(opts.Hosts) > 0 {
|
||||
hosts = opts.Hosts
|
||||
}
|
||||
topologyOpts = append(topologyOpts, topology.WithSeedList(
|
||||
func(...string) []string { return hosts },
|
||||
))
|
||||
// LocalThreshold
|
||||
if opts.LocalThreshold != nil {
|
||||
c.localThreshold = *opts.LocalThreshold
|
||||
}
|
||||
// MaxConIdleTime
|
||||
if opts.MaxConnIdleTime != nil {
|
||||
connOpts = append(connOpts, topology.WithIdleTimeout(
|
||||
func(time.Duration) time.Duration { return *opts.MaxConnIdleTime },
|
||||
))
|
||||
}
|
||||
// MaxPoolSize
|
||||
if opts.MaxPoolSize != nil {
|
||||
serverOpts = append(
|
||||
serverOpts,
|
||||
topology.WithMaxConnections(func(uint16) uint16 { return *opts.MaxPoolSize }),
|
||||
topology.WithMaxIdleConnections(func(uint16) uint16 { return *opts.MaxPoolSize }),
|
||||
)
|
||||
}
|
||||
// Monitor
|
||||
if opts.Monitor != nil {
|
||||
c.monitor = opts.Monitor
|
||||
connOpts = append(connOpts, topology.WithMonitor(
|
||||
func(*event.CommandMonitor) *event.CommandMonitor { return opts.Monitor },
|
||||
))
|
||||
}
|
||||
// ReadConcern
|
||||
c.readConcern = readconcern.New()
|
||||
if opts.ReadConcern != nil {
|
||||
c.readConcern = opts.ReadConcern
|
||||
}
|
||||
// ReadPreference
|
||||
c.readPreference = readpref.Primary()
|
||||
if opts.ReadPreference != nil {
|
||||
c.readPreference = opts.ReadPreference
|
||||
}
|
||||
// Registry
|
||||
c.registry = bson.DefaultRegistry
|
||||
if opts.Registry != nil {
|
||||
c.registry = opts.Registry
|
||||
}
|
||||
// ReplicaSet
|
||||
if opts.ReplicaSet != nil {
|
||||
topologyOpts = append(topologyOpts, topology.WithReplicaSetName(
|
||||
func(string) string { return *opts.ReplicaSet },
|
||||
))
|
||||
}
|
||||
// RetryWrites
|
||||
if opts.RetryWrites != nil {
|
||||
c.retryWrites = *opts.RetryWrites
|
||||
}
|
||||
// ServerSelectionTimeout
|
||||
if opts.ServerSelectionTimeout != nil {
|
||||
topologyOpts = append(topologyOpts, topology.WithServerSelectionTimeout(
|
||||
func(time.Duration) time.Duration { return *opts.ServerSelectionTimeout },
|
||||
))
|
||||
}
|
||||
// SocketTimeout
|
||||
if opts.SocketTimeout != nil {
|
||||
connOpts = append(
|
||||
connOpts,
|
||||
topology.WithReadTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }),
|
||||
topology.WithWriteTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }),
|
||||
)
|
||||
}
|
||||
// TLSConfig
|
||||
if opts.TLSConfig != nil {
|
||||
connOpts = append(connOpts, topology.WithTLSConfig(
|
||||
func(*tls.Config) *tls.Config {
|
||||
return opts.TLSConfig
|
||||
},
|
||||
))
|
||||
}
|
||||
// WriteConcern
|
||||
if opts.WriteConcern != nil {
|
||||
c.writeConcern = opts.WriteConcern
|
||||
}
|
||||
|
||||
// ClusterClock
|
||||
c.clock = new(session.ClusterClock)
|
||||
|
||||
serverOpts = append(
|
||||
serverOpts,
|
||||
topology.WithClock(func(*session.ClusterClock) *session.ClusterClock { return c.clock }),
|
||||
topology.WithConnectionOptions(func(...topology.ConnectionOption) []topology.ConnectionOption { return connOpts }),
|
||||
)
|
||||
c.topologyOptions = append(topologyOpts, topology.WithServerOptions(
|
||||
func(...topology.ServerOption) []topology.ServerOption { return serverOpts },
|
||||
))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validSession returns an error if the session doesn't belong to the client
|
||||
func (c *Client) validSession(sess *session.Client) error {
|
||||
if sess != nil && !uuid.Equal(sess.ClientID, c.id) {
|
||||
return ErrWrongClient
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Database returns a handle for a given database.
|
||||
func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Database {
|
||||
return newDatabase(c, name, opts...)
|
||||
}
|
||||
|
||||
// ListDatabases returns a ListDatabasesResult.
|
||||
func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
|
||||
err := c.validSession(sess)
|
||||
if sess == nil && c.topology.SessionPool != nil {
|
||||
sess, err = session.NewClientSession(c.topology.SessionPool, c.id, session.Implicit)
|
||||
if err != nil {
|
||||
return ListDatabasesResult{}, err
|
||||
}
|
||||
defer sess.EndSession()
|
||||
}
|
||||
|
||||
err = c.validSession(sess)
|
||||
if err != nil {
|
||||
return ListDatabasesResult{}, err
|
||||
}
|
||||
|
||||
filterDoc, err := transformBsoncoreDocument(c.registry, filter)
|
||||
if err != nil {
|
||||
return ListDatabasesResult{}, err
|
||||
}
|
||||
|
||||
selector := description.CompositeSelector([]description.ServerSelector{
|
||||
description.ReadPrefSelector(readpref.Primary()),
|
||||
description.LatencySelector(c.localThreshold),
|
||||
})
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
selector = sess.PinnedServer
|
||||
}
|
||||
|
||||
ldo := options.MergeListDatabasesOptions(opts...)
|
||||
op := operation.NewListDatabases(filterDoc).
|
||||
Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor).
|
||||
ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.topology)
|
||||
if ldo.NameOnly != nil {
|
||||
op = op.NameOnly(*ldo.NameOnly)
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
if err != nil {
|
||||
return ListDatabasesResult{}, replaceErrors(err)
|
||||
}
|
||||
|
||||
return newListDatabasesResultFromOperation(op.Result()), nil
|
||||
}
|
||||
|
||||
// ListDatabaseNames returns a slice containing the names of all of the databases on the server.
|
||||
func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) {
|
||||
opts = append(opts, options.ListDatabases().SetNameOnly(true))
|
||||
|
||||
res, err := c.ListDatabases(ctx, filter, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make([]string, 0)
|
||||
for _, spec := range res.Databases {
|
||||
names = append(names, spec.Name)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// WithSession allows a user to start a session themselves and manage
|
||||
// its lifetime. The only way to provide a session to a CRUD method is
|
||||
// to invoke that CRUD method with the mongo.SessionContext within the
|
||||
// closure. The mongo.SessionContext can be used as a regular context,
|
||||
// so methods like context.WithDeadline and context.WithTimeout are
|
||||
// supported.
|
||||
//
|
||||
// If the context.Context already has a mongo.Session attached, that
|
||||
// mongo.Session will be replaced with the one provided.
|
||||
//
|
||||
// Errors returned from the closure are transparently returned from
|
||||
// this function.
|
||||
func WithSession(ctx context.Context, sess Session, fn func(SessionContext) error) error {
|
||||
return fn(contextWithSession(ctx, sess))
|
||||
}
|
||||
|
||||
// UseSession creates a default session, that is only valid for the
|
||||
// lifetime of the closure. No cleanup outside of closing the session
|
||||
// is done upon exiting the closure. This means that an outstanding
|
||||
// transaction will be aborted, even if the closure returns an error.
|
||||
//
|
||||
// If ctx already contains a mongo.Session, that mongo.Session will be
|
||||
// replaced with the newly created mongo.Session.
|
||||
//
|
||||
// Errors returned from the closure are transparently returned from
|
||||
// this method.
|
||||
func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) error {
|
||||
return c.UseSessionWithOptions(ctx, options.Session(), fn)
|
||||
}
|
||||
|
||||
// UseSessionWithOptions works like UseSession but allows the caller
|
||||
// to specify the options used to create the session.
|
||||
func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error {
|
||||
defaultSess, err := c.StartSession(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer defaultSess.EndSession(ctx)
|
||||
|
||||
sessCtx := sessionContext{
|
||||
Context: context.WithValue(ctx, sessionKey{}, defaultSess),
|
||||
Session: defaultSess,
|
||||
}
|
||||
|
||||
return fn(sessCtx)
|
||||
}
|
||||
|
||||
// Watch returns a change stream cursor used to receive information of changes to the client. This method is preferred
|
||||
// to running a raw aggregation with a $changeStream stage because it supports resumability in the case of some errors.
|
||||
// The client must have read concern majority or no read concern for a change stream to be created successfully.
|
||||
func (c *Client) Watch(ctx context.Context, pipeline interface{},
|
||||
opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
|
||||
if c.topology.SessionPool == nil {
|
||||
return nil, ErrClientDisconnected
|
||||
}
|
||||
|
||||
csConfig := changeStreamConfig{
|
||||
readConcern: c.readConcern,
|
||||
readPreference: c.readPreference,
|
||||
client: c,
|
||||
registry: c.registry,
|
||||
streamType: ClientStream,
|
||||
}
|
||||
|
||||
return newChangeStream(ctx, csConfig, pipeline, opts...)
|
||||
}
|
||||
1399
vendor/go.mongodb.org/mongo-driver/mongo/collection.go
generated
vendored
Executable file
1399
vendor/go.mongodb.org/mongo-driver/mongo/collection.go
generated
vendored
Executable file
File diff suppressed because it is too large
Load Diff
225
vendor/go.mongodb.org/mongo-driver/mongo/cursor.go
generated
vendored
Executable file
225
vendor/go.mongodb.org/mongo-driver/mongo/cursor.go
generated
vendored
Executable file
@@ -0,0 +1,225 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driverlegacy"
|
||||
)
|
||||
|
||||
// Cursor is used to iterate a stream of documents. Each document is decoded into the result
|
||||
// according to the rules of the bson package.
|
||||
//
|
||||
// A typical usage of the Cursor type would be:
|
||||
//
|
||||
// var cur *Cursor
|
||||
// ctx := context.Background()
|
||||
// defer cur.Close(ctx)
|
||||
//
|
||||
// for cur.Next(ctx) {
|
||||
// elem := &bson.D{}
|
||||
// if err := cur.Decode(elem); err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
// // do something with elem....
|
||||
// }
|
||||
//
|
||||
// if err := cur.Err(); err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
type Cursor struct {
|
||||
// Current is the BSON bytes of the current document. This property is only valid until the next
|
||||
// call to Next or Close. If continued access is required to the bson.Raw, you must make a copy
|
||||
// of it.
|
||||
Current bson.Raw
|
||||
|
||||
bc batchCursor
|
||||
batch *bsoncore.DocumentSequence
|
||||
registry *bsoncodec.Registry
|
||||
clientSession *session.Client
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func newCursor(bc batchCursor, registry *bsoncodec.Registry) (*Cursor, error) {
|
||||
return newCursorWithSession(bc, registry, nil)
|
||||
}
|
||||
|
||||
func newCursorWithSession(bc batchCursor, registry *bsoncodec.Registry, clientSession *session.Client) (*Cursor, error) {
|
||||
if registry == nil {
|
||||
registry = bson.DefaultRegistry
|
||||
}
|
||||
if bc == nil {
|
||||
return nil, errors.New("batch cursor must not be nil")
|
||||
}
|
||||
c := &Cursor{
|
||||
bc: bc,
|
||||
registry: registry,
|
||||
clientSession: clientSession,
|
||||
}
|
||||
if bc.ID() == 0 {
|
||||
c.closeImplicitSession()
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func newEmptyCursor() *Cursor {
|
||||
return &Cursor{bc: driverlegacy.NewEmptyBatchCursor()}
|
||||
}
|
||||
|
||||
// ID returns the ID of this cursor.
|
||||
func (c *Cursor) ID() int64 { return c.bc.ID() }
|
||||
|
||||
// Next gets the next result from this cursor. Returns true if there were no errors and the next
|
||||
// result is available for decoding.
|
||||
func (c *Cursor) Next(ctx context.Context) bool {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
doc, err := c.batch.Next()
|
||||
switch err {
|
||||
case nil:
|
||||
c.Current = bson.Raw(doc)
|
||||
return true
|
||||
case io.EOF: // Need to do a getMore
|
||||
default:
|
||||
c.err = err
|
||||
return false
|
||||
}
|
||||
|
||||
// call the Next method in a loop until at least one document is returned in the next batch or
|
||||
// the context times out.
|
||||
for {
|
||||
// If we don't have a next batch
|
||||
if !c.bc.Next(ctx) {
|
||||
// Do we have an error? If so we return false.
|
||||
c.err = c.bc.Err()
|
||||
if c.err != nil {
|
||||
return false
|
||||
}
|
||||
// Is the cursor ID zero?
|
||||
if c.bc.ID() == 0 {
|
||||
c.closeImplicitSession()
|
||||
return false
|
||||
}
|
||||
// empty batch, but cursor is still valid, so continue.
|
||||
continue
|
||||
}
|
||||
|
||||
// close the implicit session if this was the last getMore
|
||||
if c.bc.ID() == 0 {
|
||||
c.closeImplicitSession()
|
||||
}
|
||||
|
||||
c.batch = c.bc.Batch()
|
||||
doc, err = c.batch.Next()
|
||||
switch err {
|
||||
case nil:
|
||||
c.Current = bson.Raw(doc)
|
||||
return true
|
||||
case io.EOF: // Empty batch so we continue
|
||||
default:
|
||||
c.err = err
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decode will decode the current document into val. If val is nil or is a typed nil, an error will be returned.
|
||||
func (c *Cursor) Decode(val interface{}) error {
|
||||
return bson.UnmarshalWithRegistry(c.registry, c.Current, val)
|
||||
}
|
||||
|
||||
// Err returns the current error.
|
||||
func (c *Cursor) Err() error { return c.err }
|
||||
|
||||
// Close closes this cursor.
|
||||
func (c *Cursor) Close(ctx context.Context) error {
|
||||
defer c.closeImplicitSession()
|
||||
return c.bc.Close(ctx)
|
||||
}
|
||||
|
||||
// All iterates the cursor and decodes each document into results.
|
||||
// The results parameter must be a pointer to a slice. The slice pointed to by results will be completely overwritten.
|
||||
// If the cursor has been iterated, any previously iterated documents will not be included in results.
|
||||
func (c *Cursor) All(ctx context.Context, results interface{}) error {
|
||||
resultsVal := reflect.ValueOf(results)
|
||||
if resultsVal.Kind() != reflect.Ptr {
|
||||
return errors.New("results argument must be a pointer to a slice")
|
||||
}
|
||||
|
||||
sliceVal := resultsVal.Elem()
|
||||
elementType := sliceVal.Type().Elem()
|
||||
var index int
|
||||
var err error
|
||||
|
||||
batch := c.batch // exhaust the current batch before iterating the batch cursor
|
||||
for {
|
||||
sliceVal, index, err = c.addFromBatch(sliceVal, elementType, batch, index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !c.bc.Next(ctx) {
|
||||
break
|
||||
}
|
||||
|
||||
batch = c.bc.Batch()
|
||||
}
|
||||
|
||||
if err = c.bc.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resultsVal.Elem().Set(sliceVal.Slice(0, index))
|
||||
return nil
|
||||
}
|
||||
|
||||
// addFromBatch adds all documents from batch to sliceVal starting at the given index. It returns the new slice value,
|
||||
// the next empty index in the slice, and an error if one occurs.
|
||||
func (c *Cursor) addFromBatch(sliceVal reflect.Value, elemType reflect.Type, batch *bsoncore.DocumentSequence,
|
||||
index int) (reflect.Value, int, error) {
|
||||
|
||||
docs, err := batch.Documents()
|
||||
if err != nil {
|
||||
return sliceVal, index, err
|
||||
}
|
||||
|
||||
for _, doc := range docs {
|
||||
if sliceVal.Len() == index {
|
||||
// slice is full
|
||||
newElem := reflect.New(elemType)
|
||||
sliceVal = reflect.Append(sliceVal, newElem.Elem())
|
||||
sliceVal = sliceVal.Slice(0, sliceVal.Cap())
|
||||
}
|
||||
|
||||
currElem := sliceVal.Index(index).Addr().Interface()
|
||||
if err = bson.UnmarshalWithRegistry(c.registry, doc, currElem); err != nil {
|
||||
return sliceVal, index, err
|
||||
}
|
||||
|
||||
index++
|
||||
}
|
||||
|
||||
return sliceVal, index, nil
|
||||
}
|
||||
|
||||
func (c *Cursor) closeImplicitSession() {
|
||||
if c.clientSession != nil && c.clientSession.SessionType == session.Implicit {
|
||||
c.clientSession.EndSession()
|
||||
}
|
||||
}
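// Illustrative sketch, not part of the vendored file: typical use of the
// Cursor type defined above. It assumes a *Collection value named coll and the
// package's existing context and bson imports; the function name and result
// handling are hypothetical.
func exampleIterateCursor(ctx context.Context, coll *Collection) error {
	cur, err := coll.Find(ctx, bson.D{})
	if err != nil {
		return err
	}
	// Close releases server-side resources and ends any implicit session.
	defer cur.Close(ctx)

	for cur.Next(ctx) {
		var doc bson.M
		if err := cur.Decode(&doc); err != nil {
			return err
		}
		// use doc...
	}
	// Err reports any error encountered while iterating batches.
	return cur.Err()
}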
|
||||
365
vendor/go.mongodb.org/mongo-driver/mongo/database.go
generated
vendored
Executable file
@@ -0,0 +1,365 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultRunCmdOpts = []*options.RunCmdOptions{options.RunCmd().SetReadPreference(readpref.Primary())}
|
||||
)
|
||||
|
||||
// Database performs operations on a given database.
|
||||
type Database struct {
|
||||
client *Client
|
||||
name string
|
||||
readConcern *readconcern.ReadConcern
|
||||
writeConcern *writeconcern.WriteConcern
|
||||
readPreference *readpref.ReadPref
|
||||
readSelector description.ServerSelector
|
||||
writeSelector description.ServerSelector
|
||||
registry *bsoncodec.Registry
|
||||
}
|
||||
|
||||
func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) *Database {
|
||||
dbOpt := options.MergeDatabaseOptions(opts...)
|
||||
|
||||
rc := client.readConcern
|
||||
if dbOpt.ReadConcern != nil {
|
||||
rc = dbOpt.ReadConcern
|
||||
}
|
||||
|
||||
rp := client.readPreference
|
||||
if dbOpt.ReadPreference != nil {
|
||||
rp = dbOpt.ReadPreference
|
||||
}
|
||||
|
||||
wc := client.writeConcern
|
||||
if dbOpt.WriteConcern != nil {
|
||||
wc = dbOpt.WriteConcern
|
||||
}
|
||||
|
||||
db := &Database{
|
||||
client: client,
|
||||
name: name,
|
||||
readPreference: rp,
|
||||
readConcern: rc,
|
||||
writeConcern: wc,
|
||||
registry: client.registry,
|
||||
}
|
||||
|
||||
db.readSelector = description.CompositeSelector([]description.ServerSelector{
|
||||
description.ReadPrefSelector(db.readPreference),
|
||||
description.LatencySelector(db.client.localThreshold),
|
||||
})
|
||||
|
||||
db.writeSelector = description.CompositeSelector([]description.ServerSelector{
|
||||
description.WriteSelector(),
|
||||
description.LatencySelector(db.client.localThreshold),
|
||||
})
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
// Client returns the Client the database was created from.
|
||||
func (db *Database) Client() *Client {
|
||||
return db.client
|
||||
}
|
||||
|
||||
// Name returns the name of the database.
|
||||
func (db *Database) Name() string {
|
||||
return db.name
|
||||
}
|
||||
|
||||
// Collection gets a handle for a given collection in the database.
|
||||
func (db *Database) Collection(name string, opts ...*options.CollectionOptions) *Collection {
|
||||
return newCollection(db, name, opts...)
|
||||
}
|
||||
|
||||
// Aggregate runs an aggregation framework pipeline.
|
||||
//
|
||||
// See https://docs.mongodb.com/manual/aggregation/.
|
||||
func (db *Database) Aggregate(ctx context.Context, pipeline interface{},
|
||||
opts ...*options.AggregateOptions) (*Cursor, error) {
|
||||
a := aggregateParams{
|
||||
ctx: ctx,
|
||||
pipeline: pipeline,
|
||||
client: db.client,
|
||||
registry: db.registry,
|
||||
readConcern: db.readConcern,
|
||||
writeConcern: db.writeConcern,
|
||||
db: db.name,
|
||||
readSelector: db.readSelector,
|
||||
writeSelector: db.writeSelector,
|
||||
readPreference: db.readPreference,
|
||||
opts: opts,
|
||||
}
|
||||
return aggregate(a)
|
||||
}
|
||||
|
||||
func (db *Database) processRunCommand(ctx context.Context, cmd interface{},
|
||||
opts ...*options.RunCmdOptions) (*operation.Command, *session.Client, error) {
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
if sess == nil && db.client.topology.SessionPool != nil {
|
||||
var err error
|
||||
sess, err = session.NewClientSession(db.client.topology.SessionPool, db.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return nil, sess, err
|
||||
}
|
||||
}
|
||||
|
||||
err := db.client.validSession(sess)
|
||||
if err != nil {
|
||||
return nil, sess, err
|
||||
}
|
||||
|
||||
ro := options.MergeRunCmdOptions(append(defaultRunCmdOpts, opts...)...)
|
||||
if sess != nil && sess.TransactionRunning() && ro.ReadPreference != nil && ro.ReadPreference.Mode() != readpref.PrimaryMode {
|
||||
return nil, sess, errors.New("read preference in a transaction must be primary")
|
||||
}
|
||||
|
||||
runCmdDoc, err := transformBsoncoreDocument(db.registry, cmd)
|
||||
if err != nil {
|
||||
return nil, sess, err
|
||||
}
|
||||
readSelect := description.CompositeSelector([]description.ServerSelector{
|
||||
description.ReadPrefSelector(ro.ReadPreference),
|
||||
description.LatencySelector(db.client.localThreshold),
|
||||
})
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
readSelect = sess.PinnedServer
|
||||
}
|
||||
|
||||
return operation.NewCommand(runCmdDoc).
|
||||
Session(sess).CommandMonitor(db.client.monitor).
|
||||
ServerSelector(readSelect).ClusterClock(db.client.clock).
|
||||
Database(db.name).Deployment(db.client.topology).ReadConcern(db.readConcern), sess, nil
|
||||
}
|
||||
|
||||
// RunCommand runs a command on the database. A user can supply a custom
|
||||
// context to this method, or nil to default to context.Background().
|
||||
func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
op, sess, err := db.processRunCommand(ctx, runCommand, opts...)
|
||||
defer closeImplicitSession(sess)
|
||||
if err != nil {
|
||||
return &SingleResult{err: err}
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
return &SingleResult{
|
||||
err: replaceErrors(err),
|
||||
rdr: bson.Raw(op.Result()),
|
||||
reg: db.registry,
|
||||
}
|
||||
}
|
||||
|
||||
// RunCommandCursor runs a command on the database and returns a cursor over the resulting reader. A user can supply
|
||||
// a custom context to this method, or nil to default to context.Background().
|
||||
func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
op, sess, err := db.processRunCommand(ctx, runCommand, opts...)
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, err
|
||||
}
|
||||
bc, err := op.ResultCursor(driver.CursorOptions{})
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
cursor, err := newCursorWithSession(bc, db.registry, sess)
|
||||
return cursor, replaceErrors(err)
|
||||
}
|
||||
|
||||
// Drop drops this database from mongodb.
|
||||
func (db *Database) Drop(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
if sess == nil && db.client.topology.SessionPool != nil {
|
||||
sess, err := session.NewClientSession(db.client.topology.SessionPool, db.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sess.EndSession()
|
||||
}
|
||||
|
||||
err := db.client.validSession(sess)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wc := db.writeConcern
|
||||
if sess.TransactionRunning() {
|
||||
wc = nil
|
||||
}
|
||||
if !writeconcern.AckWrite(wc) {
|
||||
sess = nil
|
||||
}
|
||||
|
||||
selector := db.writeSelector
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
selector = sess.PinnedServer
|
||||
}
|
||||
|
||||
op := operation.NewDropDatabase().
|
||||
Session(sess).WriteConcern(wc).CommandMonitor(db.client.monitor).
|
||||
ServerSelector(selector).ClusterClock(db.client.clock).
|
||||
Database(db.name).Deployment(db.client.topology)
|
||||
|
||||
err = op.Execute(ctx)
|
||||
|
||||
driverErr, ok := err.(driver.Error)
|
||||
if err != nil && (!ok || !driverErr.NamespaceNotFound()) {
|
||||
return replaceErrors(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListCollections returns a cursor over the collections in a database.
|
||||
func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
filterDoc, err := transformBsoncoreDocument(db.registry, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
if sess == nil && db.client.topology.SessionPool != nil {
|
||||
sess, err = session.NewClientSession(db.client.topology.SessionPool, db.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = db.client.validSession(sess)
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selector := db.readSelector
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
selector = sess.PinnedServer
|
||||
}
|
||||
|
||||
lco := options.MergeListCollectionsOptions(opts...)
|
||||
op := operation.NewListCollections(filterDoc).
|
||||
Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor).
|
||||
ServerSelector(selector).ClusterClock(db.client.clock).
|
||||
Database(db.name).Deployment(db.client.topology)
|
||||
if lco.NameOnly != nil {
|
||||
op = op.NameOnly(*lco.NameOnly)
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
|
||||
bc, err := op.Result(driver.CursorOptions{})
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
cursor, err := newCursorWithSession(bc, db.registry, sess)
|
||||
return cursor, replaceErrors(err)
|
||||
}
|
||||
|
||||
// ListCollectionNames returns a slice containing the names of all of the collections on the server.
|
||||
func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) {
|
||||
opts = append(opts, options.ListCollections().SetNameOnly(true))
|
||||
|
||||
res, err := db.ListCollections(ctx, filter, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make([]string, 0)
|
||||
for res.Next(ctx) {
|
||||
next := &bsonx.Doc{}
|
||||
err = res.Decode(next)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
elem, err := next.LookupErr("name")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if elem.Type() != bson.TypeString {
|
||||
return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString)
|
||||
}
|
||||
|
||||
elemName := elem.StringValue()
|
||||
names = append(names, elemName)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// ReadConcern returns the read concern of this database.
|
||||
func (db *Database) ReadConcern() *readconcern.ReadConcern {
|
||||
return db.readConcern
|
||||
}
|
||||
|
||||
// ReadPreference returns the read preference of this database.
|
||||
func (db *Database) ReadPreference() *readpref.ReadPref {
|
||||
return db.readPreference
|
||||
}
|
||||
|
||||
// WriteConcern returns the write concern of this database.
|
||||
func (db *Database) WriteConcern() *writeconcern.WriteConcern {
|
||||
return db.writeConcern
|
||||
}
|
||||
|
||||
// Watch returns a change stream cursor used to receive information of changes to the database. This method is preferred
|
||||
// to running a raw aggregation with a $changeStream stage because it supports resumability in the case of some errors.
|
||||
// The database must have read concern majority or no read concern for a change stream to be created successfully.
|
||||
func (db *Database) Watch(ctx context.Context, pipeline interface{},
|
||||
opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
|
||||
|
||||
csConfig := changeStreamConfig{
|
||||
readConcern: db.readConcern,
|
||||
readPreference: db.readPreference,
|
||||
client: db.client,
|
||||
registry: db.registry,
|
||||
streamType: DatabaseStream,
|
||||
databaseName: db.Name(),
|
||||
}
|
||||
return newChangeStream(ctx, csConfig, pipeline, opts...)
|
||||
}
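// Illustrative sketch, not part of the vendored file: exercising a few of the
// Database methods defined above. It assumes a *Database value named db; the
// command and filter documents are arbitrary examples.
func exampleDatabaseUsage(ctx context.Context, db *Database) error {
	// RunCommand returns a *SingleResult; Decode surfaces any execution error.
	var ping bson.M
	if err := db.RunCommand(ctx, bson.D{{"ping", 1}}).Decode(&ping); err != nil {
		return err
	}

	// ListCollectionNames wraps ListCollections with the nameOnly option set.
	names, err := db.ListCollectionNames(ctx, bson.D{})
	if err != nil {
		return err
	}
	_ = names // use names...
	return nil
}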
|
||||
81
vendor/go.mongodb.org/mongo-driver/mongo/doc.go
generated
vendored
Executable file
@@ -0,0 +1,81 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// NOTE: This documentation should be kept in line with the Example* test functions.
|
||||
|
||||
// Package mongo provides a MongoDB Driver API for Go.
|
||||
//
|
||||
// Basic usage of the driver starts with creating a Client from a connection
|
||||
// string. To do so, call the NewClient and Connect functions:
|
||||
//
|
||||
// client, err := NewClient(options.Client().ApplyURI("mongodb://foo:bar@localhost:27017"))
|
||||
// if err != nil { return err }
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
// defer cancel()
|
||||
// err = client.Connect(ctx)
|
||||
// if err != nil { return err }
|
||||
//
|
||||
// This will create a new client and start monitoring the MongoDB server on localhost.
|
||||
// The Database and Collection types can be used to access the database:
|
||||
//
|
||||
// collection := client.Database("baz").Collection("qux")
|
||||
//
|
||||
// A Collection can be used to query the database or insert documents:
|
||||
//
|
||||
// res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"})
|
||||
// if err != nil { return err }
|
||||
// id := res.InsertedID
|
||||
//
|
||||
// Several methods return a cursor, which can be used like this:
|
||||
//
|
||||
// cur, err := collection.Find(context.Background(), bson.D{})
|
||||
// if err != nil { log.Fatal(err) }
|
||||
// defer cur.Close(context.Background())
|
||||
// for cur.Next(context.Background()) {
|
||||
// // To decode into a struct, use cursor.Decode()
|
||||
// result := struct{
|
||||
// Foo string
|
||||
// Bar int32
|
||||
// }{}
|
||||
// err := cur.Decode(&result)
|
||||
// if err != nil { log.Fatal(err) }
|
||||
// // do something with result...
|
||||
//
|
||||
// // To get the raw bson bytes use cursor.Current
|
||||
// raw := cur.Current
|
||||
// // do something with raw...
|
||||
// }
|
||||
// if err := cur.Err(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Methods that only return a single document will return a *SingleResult, which works
|
||||
// like a *sql.Row:
|
||||
//
|
||||
// result := struct{
|
||||
// Foo string
|
||||
// Bar int32
|
||||
// }{}
|
||||
// filter := bson.D{{"hello", "world"}}
|
||||
// err := collection.FindOne(context.Background(), filter).Decode(&result)
|
||||
// if err != nil { return err }
|
||||
// // do something with result...
|
||||
//
|
||||
// All Client, Collection, and Database methods that take parameters of type interface{}
|
||||
// will return ErrNilDocument if nil is passed in for an interface{}.
|
||||
//
|
||||
// Additional examples can be found under the examples directory in the driver's repository and
|
||||
// on the MongoDB website.
|
||||
//
|
||||
// Potential DNS Issues
|
||||
//
|
||||
// Building with Go 1.11+ and using connection strings with the "mongodb+srv"[1] scheme is
|
||||
// incompatible with some DNS servers in the wild due to the change introduced in
|
||||
// https://github.com/golang/go/issues/10622. If you receive an error with the message "cannot
|
||||
// unmarshal DNS message" while running an operation, we suggest you use a different DNS server.
|
||||
//
|
||||
// [1] See https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format
|
||||
package mongo
|
||||
258
vendor/go.mongodb.org/mongo-driver/mongo/errors.go
generated
vendored
Executable file
@@ -0,0 +1,258 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driverlegacy"
|
||||
"go.mongodb.org/mongo-driver/x/network/command"
|
||||
"go.mongodb.org/mongo-driver/x/network/result"
|
||||
)
|
||||
|
||||
// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
|
||||
// write concern.
|
||||
var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
|
||||
|
||||
// ErrClientDisconnected is returned when a user attempts to call a method on a
|
||||
// disconnected client
|
||||
var ErrClientDisconnected = errors.New("client is disconnected")
|
||||
|
||||
// ErrNilDocument is returned when a user attempts to pass a nil document or filter
|
||||
// to a function where the field is required.
|
||||
var ErrNilDocument = errors.New("document is nil")
|
||||
|
||||
// ErrEmptySlice is returned when a user attempts to pass an empty slice as input
|
||||
// to a function where the field is required.
|
||||
var ErrEmptySlice = errors.New("must provide at least one element in input slice")
|
||||
|
||||
func replaceErrors(err error) error {
|
||||
if err == topology.ErrTopologyClosed {
|
||||
return ErrClientDisconnected
|
||||
}
|
||||
if ce, ok := err.(command.Error); ok {
|
||||
return CommandError{Code: ce.Code, Message: ce.Message, Labels: ce.Labels, Name: ce.Name}
|
||||
}
|
||||
if de, ok := err.(driver.Error); ok {
|
||||
return CommandError{Code: de.Code, Message: de.Message, Labels: de.Labels, Name: de.Name}
|
||||
}
|
||||
if conv, ok := err.(driverlegacy.BulkWriteException); ok {
|
||||
return BulkWriteException{
|
||||
WriteConcernError: convertWriteConcernError(conv.WriteConcernError),
|
||||
WriteErrors: convertBulkWriteErrors(conv.WriteErrors),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// CommandError represents an error in execution of a command against the database.
|
||||
type CommandError struct {
|
||||
Code int32
|
||||
Message string
|
||||
Labels []string
|
||||
Name string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e CommandError) Error() string {
|
||||
if e.Name != "" {
|
||||
return fmt.Sprintf("(%v) %v", e.Name, e.Message)
|
||||
}
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// HasErrorLabel returns true if the error contains the specified label.
|
||||
func (e CommandError) HasErrorLabel(label string) bool {
|
||||
if e.Labels != nil {
|
||||
for _, l := range e.Labels {
|
||||
if l == label {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WriteError is a non-write concern failure that occurred as a result of a write
|
||||
// operation.
|
||||
type WriteError struct {
|
||||
Index int
|
||||
Code int
|
||||
Message string
|
||||
}
|
||||
|
||||
func (we WriteError) Error() string { return we.Message }
|
||||
|
||||
// WriteErrors is a group of non-write concern failures that occurred as a result
|
||||
// of a write operation.
|
||||
type WriteErrors []WriteError
|
||||
|
||||
func (we WriteErrors) Error() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "write errors: [")
|
||||
for idx, err := range we {
|
||||
if idx != 0 {
|
||||
fmt.Fprintf(&buf, ", ")
|
||||
}
|
||||
fmt.Fprintf(&buf, "{%s}", err)
|
||||
}
|
||||
fmt.Fprint(&buf, "]")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func writeErrorsFromResult(rwes []result.WriteError) WriteErrors {
|
||||
wes := make(WriteErrors, 0, len(rwes))
|
||||
for _, err := range rwes {
|
||||
wes = append(wes, WriteError{Index: err.Index, Code: err.Code, Message: err.ErrMsg})
|
||||
}
|
||||
return wes
|
||||
}
|
||||
|
||||
func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors {
|
||||
wes := make(WriteErrors, 0, len(errs))
|
||||
for _, err := range errs {
|
||||
wes = append(wes, WriteError{Index: int(err.Index), Code: int(err.Code), Message: err.Message})
|
||||
}
|
||||
return wes
|
||||
}
|
||||
|
||||
// WriteConcernError is a write concern failure that occurred as a result of a
|
||||
// write operation.
|
||||
type WriteConcernError struct {
|
||||
Name string
|
||||
Code int
|
||||
Message string
|
||||
Details bson.Raw
|
||||
}
|
||||
|
||||
func (wce WriteConcernError) Error() string {
|
||||
if wce.Name != "" {
|
||||
return fmt.Sprintf("(%v) %v", wce.Name, wce.Message)
|
||||
}
|
||||
return wce.Message
|
||||
}
|
||||
|
||||
// WriteException is an error for a non-bulk write operation.
|
||||
type WriteException struct {
|
||||
WriteConcernError *WriteConcernError
|
||||
WriteErrors WriteErrors
|
||||
}
|
||||
|
||||
func (mwe WriteException) Error() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "multiple write errors: [")
|
||||
fmt.Fprintf(&buf, "{%s}, ", mwe.WriteErrors)
|
||||
fmt.Fprintf(&buf, "{%s}]", mwe.WriteConcernError)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func convertBulkWriteErrors(errors []driverlegacy.BulkWriteError) []BulkWriteError {
|
||||
bwErrors := make([]BulkWriteError, 0, len(errors))
|
||||
for _, err := range errors {
|
||||
bwErrors = append(bwErrors, BulkWriteError{
|
||||
WriteError{
|
||||
Index: err.Index,
|
||||
Code: err.Code,
|
||||
Message: err.ErrMsg,
|
||||
},
|
||||
dispatchToMongoModel(err.Model),
|
||||
})
|
||||
}
|
||||
|
||||
return bwErrors
|
||||
}
|
||||
|
||||
func convertWriteConcernError(wce *result.WriteConcernError) *WriteConcernError {
|
||||
if wce == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &WriteConcernError{Name: wce.Name, Code: wce.Code, Message: wce.ErrMsg, Details: wce.ErrInfo}
|
||||
}
|
||||
|
||||
func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcernError {
|
||||
if wce == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &WriteConcernError{Code: int(wce.Code), Message: wce.Message, Details: bson.Raw(wce.Details)}
|
||||
}
|
||||
|
||||
// BulkWriteError is an error for one operation in a bulk write.
|
||||
type BulkWriteError struct {
|
||||
WriteError
|
||||
Request WriteModel
|
||||
}
|
||||
|
||||
func (bwe BulkWriteError) Error() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "{%s}", bwe.WriteError)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// BulkWriteException is an error for a bulk write operation.
|
||||
type BulkWriteException struct {
|
||||
WriteConcernError *WriteConcernError
|
||||
WriteErrors []BulkWriteError
|
||||
}
|
||||
|
||||
func (bwe BulkWriteException) Error() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "bulk write error: [")
|
||||
fmt.Fprintf(&buf, "{%s}, ", bwe.WriteErrors)
|
||||
fmt.Fprintf(&buf, "{%s}]", bwe.WriteConcernError)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// returnResult is used to determine if a function calling processWriteError should return
|
||||
// the result or return nil. Since the processWriteError function is used by many different
|
||||
// methods, both *One and *Many, we need a way to indicate whether the method should return
// the result in addition to the error.
|
||||
type returnResult int
|
||||
|
||||
const (
|
||||
rrNone returnResult = 1 << iota // None means do not return the result ever.
|
||||
rrOne // One means return the result if this was called by a *One method.
|
||||
rrMany // Many means return the result if this was called by a *Many method.
|
||||
|
||||
rrAll returnResult = rrOne | rrMany // All means always return the result.
|
||||
)
|
||||
|
||||
// processWriteError handles the result of a write operation. If the returnResult matches
|
||||
// the calling method's type, it should return the result object in addition to the error.
|
||||
// This function will wrap the errors from other packages and return them as errors from this package.
|
||||
//
|
||||
// WriteConcernError will be returned over WriteErrors if both are present.
|
||||
func processWriteError(wce *result.WriteConcernError, wes []result.WriteError, err error) (returnResult, error) {
|
||||
switch {
|
||||
case err == command.ErrUnacknowledgedWrite, err == driver.ErrUnacknowledgedWrite:
|
||||
return rrAll, ErrUnacknowledgedWrite
|
||||
case err != nil:
|
||||
switch tt := err.(type) {
|
||||
case driver.WriteCommandError:
|
||||
return rrMany, WriteException{
|
||||
WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError),
|
||||
WriteErrors: writeErrorsFromDriverWriteErrors(tt.WriteErrors),
|
||||
}
|
||||
default:
|
||||
return rrNone, replaceErrors(err)
|
||||
}
|
||||
case wce != nil || len(wes) > 0:
|
||||
return rrMany, WriteException{
|
||||
WriteConcernError: convertWriteConcernError(wce),
|
||||
WriteErrors: writeErrorsFromResult(wes),
|
||||
}
|
||||
default:
|
||||
return rrAll, nil
|
||||
}
|
||||
}
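// Illustrative sketch, not part of the vendored file: inspecting an error
// returned by a driver operation using the error types defined above. The
// error label string is only an example.
func exampleClassifyError(err error) {
	switch e := err.(type) {
	case CommandError:
		if e.HasErrorLabel("TransientTransactionError") {
			// the operation may be safe to retry...
		}
	case WriteException:
		for _, we := range e.WriteErrors {
			_ = we.Code // inspect individual write errors...
		}
	default:
		// not a command or write error
	}
}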
|
||||
134
vendor/go.mongodb.org/mongo-driver/mongo/index_options_builder.go
generated
vendored
Executable file
@@ -0,0 +1,134 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
)
|
||||
|
||||
// IndexOptionsBuilder constructs a BSON document for index options
|
||||
type IndexOptionsBuilder struct {
|
||||
document bson.D
|
||||
}
|
||||
|
||||
// NewIndexOptionsBuilder creates a new instance of IndexOptionsBuilder
|
||||
func NewIndexOptionsBuilder() *IndexOptionsBuilder {
|
||||
return &IndexOptionsBuilder{}
|
||||
}
|
||||
|
||||
// Background sets the background option
|
||||
func (iob *IndexOptionsBuilder) Background(background bool) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"background", background})
|
||||
return iob
|
||||
}
|
||||
|
||||
// ExpireAfterSeconds sets the expireAfterSeconds option
|
||||
func (iob *IndexOptionsBuilder) ExpireAfterSeconds(expireAfterSeconds int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"expireAfterSeconds", expireAfterSeconds})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Name sets the name option
|
||||
func (iob *IndexOptionsBuilder) Name(name string) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"name", name})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Sparse sets the sparse option
|
||||
func (iob *IndexOptionsBuilder) Sparse(sparse bool) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"sparse", sparse})
|
||||
return iob
|
||||
}
|
||||
|
||||
// StorageEngine sets the storageEngine option
|
||||
func (iob *IndexOptionsBuilder) StorageEngine(storageEngine interface{}) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"storageEngine", storageEngine})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Unique sets the unique option
|
||||
func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"unique", unique})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Version sets the version option
|
||||
func (iob *IndexOptionsBuilder) Version(version int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"v", version})
|
||||
return iob
|
||||
}
|
||||
|
||||
// DefaultLanguage sets the defaultLanguage option
|
||||
func (iob *IndexOptionsBuilder) DefaultLanguage(defaultLanguage string) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"default_language", defaultLanguage})
|
||||
return iob
|
||||
}
|
||||
|
||||
// LanguageOverride sets the languageOverride option
|
||||
func (iob *IndexOptionsBuilder) LanguageOverride(languageOverride string) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"language_override", languageOverride})
|
||||
return iob
|
||||
}
|
||||
|
||||
// TextVersion sets the textVersion option
|
||||
func (iob *IndexOptionsBuilder) TextVersion(textVersion int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"textIndexVersion", textVersion})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Weights sets the weights option
|
||||
func (iob *IndexOptionsBuilder) Weights(weights interface{}) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"weights", weights})
|
||||
return iob
|
||||
}
|
||||
|
||||
// SphereVersion sets the sphereVersion option
|
||||
func (iob *IndexOptionsBuilder) SphereVersion(sphereVersion int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"2dsphereIndexVersion", sphereVersion})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Bits sets the bits option
|
||||
func (iob *IndexOptionsBuilder) Bits(bits int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"bits", bits})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Max sets the max option
|
||||
func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"max", max})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Min sets the min option
|
||||
func (iob *IndexOptionsBuilder) Min(min float64) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"min", min})
|
||||
return iob
|
||||
}
|
||||
|
||||
// BucketSize sets the bucketSize option
|
||||
func (iob *IndexOptionsBuilder) BucketSize(bucketSize int32) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"bucketSize", bucketSize})
|
||||
return iob
|
||||
}
|
||||
|
||||
// PartialFilterExpression sets the partialFilterExpression option
|
||||
func (iob *IndexOptionsBuilder) PartialFilterExpression(partialFilterExpression interface{}) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"partialFilterExpression", partialFilterExpression})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Collation sets the collation option
|
||||
func (iob *IndexOptionsBuilder) Collation(collation interface{}) *IndexOptionsBuilder {
|
||||
iob.document = append(iob.document, bson.E{"collation", collation})
|
||||
return iob
|
||||
}
|
||||
|
||||
// Build returns the BSON document from the builder
|
||||
func (iob *IndexOptionsBuilder) Build() bson.D {
|
||||
return iob.document
|
||||
}
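// Illustrative sketch, not part of the vendored file: chaining the builder
// defined above to produce an index options document. The index name and
// option values are arbitrary examples.
func exampleIndexOptions() bson.D {
	return NewIndexOptionsBuilder().
		Unique(true).
		Sparse(false).
		Name("email_unique").
		Build()
}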
|
||||
423
vendor/go.mongodb.org/mongo-driver/mongo/index_view.go
generated
vendored
Executable file
@@ -0,0 +1,423 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
)
|
||||
|
||||
// ErrInvalidIndexValue indicates that the index Keys document has a value that isn't either a number or a string.
|
||||
var ErrInvalidIndexValue = errors.New("invalid index value")
|
||||
|
||||
// ErrNonStringIndexName indicates that the index name specified in the options is not a string.
|
||||
var ErrNonStringIndexName = errors.New("index name must be a string")
|
||||
|
||||
// ErrMultipleIndexDrop indicates that multiple indexes would be dropped from a call to IndexView.DropOne.
|
||||
var ErrMultipleIndexDrop = errors.New("multiple indexes would be dropped")
|
||||
|
||||
// IndexView is used to create, drop, and list indexes on a given collection.
|
||||
type IndexView struct {
|
||||
coll *Collection
|
||||
}
|
||||
|
||||
// IndexModel contains information about an index.
|
||||
type IndexModel struct {
|
||||
Keys interface{}
|
||||
Options *options.IndexOptions
|
||||
}
|
||||
|
||||
func isNamespaceNotFoundError(err error) bool {
|
||||
if de, ok := err.(driver.Error); ok {
|
||||
return de.Code == 26
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// List returns a cursor iterating over all the indexes in the collection.
|
||||
func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
if sess == nil && iv.coll.client.topology.SessionPool != nil {
|
||||
var err error
|
||||
sess, err = session.NewClientSession(iv.coll.client.topology.SessionPool, iv.coll.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err := iv.coll.client.validSession(sess)
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
readSelector := description.CompositeSelector([]description.ServerSelector{
|
||||
description.ReadPrefSelector(readpref.Primary()),
|
||||
description.LatencySelector(iv.coll.client.localThreshold),
|
||||
})
|
||||
op := operation.NewListIndexes().
|
||||
Session(sess).CommandMonitor(iv.coll.client.monitor).
|
||||
ServerSelector(readSelector).ClusterClock(iv.coll.client.clock).
|
||||
Database(iv.coll.db.name).Collection(iv.coll.name).
|
||||
Deployment(iv.coll.client.topology)
|
||||
|
||||
var cursorOpts driver.CursorOptions
|
||||
lio := options.MergeListIndexesOptions(opts...)
|
||||
if lio.BatchSize != nil {
|
||||
op = op.BatchSize(*lio.BatchSize)
|
||||
cursorOpts.BatchSize = *lio.BatchSize
|
||||
}
|
||||
if lio.MaxTime != nil {
|
||||
op = op.MaxTimeMS(int64(*lio.MaxTime / time.Millisecond))
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
if err != nil {
|
||||
// for namespaceNotFound errors, return an empty cursor and do not throw an error
|
||||
closeImplicitSession(sess)
|
||||
if isNamespaceNotFoundError(err) {
|
||||
return newEmptyCursor(), nil
|
||||
}
|
||||
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
|
||||
bc, err := op.Result(cursorOpts)
|
||||
if err != nil {
|
||||
closeImplicitSession(sess)
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
cursor, err := newCursorWithSession(bc, iv.coll.registry, sess)
|
||||
return cursor, replaceErrors(err)
|
||||
}
|
||||
|
||||
// CreateOne creates a single index in the collection specified by the model.
|
||||
func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*options.CreateIndexesOptions) (string, error) {
|
||||
names, err := iv.CreateMany(ctx, []IndexModel{model}, opts...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return names[0], nil
|
||||
}
|
||||
|
||||
// CreateMany creates multiple indexes in the collection specified by the models. The names of the
|
||||
// created indexes are returned.
|
||||
func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) {
|
||||
names := make([]string, 0, len(models))
|
||||
|
||||
var indexes bsoncore.Document
|
||||
aidx, indexes := bsoncore.AppendArrayStart(indexes)
|
||||
|
||||
for i, model := range models {
|
||||
if model.Keys == nil {
|
||||
return nil, fmt.Errorf("index model keys cannot be nil")
|
||||
}
|
||||
|
||||
name, err := getOrGenerateIndexName(iv.coll.registry, model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names = append(names, name)
|
||||
|
||||
keys, err := transformBsoncoreDocument(iv.coll.registry, model.Keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var iidx int32
|
||||
iidx, indexes = bsoncore.AppendDocumentElementStart(indexes, strconv.Itoa(i))
|
||||
indexes = bsoncore.AppendDocumentElement(indexes, "key", keys)
|
||||
|
||||
if model.Options == nil {
|
||||
model.Options = options.Index()
|
||||
}
|
||||
model.Options.SetName(name)
|
||||
|
||||
optsDoc, err := iv.createOptionsDoc(model.Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexes = bsoncore.AppendDocument(indexes, optsDoc)
|
||||
|
||||
indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
indexes, err := bsoncore.AppendArrayEnd(indexes, aidx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
|
||||
if sess == nil && iv.coll.client.topology.SessionPool != nil {
|
||||
sess, err = session.NewClientSession(iv.coll.client.topology.SessionPool, iv.coll.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sess.EndSession()
|
||||
}
|
||||
|
||||
err = iv.coll.client.validSession(sess)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selector := iv.coll.writeSelector
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
selector = sess.PinnedServer
|
||||
}
|
||||
|
||||
option := options.MergeCreateIndexesOptions(opts...)
|
||||
|
||||
op := operation.NewCreateIndexes(indexes).
|
||||
Session(sess).ClusterClock(iv.coll.client.clock).
|
||||
Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor).
|
||||
Deployment(iv.coll.client.topology).ServerSelector(selector)
|
||||
|
||||
if option.MaxTime != nil {
|
||||
op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond))
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Document, error) {
|
||||
optsDoc := bsoncore.Document{}
|
||||
if opts.Background != nil {
|
||||
optsDoc = bsoncore.AppendBooleanElement(optsDoc, "background", *opts.Background)
|
||||
}
|
||||
if opts.ExpireAfterSeconds != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "expireAfterSeconds", *opts.ExpireAfterSeconds)
|
||||
}
|
||||
if opts.Name != nil {
|
||||
optsDoc = bsoncore.AppendStringElement(optsDoc, "name", *opts.Name)
|
||||
}
|
||||
if opts.Sparse != nil {
|
||||
optsDoc = bsoncore.AppendBooleanElement(optsDoc, "sparse", *opts.Sparse)
|
||||
}
|
||||
if opts.StorageEngine != nil {
|
||||
doc, err := transformBsoncoreDocument(iv.coll.registry, opts.StorageEngine)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
optsDoc = bsoncore.AppendDocumentElement(optsDoc, "storageEngine", doc)
|
||||
}
|
||||
if opts.Unique != nil {
|
||||
optsDoc = bsoncore.AppendBooleanElement(optsDoc, "unique", *opts.Unique)
|
||||
}
|
||||
if opts.Version != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "v", *opts.Version)
|
||||
}
|
||||
if opts.DefaultLanguage != nil {
|
||||
optsDoc = bsoncore.AppendStringElement(optsDoc, "default_language", *opts.DefaultLanguage)
|
||||
}
|
||||
if opts.LanguageOverride != nil {
|
||||
optsDoc = bsoncore.AppendStringElement(optsDoc, "language_override", *opts.LanguageOverride)
|
||||
}
|
||||
if opts.TextVersion != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "textIndexVersion", *opts.TextVersion)
|
||||
}
|
||||
if opts.Weights != nil {
|
||||
doc, err := transformBsoncoreDocument(iv.coll.registry, opts.Weights)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
optsDoc = bsoncore.AppendDocumentElement(optsDoc, "weights", doc)
|
||||
}
|
||||
if opts.SphereVersion != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "2dsphereIndexVersion", *opts.SphereVersion)
|
||||
}
|
||||
if opts.Bits != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "bits", *opts.Bits)
|
||||
}
|
||||
if opts.Max != nil {
|
||||
optsDoc = bsoncore.AppendDoubleElement(optsDoc, "max", *opts.Max)
|
||||
}
|
||||
if opts.Min != nil {
|
||||
optsDoc = bsoncore.AppendDoubleElement(optsDoc, "min", *opts.Min)
|
||||
}
|
||||
if opts.BucketSize != nil {
|
||||
optsDoc = bsoncore.AppendInt32Element(optsDoc, "bucketSize", *opts.BucketSize)
|
||||
}
|
||||
if opts.PartialFilterExpression != nil {
|
||||
doc, err := transformBsoncoreDocument(iv.coll.registry, opts.PartialFilterExpression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
optsDoc = bsoncore.AppendDocumentElement(optsDoc, "partialFilterExpression", doc)
|
||||
}
|
||||
if opts.Collation != nil {
|
||||
optsDoc = bsoncore.AppendDocumentElement(optsDoc, "collation", bsoncore.Document(opts.Collation.ToDocument()))
|
||||
}
|
||||
if opts.WildcardProjection != nil {
|
||||
doc, err := transformBsoncoreDocument(iv.coll.registry, opts.WildcardProjection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
optsDoc = bsoncore.AppendDocumentElement(optsDoc, "wildcardProjection", doc)
|
||||
}
|
||||
|
||||
return optsDoc, nil
|
||||
}
|
||||
|
||||
func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
sess := sessionFromContext(ctx)
|
||||
if sess == nil && iv.coll.client.topology.SessionPool != nil {
|
||||
var err error
|
||||
sess, err = session.NewClientSession(iv.coll.client.topology.SessionPool, iv.coll.client.id, session.Implicit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sess.EndSession()
|
||||
}
|
||||
|
||||
err := iv.coll.client.validSession(sess)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wc := iv.coll.writeConcern
|
||||
if sess.TransactionRunning() {
|
||||
wc = nil
|
||||
}
|
||||
if !writeconcern.AckWrite(wc) {
|
||||
sess = nil
|
||||
}
|
||||
|
||||
selector := iv.coll.writeSelector
|
||||
if sess != nil && sess.PinnedServer != nil {
|
||||
selector = sess.PinnedServer
|
||||
}
|
||||
|
||||
dio := options.MergeDropIndexesOptions(opts...)
|
||||
op := operation.NewDropIndexes(name).
|
||||
Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor).
|
||||
ServerSelector(selector).ClusterClock(iv.coll.client.clock).
|
||||
Database(iv.coll.db.name).Collection(iv.coll.name).
|
||||
Deployment(iv.coll.client.topology)
|
||||
if dio.MaxTime != nil {
|
||||
op.MaxTimeMS(int64(*dio.MaxTime / time.Millisecond))
|
||||
}
|
||||
|
||||
err = op.Execute(ctx)
|
||||
if err != nil {
|
||||
return nil, replaceErrors(err)
|
||||
}
|
||||
|
||||
// TODO: it's weird to return a bson.Raw here because we have to convert the result back to BSON
|
||||
ridx, res := bsoncore.AppendDocumentStart(nil)
|
||||
res = bsoncore.AppendInt32Element(res, "nIndexesWas", op.Result().NIndexesWas)
|
||||
res, _ = bsoncore.AppendDocumentEnd(res, ridx)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// DropOne drops the index with the given name from the collection.
|
||||
func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
|
||||
if name == "*" {
|
||||
return nil, ErrMultipleIndexDrop
|
||||
}
|
||||
|
||||
return iv.drop(ctx, name, opts...)
|
||||
}
|
||||
|
||||
// DropAll drops all indexes in the collection.
|
||||
func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
|
||||
return iv.drop(ctx, "*", opts...)
|
||||
}
|
||||
|
||||
func getOrGenerateIndexName(registry *bsoncodec.Registry, model IndexModel) (string, error) {
|
||||
if model.Options != nil && model.Options.Name != nil {
|
||||
return *model.Options.Name, nil
|
||||
}
|
||||
|
||||
name := bytes.NewBufferString("")
|
||||
first := true
|
||||
|
||||
keys, err := transformDocument(registry, model.Keys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, elem := range keys {
|
||||
if !first {
|
||||
_, err := name.WriteRune('_')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
_, err := name.WriteString(elem.Key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, err = name.WriteRune('_')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var value string
|
||||
|
||||
switch elem.Value.Type() {
|
||||
case bsontype.Int32:
|
||||
value = fmt.Sprintf("%d", elem.Value.Int32())
|
||||
case bsontype.Int64:
|
||||
value = fmt.Sprintf("%d", elem.Value.Int64())
|
||||
case bsontype.String:
|
||||
value = elem.Value.StringValue()
|
||||
default:
|
||||
return "", ErrInvalidIndexValue
|
||||
}
|
||||
|
||||
_, err = name.WriteString(value)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
first = false
|
||||
}
|
||||
|
||||
return name.String(), nil
|
||||
}
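// Illustrative sketch, not part of the vendored file: creating an index with
// the IndexView defined above. It assumes a *Collection value named coll; the
// key and option values are arbitrary examples.
func exampleCreateIndex(ctx context.Context, coll *Collection) (string, error) {
	return coll.Indexes().CreateOne(ctx, IndexModel{
		Keys:    bson.D{{"email", 1}},
		Options: options.Index().SetUnique(true),
	})
}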
|
||||
375
vendor/go.mongodb.org/mongo-driver/mongo/mongo.go
generated
vendored
Executable file
@@ -0,0 +1,375 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo // import "go.mongodb.org/mongo-driver/mongo"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
// Dialer is used to make network connections.
|
||||
type Dialer interface {
|
||||
DialContext(ctx context.Context, network, address string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// BSONAppender is an interface implemented by types that can marshal a
|
||||
// provided type into BSON bytes and append those bytes to the provided []byte.
|
||||
// The AppendBSON can return a non-nil error and non-nil []byte. The AppendBSON
|
||||
// method may also write incomplete BSON to the []byte.
|
||||
type BSONAppender interface {
|
||||
AppendBSON([]byte, interface{}) ([]byte, error)
|
||||
}
|
||||
|
||||
// BSONAppenderFunc is an adapter function that allows any function that
|
||||
// satisfies the AppendBSON method signature to be used where a BSONAppender is
|
||||
// used.
|
||||
type BSONAppenderFunc func([]byte, interface{}) ([]byte, error)
|
||||
|
||||
// AppendBSON implements the BSONAppender interface
|
||||
func (baf BSONAppenderFunc) AppendBSON(dst []byte, val interface{}) ([]byte, error) {
|
||||
return baf(dst, val)
|
||||
}
|
||||
|
||||
// MarshalError is returned when attempting to transform a value into a document
|
||||
// results in an error.
|
||||
type MarshalError struct {
|
||||
Value interface{}
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (me MarshalError) Error() string {
|
||||
return fmt.Sprintf("cannot transform type %s to a BSON Document: %v", reflect.TypeOf(me.Value), me.Err)
|
||||
}
|
||||
|
||||
// Pipeline is a type that makes creating aggregation pipelines easier. It is a
|
||||
// helper and is intended for serializing to BSON.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// mongo.Pipeline{
|
||||
// {{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}},
|
||||
// {{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}},
|
||||
// }
|
||||
//
|
||||
type Pipeline []bson.D
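// Illustrative sketch, not part of the vendored file: passing a Pipeline value
// to Collection.Aggregate. It assumes a *Collection value named coll; the
// stage contents are arbitrary examples.
func exampleAggregate(ctx context.Context, coll *Collection) (*Cursor, error) {
	pipeline := Pipeline{
		{{"$match", bson.D{{"status", "A"}}}},
		{{"$group", bson.D{{"_id", "$custID"}, {"total", bson.D{{"$sum", "$amount"}}}}}},
	}
	return coll.Aggregate(ctx, pipeline)
}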
|
||||
|
||||
// transformAndEnsureID is a hack that makes it easy to get a RawValue as the _id value. This will
|
||||
// be removed when we switch from using bsonx to bsoncore for the driver package.
|
||||
func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, interface{}, error) {
|
||||
// TODO: performance is going to be pretty bad for bsonx.Doc here since we turn it into a []byte
|
||||
// only to turn it back into a bsonx.Doc. We can fix this post beta1 when we refactor the driver
|
||||
// package to use bsoncore.Document instead of bsonx.Doc.
|
||||
if registry == nil {
|
||||
registry = bson.NewRegistryBuilder().Build()
|
||||
}
|
||||
switch tt := val.(type) {
|
||||
case nil:
|
||||
return nil, nil, ErrNilDocument
|
||||
case bsonx.Doc:
|
||||
val = tt.Copy()
|
||||
case []byte:
|
||||
// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
|
||||
val = bson.Raw(tt)
|
||||
}
|
||||
|
||||
// TODO(skriptble): Use a pool of these instead.
|
||||
buf := make([]byte, 0, 256)
|
||||
b, err := bson.MarshalAppendWithRegistry(registry, buf, val)
|
||||
if err != nil {
|
||||
return nil, nil, MarshalError{Value: val, Err: err}
|
||||
}
|
||||
|
||||
d, err := bsonx.ReadDoc(b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var id interface{}
|
||||
|
||||
idx := d.IndexOf("_id")
|
||||
var idElem bsonx.Elem
|
||||
switch idx {
|
||||
case -1:
|
||||
idElem = bsonx.Elem{"_id", bsonx.ObjectID(primitive.NewObjectID())}
|
||||
d = append(d, bsonx.Elem{})
|
||||
copy(d[1:], d)
|
||||
d[0] = idElem
|
||||
default:
|
||||
idElem = d[idx]
|
||||
copy(d[1:idx+1], d[0:idx])
|
||||
d[0] = idElem
|
||||
}
|
||||
|
||||
idBuf := make([]byte, 0, 256)
|
||||
t, data, err := idElem.Value.MarshalAppendBSONValue(idBuf[:0])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
err = bson.RawValue{Type: t, Value: data}.UnmarshalWithRegistry(registry, &id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return d, id, nil
|
||||
}
|
||||
|
||||
// transformAndEnsureIDv2 is a hack that makes it easy to get a RawValue as the _id value. This will
|
||||
// be removed when we switch from using bsonx to bsoncore for the driver package.
|
||||
func transformAndEnsureIDv2(registry *bsoncodec.Registry, val interface{}) (bsoncore.Document, interface{}, error) {
|
||||
if registry == nil {
|
||||
registry = bson.NewRegistryBuilder().Build()
|
||||
}
|
||||
switch tt := val.(type) {
|
||||
case nil:
|
||||
return nil, nil, ErrNilDocument
|
||||
case bsonx.Doc:
|
||||
val = tt.Copy()
|
||||
case []byte:
|
||||
// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
|
||||
val = bson.Raw(tt)
|
||||
}
|
||||
|
||||
// TODO(skriptble): Use a pool of these instead.
|
||||
doc := make(bsoncore.Document, 0, 256)
|
||||
doc, err := bson.MarshalAppendWithRegistry(registry, doc, val)
|
||||
if err != nil {
|
||||
return nil, nil, MarshalError{Value: val, Err: err}
|
||||
}
|
||||
|
||||
var id interface{}
|
||||
|
||||
value := doc.Lookup("_id")
|
||||
switch value.Type {
|
||||
case bsontype.Type(0):
|
||||
value = bsoncore.Value{Type: bsontype.ObjectID, Data: bsoncore.AppendObjectID(nil, primitive.NewObjectID())}
|
||||
olddoc := doc
|
||||
doc = make(bsoncore.Document, 0, len(olddoc)+17) // type byte + _id + null byte + object ID
|
||||
_, doc = bsoncore.ReserveLength(doc)
|
||||
doc = bsoncore.AppendValueElement(doc, "_id", value)
|
||||
doc = append(doc, olddoc[4:]...) // remove the length
|
||||
doc = bsoncore.UpdateLength(doc, 0, int32(len(doc)))
|
||||
default:
|
||||
// We copy the bytes here to ensure that any bytes returned to the user aren't modified
|
||||
// later.
|
||||
buf := make([]byte, len(value.Data))
|
||||
copy(buf, value.Data)
|
||||
value.Data = buf
|
||||
}
|
||||
|
||||
err = bson.RawValue{Type: value.Type, Value: value.Data}.UnmarshalWithRegistry(registry, &id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return doc, id, nil
|
||||
}
|
||||
|
||||
func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) {
	if doc, ok := val.(bsonx.Doc); ok {
		return doc.Copy(), nil
	}
	b, err := transformBsoncoreDocument(registry, val)
	if err != nil {
		return nil, err
	}
	return bsonx.ReadDoc(b)
}

func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}) (bsoncore.Document, error) {
	if registry == nil {
		registry = bson.DefaultRegistry
	}
	if val == nil {
		return nil, ErrNilDocument
	}
	if bs, ok := val.([]byte); ok {
		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
		val = bson.Raw(bs)
	}

	// TODO(skriptble): Use a pool of these instead.
	buf := make([]byte, 0, 256)
	b, err := bson.MarshalAppendWithRegistry(registry, buf[:0], val)
	if err != nil {
		return nil, MarshalError{Value: val, Err: err}
	}
	return b, nil
}

func ensureID(d bsonx.Doc) (bsonx.Doc, interface{}) {
	var id interface{}

	elem, err := d.LookupElementErr("_id")
	switch err.(type) {
	case nil:
		id = elem
	default:
		oid := primitive.NewObjectID()
		d = append(d, bsonx.Elem{"_id", bsonx.ObjectID(oid)})
		id = oid
	}
	return d, id
}

func ensureDollarKey(doc bsonx.Doc) error {
	if len(doc) == 0 {
		return errors.New("update document must have at least one element")
	}
	if !strings.HasPrefix(doc[0].Key, "$") {
		return errors.New("update document must contain key beginning with '$'")
	}
	return nil
}

func ensureDollarKeyv2(doc bsoncore.Document) error {
	firstElem, err := doc.IndexErr(0)
	if err != nil {
		return errors.New("update document must have at least one element")
	}

	if !strings.HasPrefix(firstElem.Key(), "$") {
		return errors.New("update document must contain key beginning with '$'")
	}
	return nil
}

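ensureDollarKey and ensureDollarKeyv2 only verify that the first key of an update document is a "$"-prefixed operator. A hedged sketch of the two outcomes, written as if it lived inside this package (the helpers are unexported, the function name and field values here are invented):

func exampleEnsureDollarKey() {
	// Rejected: a replacement-style document whose first key has no "$" prefix.
	replacement, _ := bson.Marshal(bson.D{{"name", "alice"}})
	err := ensureDollarKeyv2(bsoncore.Document(replacement))
	// err: "update document must contain key beginning with '$'"

	// Accepted: an operator document such as {"$set": {...}}.
	update, _ := bson.Marshal(bson.D{{"$set", bson.D{{"name", "alice"}}}})
	err = ensureDollarKeyv2(bsoncore.Document(update)) // err == nil
	_ = err
}
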
func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface{}) (bsonx.Arr, error) {
	pipelineArr := bsonx.Arr{}
	switch t := pipeline.(type) {
	case bsoncodec.ValueMarshaler:
		btype, val, err := t.MarshalBSONValue()
		if err != nil {
			return nil, err
		}
		if btype != bsontype.Array {
			return nil, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v", btype, bsontype.Array)
		}
		err = pipelineArr.UnmarshalBSONValue(btype, val)
		if err != nil {
			return nil, err
		}
	default:
		val := reflect.ValueOf(t)
		if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) {
			return nil, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind())
		}
		for idx := 0; idx < val.Len(); idx++ {
			elem, err := transformDocument(registry, val.Index(idx).Interface())
			if err != nil {
				return nil, err
			}
			pipelineArr = append(pipelineArr, bsonx.Document(elem))
		}
	}

	return pipelineArr, nil
}

func transformAggregatePipelinev2(registry *bsoncodec.Registry, pipeline interface{}) (bsoncore.Document, bool, error) {
	switch t := pipeline.(type) {
	case bsoncodec.ValueMarshaler:
		btype, val, err := t.MarshalBSONValue()
		if err != nil {
			return nil, false, err
		}
		if btype != bsontype.Array {
			return nil, false, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v", btype, bsontype.Array)
		}

		var hasDollarOut bool
		pipelineDoc := bsoncore.Document(val)
		if _, err := pipelineDoc.LookupErr("$out"); err == nil {
			hasDollarOut = true
		}

		return pipelineDoc, hasDollarOut, nil
	default:
		val := reflect.ValueOf(t)
		if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) {
			return nil, false, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind())
		}

		aidx, arr := bsoncore.AppendArrayStart(nil)
		var hasDollarOut bool
		valLen := val.Len()
		for idx := 0; idx < valLen; idx++ {
			doc, err := transformBsoncoreDocument(registry, val.Index(idx).Interface())
			if err != nil {
				return nil, false, err
			}

			if idx == valLen-1 {
				if elem, err := doc.IndexErr(0); err == nil && elem.Key() == "$out" {
					hasDollarOut = true
				}
			}
			arr = bsoncore.AppendDocumentElement(arr, strconv.Itoa(idx), doc)
		}
		arr, _ = bsoncore.AppendArrayEnd(arr, aidx)
		return arr, hasDollarOut, nil
	}
}

func transformValue(registry *bsoncodec.Registry, val interface{}) (bsoncore.Value, error) {
	switch conv := val.(type) {
	case string:
		return bsoncore.Value{Type: bsontype.String, Data: bsoncore.AppendString(nil, conv)}, nil
	default:
		doc, err := transformBsoncoreDocument(registry, val)
		if err != nil {
			return bsoncore.Value{}, err
		}

		return bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: doc}, nil
	}
}

// Build the aggregation pipeline for the CountDocuments command.
func countDocumentsAggregatePipeline(registry *bsoncodec.Registry, filter interface{}, opts *options.CountOptions) (bsonx.Arr, error) {
	pipeline := bsonx.Arr{}
	filterDoc, err := transformDocument(registry, filter)

	if err != nil {
		return nil, err
	}
	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$match", bsonx.Document(filterDoc)}}))

	if opts != nil {
		if opts.Skip != nil {
			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$skip", bsonx.Int64(*opts.Skip)}}))
		}
		if opts.Limit != nil {
			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$limit", bsonx.Int64(*opts.Limit)}}))
		}
	}

	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{
		{"$group", bsonx.Document(bsonx.Doc{
			{"_id", bsonx.Int32(1)},
			{"n", bsonx.Document(bsonx.Doc{{"$sum", bsonx.Int32(1)}})},
		})},
	},
	))

	return pipeline, nil
}

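countDocumentsAggregatePipeline wraps the caller's filter in a fixed $match / $skip / $limit / $group sequence, so the server returns a single document whose "n" field is the count. A rough sketch of the call and the pipeline it yields, with an invented filter and option values, as it would be invoked from inside this package:

countOpts := options.Count().SetSkip(10).SetLimit(5)
pipeline, err := countDocumentsAggregatePipeline(bson.DefaultRegistry, bson.D{{"status", "A"}}, countOpts)
if err != nil {
	// the filter could not be marshalled
}
_ = pipeline
// Conceptually the pipeline is:
//   [{"$match": {"status": "A"}},
//    {"$skip": 10},
//    {"$limit": 5},
//    {"$group": {"_id": 1, "n": {"$sum": 1}}}]
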

119 vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go generated vendored Executable file
@@ -0,0 +1,119 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import "time"
|
||||
|
||||
// AggregateOptions represents all possible options to the Aggregate() function.
|
||||
type AggregateOptions struct {
|
||||
AllowDiskUse *bool // Enables writing to temporary files. When set to true, aggregation stages can write data to the _tmp subdirectory in the dbPath directory
|
||||
BatchSize *int32 // The number of documents to return per batch
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation. This only applies when the $out stage is specified
|
||||
Collation *Collation // Specifies a collation
|
||||
MaxTime *time.Duration // The maximum amount of time to allow the query to run
|
||||
MaxAwaitTime *time.Duration // The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query
|
||||
Comment *string // Enables users to specify an arbitrary string to help trace the operation through the database profiler, currentOp and logs.
|
||||
Hint interface{} // The index to use for the aggregation. The hint does not apply to $lookup and $graphLookup stages
|
||||
}
|
||||
|
||||
// Aggregate returns a pointer to a new AggregateOptions
|
||||
func Aggregate() *AggregateOptions {
|
||||
return &AggregateOptions{}
|
||||
}
|
||||
|
||||
// SetAllowDiskUse enables writing to temporary files. When set to true,
|
||||
// aggregation stages can write data to the _tmp subdirectory in the
|
||||
// dbPath directory
|
||||
func (ao *AggregateOptions) SetAllowDiskUse(b bool) *AggregateOptions {
|
||||
ao.AllowDiskUse = &b
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetBatchSize specifies the number of documents to return per batch
|
||||
func (ao *AggregateOptions) SetBatchSize(i int32) *AggregateOptions {
|
||||
ao.BatchSize = &i
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation allows the write to opt-out of document level
|
||||
// validation. This only applies when the $out stage is specified
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (ao *AggregateOptions) SetBypassDocumentValidation(b bool) *AggregateOptions {
|
||||
ao.BypassDocumentValidation = &b
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetCollation specifies a collation.
|
||||
// Valid for server versions >= 3.4
|
||||
func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions {
|
||||
ao.Collation = c
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the maximum amount of time to allow the query to run
|
||||
func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions {
|
||||
ao.MaxTime = &d
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetMaxAwaitTime specifies the maximum amount of time for the server to
|
||||
// wait on new documents to satisfy a tailable cursor query
|
||||
// For servers < 3.2, this option is ignored
|
||||
func (ao *AggregateOptions) SetMaxAwaitTime(d time.Duration) *AggregateOptions {
|
||||
ao.MaxAwaitTime = &d
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetComment enables users to specify an arbitrary string to help trace the
|
||||
// operation through the database profiler, currentOp and logs.
|
||||
func (ao *AggregateOptions) SetComment(s string) *AggregateOptions {
|
||||
ao.Comment = &s
|
||||
return ao
|
||||
}
|
||||
|
||||
// SetHint specifies the index to use for the aggregation. The hint does not
|
||||
// apply to $lookup and $graphLookup stages
|
||||
func (ao *AggregateOptions) SetHint(h interface{}) *AggregateOptions {
|
||||
ao.Hint = h
|
||||
return ao
|
||||
}
|
||||
|
||||
// MergeAggregateOptions combines the argued AggregateOptions into a single AggregateOptions in a last-one-wins fashion
|
||||
func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions {
|
||||
aggOpts := Aggregate()
|
||||
for _, ao := range opts {
|
||||
if ao == nil {
|
||||
continue
|
||||
}
|
||||
if ao.AllowDiskUse != nil {
|
||||
aggOpts.AllowDiskUse = ao.AllowDiskUse
|
||||
}
|
||||
if ao.BatchSize != nil {
|
||||
aggOpts.BatchSize = ao.BatchSize
|
||||
}
|
||||
if ao.BypassDocumentValidation != nil {
|
||||
aggOpts.BypassDocumentValidation = ao.BypassDocumentValidation
|
||||
}
|
||||
if ao.Collation != nil {
|
||||
aggOpts.Collation = ao.Collation
|
||||
}
|
||||
if ao.MaxTime != nil {
|
||||
aggOpts.MaxTime = ao.MaxTime
|
||||
}
|
||||
if ao.MaxAwaitTime != nil {
|
||||
aggOpts.MaxAwaitTime = ao.MaxAwaitTime
|
||||
}
|
||||
if ao.Comment != nil {
|
||||
aggOpts.Comment = ao.Comment
|
||||
}
|
||||
if ao.Hint != nil {
|
||||
aggOpts.Hint = ao.Hint
|
||||
}
|
||||
}
|
||||
|
||||
return aggOpts
|
||||
}
|
||||
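As a usage sketch for the aggregate options above (the values are arbitrary): the setters are chained, and MergeAggregateOptions collapses several option structs field by field, last one wins.

base := options.Aggregate().
	SetAllowDiskUse(true).
	SetBatchSize(100).
	SetMaxTime(30 * time.Second)
override := options.Aggregate().SetBatchSize(500)

merged := options.MergeAggregateOptions(base, override)
// *merged.BatchSize == 500, *merged.AllowDiskUse == true, *merged.MaxTime == 30 * time.Second
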

55 vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go generated vendored Executable file
@@ -0,0 +1,55 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package options

// DefaultOrdered is the default order for a BulkWriteOptions struct created from BulkWrite.
var DefaultOrdered = true

// BulkWriteOptions represent all possible options for a bulkWrite operation.
type BulkWriteOptions struct {
	BypassDocumentValidation *bool // If true, allows the write to opt out of document-level validation.
	Ordered                  *bool // If true, when a write fails, return without performing remaining writes. Defaults to true.
}

// BulkWrite creates a new *BulkWriteOptions
func BulkWrite() *BulkWriteOptions {
	return &BulkWriteOptions{
		Ordered: &DefaultOrdered,
	}
}

// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
// remaining writes. Defaults to true.
func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions {
	b.Ordered = &ordered
	return b
}

// SetBypassDocumentValidation specifies if the write should opt out of document-level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOptions {
	b.BypassDocumentValidation = &bypass
	return b
}

// MergeBulkWriteOptions combines the given *BulkWriteOptions into a single *BulkWriteOptions in a last one wins fashion.
func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
	b := BulkWrite()
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		if opt.Ordered != nil {
			b.Ordered = opt.Ordered
		}
		if opt.BypassDocumentValidation != nil {
			b.BypassDocumentValidation = opt.BypassDocumentValidation
		}
	}

	return b
}


110 vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go generated vendored Executable file
@@ -0,0 +1,110 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ChangeStreamOptions represents all possible options to a change stream
|
||||
type ChangeStreamOptions struct {
|
||||
BatchSize *int32 // The number of documents to return per batch
|
||||
Collation *Collation // Specifies a collation
|
||||
FullDocument *FullDocument // When set to ‘updateLookup’, the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
|
||||
MaxAwaitTime *time.Duration // The maximum amount of time for the server to wait on new documents to satisfy a change stream query
|
||||
ResumeAfter interface{} // Specifies the logical starting point for the new change stream
|
||||
StartAtOperationTime *primitive.Timestamp // Ensures that a change stream will only provide changes that occurred after a timestamp.
|
||||
StartAfter interface{} // Specifies a resume token. The started change stream will return the first notification after the token.
|
||||
}
|
||||
|
||||
// ChangeStream returns a pointer to a new ChangeStreamOptions
|
||||
func ChangeStream() *ChangeStreamOptions {
|
||||
cso := &ChangeStreamOptions{}
|
||||
cso.SetFullDocument(Default)
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetBatchSize specifies the number of documents to return per batch
|
||||
func (cso *ChangeStreamOptions) SetBatchSize(i int32) *ChangeStreamOptions {
|
||||
cso.BatchSize = &i
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetCollation specifies a collation
|
||||
func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions {
|
||||
cso.Collation = &c
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetFullDocument specifies the fullDocument option.
|
||||
// When set to ‘updateLookup’, the change notification for partial updates will
|
||||
// include both a delta describing the changes to the document, as well as a
|
||||
// copy of the entire document that was changed from some time after the change
|
||||
// occurred.
|
||||
func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions {
|
||||
cso.FullDocument = &fd
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetMaxAwaitTime specifies the maximum amount of time for the server to wait on new documents to satisfy a change stream query
|
||||
func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions {
|
||||
cso.MaxAwaitTime = &d
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetResumeAfter specifies the logical starting point for the new change stream
|
||||
func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOptions {
|
||||
cso.ResumeAfter = rt
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetStartAtOperationTime ensures that a change stream will only provide changes that occurred after a specified timestamp.
|
||||
func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions {
|
||||
cso.StartAtOperationTime = t
|
||||
return cso
|
||||
}
|
||||
|
||||
// SetStartAfter specifies a resume token. The resulting change stream will return the first notification after the token.
|
||||
// Cannot be used in conjunction with ResumeAfter.
|
||||
func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptions {
|
||||
cso.StartAfter = sa
|
||||
return cso
|
||||
}
|
||||
|
||||
// MergeChangeStreamOptions combines the argued ChangeStreamOptions into a single ChangeStreamOptions in a last-one-wins fashion
|
||||
func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions {
|
||||
csOpts := ChangeStream()
|
||||
for _, cso := range opts {
|
||||
if cso == nil {
|
||||
continue
|
||||
}
|
||||
if cso.BatchSize != nil {
|
||||
csOpts.BatchSize = cso.BatchSize
|
||||
}
|
||||
if cso.Collation != nil {
|
||||
csOpts.Collation = cso.Collation
|
||||
}
|
||||
if cso.FullDocument != nil {
|
||||
csOpts.FullDocument = cso.FullDocument
|
||||
}
|
||||
if cso.MaxAwaitTime != nil {
|
||||
csOpts.MaxAwaitTime = cso.MaxAwaitTime
|
||||
}
|
||||
if cso.ResumeAfter != nil {
|
||||
csOpts.ResumeAfter = cso.ResumeAfter
|
||||
}
|
||||
if cso.StartAtOperationTime != nil {
|
||||
csOpts.StartAtOperationTime = cso.StartAtOperationTime
|
||||
}
|
||||
if cso.StartAfter != nil {
|
||||
csOpts.StartAfter = cso.StartAfter
|
||||
}
|
||||
}
|
||||
|
||||
return csOpts
|
||||
}
|
||||
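A brief usage sketch for the change stream options above; it assumes an existing collection handle and context, and that the UpdateLookup constant is defined elsewhere in this options package:

csOpts := options.ChangeStream().
	SetFullDocument(options.UpdateLookup). // also deliver the post-update document
	SetBatchSize(32).
	SetMaxAwaitTime(5 * time.Second)

stream, err := coll.Watch(ctx, mongo.Pipeline{}, csOpts)
if err != nil {
	// handle error
}
defer stream.Close(ctx)
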

633 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go generated vendored Executable file
@@ -0,0 +1,633 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options // import "go.mongodb.org/mongo-driver/mongo/options"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/event"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
"go.mongodb.org/mongo-driver/tag"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
|
||||
)
|
||||
|
||||
// ContextDialer makes new network connections
|
||||
type ContextDialer interface {
|
||||
DialContext(ctx context.Context, network, address string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// Credential holds auth options.
|
||||
//
|
||||
// AuthMechanism indicates the mechanism to use for authentication.
|
||||
// Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", "MONGODB-CR", "PLAIN", "GSSAPI", and "MONGODB-X509".
|
||||
//
|
||||
// AuthMechanismProperties specifies additional configuration options which may be used by certain
|
||||
// authentication mechanisms. Supported properties are:
|
||||
// SERVICE_NAME: Specifies the name of the service. Defaults to mongodb.
|
||||
// CANONICALIZE_HOST_NAME: If true, tells the driver to canonicalize the given hostname. Defaults to false. This
|
||||
// property may not be used on Linux and Darwin systems and may not be used at the same time as SERVICE_HOST.
|
||||
// SERVICE_REALM: Specifies the realm of the service.
|
||||
// SERVICE_HOST: Specifies a hostname for GSSAPI authentication if it is different from the server's address. For
|
||||
// authentication mechanisms besides GSSAPI, this property is ignored.
|
||||
//
|
||||
// AuthSource specifies the database to authenticate against.
|
||||
//
|
||||
// Username specifies the username that will be authenticated.
|
||||
//
|
||||
// Password specifies the password used for authentication.
|
||||
//
|
||||
// PasswordSet specifies if the password is actually set, since an empty password is a valid password.
|
||||
type Credential struct {
|
||||
AuthMechanism string
|
||||
AuthMechanismProperties map[string]string
|
||||
AuthSource string
|
||||
Username string
|
||||
Password string
|
||||
PasswordSet bool
|
||||
}
|
||||
|
||||
// ClientOptions represents all possible options to configure a client.
|
||||
type ClientOptions struct {
|
||||
AppName *string
|
||||
Auth *Credential
|
||||
ConnectTimeout *time.Duration
|
||||
Compressors []string
|
||||
Dialer ContextDialer
|
||||
HeartbeatInterval *time.Duration
|
||||
Hosts []string
|
||||
LocalThreshold *time.Duration
|
||||
MaxConnIdleTime *time.Duration
|
||||
MaxPoolSize *uint16
|
||||
Monitor *event.CommandMonitor
|
||||
ReadConcern *readconcern.ReadConcern
|
||||
ReadPreference *readpref.ReadPref
|
||||
Registry *bsoncodec.Registry
|
||||
ReplicaSet *string
|
||||
RetryWrites *bool
|
||||
ServerSelectionTimeout *time.Duration
|
||||
Direct *bool
|
||||
SocketTimeout *time.Duration
|
||||
TLSConfig *tls.Config
|
||||
WriteConcern *writeconcern.WriteConcern
|
||||
ZlibLevel *int
|
||||
|
||||
err error
|
||||
|
||||
// Adds an option for internal use only and should not be set. This option is deprecated and is
|
||||
// not part of the stability guarantee. It may be removed in the future.
|
||||
AuthenticateToAnything *bool
|
||||
}
|
||||
|
||||
// Client creates a new ClientOptions instance.
|
||||
func Client() *ClientOptions {
|
||||
return new(ClientOptions)
|
||||
}
|
||||
|
||||
// Validate validates the client options. This method will return the first error found.
|
||||
func (c *ClientOptions) Validate() error { return c.err }
|
||||
|
||||
// ApplyURI parses the provided connection string and sets the values and options accordingly.
|
||||
//
|
||||
// Errors that occur in this method can be retrieved by calling Validate.
|
||||
//
|
||||
// If the URI contains ssl=true this method will overwrite TLSConfig, even if there aren't any other
|
||||
// tls options specified.
|
||||
func (c *ClientOptions) ApplyURI(uri string) *ClientOptions {
|
||||
if c.err != nil {
|
||||
return c
|
||||
}
|
||||
|
||||
cs, err := connstring.Parse(uri)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return c
|
||||
}
|
||||
|
||||
if cs.AppName != "" {
|
||||
c.AppName = &cs.AppName
|
||||
}
|
||||
|
||||
if cs.AuthMechanism != "" || cs.AuthMechanismProperties != nil || cs.AuthSource != "" ||
|
||||
cs.Username != "" || cs.PasswordSet {
|
||||
c.Auth = &Credential{
|
||||
AuthMechanism: cs.AuthMechanism,
|
||||
AuthMechanismProperties: cs.AuthMechanismProperties,
|
||||
AuthSource: cs.AuthSource,
|
||||
Username: cs.Username,
|
||||
Password: cs.Password,
|
||||
PasswordSet: cs.PasswordSet,
|
||||
}
|
||||
}
|
||||
|
||||
if cs.ConnectSet {
|
||||
direct := cs.Connect == connstring.SingleConnect
|
||||
c.Direct = &direct
|
||||
}
|
||||
|
||||
if cs.ConnectTimeoutSet {
|
||||
c.ConnectTimeout = &cs.ConnectTimeout
|
||||
}
|
||||
|
||||
if len(cs.Compressors) > 0 {
|
||||
c.Compressors = cs.Compressors
|
||||
}
|
||||
|
||||
if cs.HeartbeatIntervalSet {
|
||||
c.HeartbeatInterval = &cs.HeartbeatInterval
|
||||
}
|
||||
|
||||
c.Hosts = cs.Hosts
|
||||
|
||||
if cs.LocalThresholdSet {
|
||||
c.LocalThreshold = &cs.LocalThreshold
|
||||
}
|
||||
|
||||
if cs.MaxConnIdleTimeSet {
|
||||
c.MaxConnIdleTime = &cs.MaxConnIdleTime
|
||||
}
|
||||
|
||||
if cs.MaxPoolSizeSet {
|
||||
c.MaxPoolSize = &cs.MaxPoolSize
|
||||
}
|
||||
|
||||
if cs.ReadConcernLevel != "" {
|
||||
c.ReadConcern = readconcern.New(readconcern.Level(cs.ReadConcernLevel))
|
||||
}
|
||||
|
||||
if cs.ReadPreference != "" || len(cs.ReadPreferenceTagSets) > 0 || cs.MaxStalenessSet {
|
||||
opts := make([]readpref.Option, 0, 1)
|
||||
|
||||
tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets)
|
||||
if len(tagSets) > 0 {
|
||||
opts = append(opts, readpref.WithTagSets(tagSets...))
|
||||
}
|
||||
|
||||
if cs.MaxStaleness != 0 {
|
||||
opts = append(opts, readpref.WithMaxStaleness(cs.MaxStaleness))
|
||||
}
|
||||
|
||||
mode, err := readpref.ModeFromString(cs.ReadPreference)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return c
|
||||
}
|
||||
|
||||
c.ReadPreference, c.err = readpref.New(mode, opts...)
|
||||
if c.err != nil {
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
if cs.RetryWritesSet {
|
||||
c.RetryWrites = &cs.RetryWrites
|
||||
}
|
||||
|
||||
if cs.ReplicaSet != "" {
|
||||
c.ReplicaSet = &cs.ReplicaSet
|
||||
}
|
||||
|
||||
if cs.ServerSelectionTimeoutSet {
|
||||
c.ServerSelectionTimeout = &cs.ServerSelectionTimeout
|
||||
}
|
||||
|
||||
if cs.SocketTimeoutSet {
|
||||
c.SocketTimeout = &cs.SocketTimeout
|
||||
}
|
||||
|
||||
if cs.SSL {
|
||||
tlsConfig := new(tls.Config)
|
||||
|
||||
if cs.SSLCaFileSet {
|
||||
c.err = addCACertFromFile(tlsConfig, cs.SSLCaFile)
|
||||
if c.err != nil {
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
if cs.SSLInsecure {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
}
|
||||
|
||||
if cs.SSLClientCertificateKeyFileSet {
|
||||
var keyPasswd string
|
||||
if cs.SSLClientCertificateKeyPasswordSet && cs.SSLClientCertificateKeyPassword != nil {
|
||||
keyPasswd = cs.SSLClientCertificateKeyPassword()
|
||||
}
|
||||
s, err := addClientCertFromFile(tlsConfig, cs.SSLClientCertificateKeyFile, keyPasswd)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return c
|
||||
}
|
||||
|
||||
// If a username wasn't specified, add one from the certificate.
|
||||
if c.Auth != nil && strings.ToLower(c.Auth.AuthMechanism) == "mongodb-x509" && c.Auth.Username == "" {
|
||||
// The Go x509 package gives the subject with the pairs in reverse order that we want.
|
||||
pairs := strings.Split(s, ",")
|
||||
for left, right := 0, len(pairs)-1; left < right; left, right = left+1, right-1 {
|
||||
pairs[left], pairs[right] = pairs[right], pairs[left]
|
||||
}
|
||||
c.Auth.Username = strings.Join(pairs, ",")
|
||||
}
|
||||
}
|
||||
|
||||
c.TLSConfig = tlsConfig
|
||||
}
|
||||
|
||||
if cs.JSet || cs.WString != "" || cs.WNumberSet || cs.WTimeoutSet {
|
||||
opts := make([]writeconcern.Option, 0, 1)
|
||||
|
||||
if len(cs.WString) > 0 {
|
||||
opts = append(opts, writeconcern.WTagSet(cs.WString))
|
||||
} else if cs.WNumberSet {
|
||||
opts = append(opts, writeconcern.W(cs.WNumber))
|
||||
}
|
||||
|
||||
if cs.JSet {
|
||||
opts = append(opts, writeconcern.J(cs.J))
|
||||
}
|
||||
|
||||
if cs.WTimeoutSet {
|
||||
opts = append(opts, writeconcern.WTimeout(cs.WTimeout))
|
||||
}
|
||||
|
||||
c.WriteConcern = writeconcern.New(opts...)
|
||||
}
|
||||
|
||||
if cs.ZlibLevelSet {
|
||||
c.ZlibLevel = &cs.ZlibLevel
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetAppName specifies the client application name. This value is used by MongoDB when it logs
|
||||
// connection information and profile information, such as slow queries.
|
||||
func (c *ClientOptions) SetAppName(s string) *ClientOptions {
|
||||
c.AppName = &s
|
||||
return c
|
||||
}
|
||||
|
||||
// SetAuth sets the authentication options.
|
||||
func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions {
|
||||
c.Auth = &auth
|
||||
return c
|
||||
}
|
||||
|
||||
// SetCompressors sets the compressors that can be used when communicating with a server.
|
||||
func (c *ClientOptions) SetCompressors(comps []string) *ClientOptions {
|
||||
c.Compressors = comps
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetConnectTimeout specifies the timeout for an initial connection to a server.
|
||||
// If a custom Dialer is used, this method won't be set and the user is
|
||||
// responsible for setting the ConnectTimeout for connections on the dialer
|
||||
// themselves.
|
||||
func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions {
|
||||
c.ConnectTimeout = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetDialer specifies a custom dialer used to dial new connections to a server.
|
||||
// If a custom dialer is not set, a net.Dialer with a 300 second keepalive time will be used by default.
|
||||
func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions {
|
||||
c.Dialer = d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetDirect specifies whether the driver should connect directly to the server instead of
|
||||
// auto-discovering other servers in the cluster.
|
||||
func (c *ClientOptions) SetDirect(b bool) *ClientOptions {
|
||||
c.Direct = &b
|
||||
return c
|
||||
}
|
||||
|
||||
// SetHeartbeatInterval specifies the interval to wait between server monitoring checks.
|
||||
func (c *ClientOptions) SetHeartbeatInterval(d time.Duration) *ClientOptions {
|
||||
c.HeartbeatInterval = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetHosts specifies the initial list of addresses from which to discover the rest of the cluster.
|
||||
func (c *ClientOptions) SetHosts(s []string) *ClientOptions {
|
||||
c.Hosts = s
|
||||
return c
|
||||
}
|
||||
|
||||
// SetLocalThreshold specifies how far to distribute queries, beyond the server with the fastest
|
||||
// round-trip time. If a server's roundtrip time is more than LocalThreshold slower than the
|
||||
// the fastest, the driver will not send queries to that server.
|
||||
func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions {
|
||||
c.LocalThreshold = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetMaxConnIdleTime specifies the maximum number of milliseconds that a connection can remain idle
|
||||
// in a connection pool before being removed and closed.
|
||||
func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions {
|
||||
c.MaxConnIdleTime = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetMaxPoolSize specifies the max size of a server's connection pool.
|
||||
func (c *ClientOptions) SetMaxPoolSize(u uint16) *ClientOptions {
|
||||
c.MaxPoolSize = &u
|
||||
return c
|
||||
}
|
||||
|
||||
// SetMonitor specifies a command monitor used to see commands for a client.
|
||||
func (c *ClientOptions) SetMonitor(m *event.CommandMonitor) *ClientOptions {
|
||||
c.Monitor = m
|
||||
return c
|
||||
}
|
||||
|
||||
// SetReadConcern specifies the read concern.
|
||||
func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptions {
|
||||
c.ReadConcern = rc
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetReadPreference specifies the read preference.
|
||||
func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions {
|
||||
c.ReadPreference = rp
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetRegistry specifies the bsoncodec.Registry.
|
||||
func (c *ClientOptions) SetRegistry(registry *bsoncodec.Registry) *ClientOptions {
|
||||
c.Registry = registry
|
||||
return c
|
||||
}
|
||||
|
||||
// SetReplicaSet specifies the name of the replica set of the cluster.
|
||||
func (c *ClientOptions) SetReplicaSet(s string) *ClientOptions {
|
||||
c.ReplicaSet = &s
|
||||
return c
|
||||
}
|
||||
|
||||
// SetRetryWrites specifies whether the client has retryable writes enabled.
|
||||
func (c *ClientOptions) SetRetryWrites(b bool) *ClientOptions {
|
||||
c.RetryWrites = &b
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetServerSelectionTimeout specifies a timeout in milliseconds to block for server selection.
|
||||
func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOptions {
|
||||
c.ServerSelectionTimeout = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetSocketTimeout specifies the time in milliseconds to attempt to send or receive on a socket
|
||||
// before the attempt times out.
|
||||
func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions {
|
||||
c.SocketTimeout = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// SetTLSConfig sets the tls.Config.
|
||||
func (c *ClientOptions) SetTLSConfig(cfg *tls.Config) *ClientOptions {
|
||||
c.TLSConfig = cfg
|
||||
return c
|
||||
}
|
||||
|
||||
// SetWriteConcern sets the write concern.
|
||||
func (c *ClientOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptions {
|
||||
c.WriteConcern = wc
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// SetZlibLevel sets the level for the zlib compressor.
|
||||
func (c *ClientOptions) SetZlibLevel(level int) *ClientOptions {
|
||||
c.ZlibLevel = &level
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeClientOptions combines the given connstring and *ClientOptions into a single *ClientOptions in a last one wins
|
||||
// fashion. The given connstring will be used for the default options, which can be overwritten using the given
|
||||
// *ClientOptions.
|
||||
func MergeClientOptions(opts ...*ClientOptions) *ClientOptions {
|
||||
c := Client()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if opt.Dialer != nil {
|
||||
c.Dialer = opt.Dialer
|
||||
}
|
||||
if opt.AppName != nil {
|
||||
c.AppName = opt.AppName
|
||||
}
|
||||
if opt.Auth != nil {
|
||||
c.Auth = opt.Auth
|
||||
}
|
||||
if opt.AuthenticateToAnything != nil {
|
||||
c.AuthenticateToAnything = opt.AuthenticateToAnything
|
||||
}
|
||||
if opt.Compressors != nil {
|
||||
c.Compressors = opt.Compressors
|
||||
}
|
||||
if opt.ConnectTimeout != nil {
|
||||
c.ConnectTimeout = opt.ConnectTimeout
|
||||
}
|
||||
if opt.HeartbeatInterval != nil {
|
||||
c.HeartbeatInterval = opt.HeartbeatInterval
|
||||
}
|
||||
if len(opt.Hosts) > 0 {
|
||||
c.Hosts = opt.Hosts
|
||||
}
|
||||
if opt.LocalThreshold != nil {
|
||||
c.LocalThreshold = opt.LocalThreshold
|
||||
}
|
||||
if opt.MaxConnIdleTime != nil {
|
||||
c.MaxConnIdleTime = opt.MaxConnIdleTime
|
||||
}
|
||||
if opt.MaxPoolSize != nil {
|
||||
c.MaxPoolSize = opt.MaxPoolSize
|
||||
}
|
||||
if opt.Monitor != nil {
|
||||
c.Monitor = opt.Monitor
|
||||
}
|
||||
if opt.ReadConcern != nil {
|
||||
c.ReadConcern = opt.ReadConcern
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
c.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
if opt.Registry != nil {
|
||||
c.Registry = opt.Registry
|
||||
}
|
||||
if opt.ReplicaSet != nil {
|
||||
c.ReplicaSet = opt.ReplicaSet
|
||||
}
|
||||
if opt.RetryWrites != nil {
|
||||
c.RetryWrites = opt.RetryWrites
|
||||
}
|
||||
if opt.ServerSelectionTimeout != nil {
|
||||
c.ServerSelectionTimeout = opt.ServerSelectionTimeout
|
||||
}
|
||||
if opt.Direct != nil {
|
||||
c.Direct = opt.Direct
|
||||
}
|
||||
if opt.SocketTimeout != nil {
|
||||
c.SocketTimeout = opt.SocketTimeout
|
||||
}
|
||||
if opt.TLSConfig != nil {
|
||||
c.TLSConfig = opt.TLSConfig
|
||||
}
|
||||
if opt.WriteConcern != nil {
|
||||
c.WriteConcern = opt.WriteConcern
|
||||
}
|
||||
if opt.ZlibLevel != nil {
|
||||
c.ZlibLevel = opt.ZlibLevel
|
||||
}
|
||||
if opt.err != nil {
|
||||
c.err = opt.err
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// addCACertFromFile adds a root CA certificate to the configuration given a path
|
||||
// to the containing file.
|
||||
func addCACertFromFile(cfg *tls.Config, file string) error {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certBytes, err := loadCert(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.RootCAs == nil {
|
||||
cfg.RootCAs = x509.NewCertPool()
|
||||
}
|
||||
|
||||
cfg.RootCAs.AddCert(cert)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadCert(data []byte) ([]byte, error) {
|
||||
var certBlock *pem.Block
|
||||
|
||||
for certBlock == nil {
|
||||
if data == nil || len(data) == 0 {
|
||||
return nil, errors.New(".pem file must have both a CERTIFICATE and an RSA PRIVATE KEY section")
|
||||
}
|
||||
|
||||
block, rest := pem.Decode(data)
|
||||
if block == nil {
|
||||
return nil, errors.New("invalid .pem file")
|
||||
}
|
||||
|
||||
switch block.Type {
|
||||
case "CERTIFICATE":
|
||||
if certBlock != nil {
|
||||
return nil, errors.New("multiple CERTIFICATE sections in .pem file")
|
||||
}
|
||||
|
||||
certBlock = block
|
||||
}
|
||||
|
||||
data = rest
|
||||
}
|
||||
|
||||
return certBlock.Bytes, nil
|
||||
}
|
||||
|
||||
// addClientCertFromFile adds a client certificate to the configuration given a path to the
|
||||
// containing file and returns the certificate's subject name.
|
||||
func addClientCertFromFile(cfg *tls.Config, clientFile, keyPasswd string) (string, error) {
|
||||
data, err := ioutil.ReadFile(clientFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var currentBlock *pem.Block
|
||||
var certBlock, certDecodedBlock, keyBlock []byte
|
||||
|
||||
remaining := data
|
||||
start := 0
|
||||
for {
|
||||
currentBlock, remaining = pem.Decode(remaining)
|
||||
if currentBlock == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if currentBlock.Type == "CERTIFICATE" {
|
||||
certBlock = data[start : len(data)-len(remaining)]
|
||||
certDecodedBlock = currentBlock.Bytes
|
||||
start += len(certBlock)
|
||||
} else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") {
|
||||
if keyPasswd != "" && x509.IsEncryptedPEMBlock(currentBlock) {
|
||||
var encoded bytes.Buffer
|
||||
buf, err := x509.DecryptPEMBlock(currentBlock, []byte(keyPasswd))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: buf})
|
||||
keyBlock = encoded.Bytes()
|
||||
start = len(data) - len(remaining)
|
||||
} else {
|
||||
keyBlock = data[start : len(data)-len(remaining)]
|
||||
start += len(keyBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(certBlock) == 0 {
|
||||
return "", fmt.Errorf("failed to find CERTIFICATE")
|
||||
}
|
||||
if len(keyBlock) == 0 {
|
||||
return "", fmt.Errorf("failed to find PRIVATE KEY")
|
||||
}
|
||||
|
||||
cert, err := tls.X509KeyPair(certBlock, keyBlock)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
cfg.Certificates = append(cfg.Certificates, cert)
|
||||
|
||||
// The documentation for the tls.X509KeyPair indicates that the Leaf certificate is not
|
||||
// retained.
|
||||
crt, err := x509.ParseCertificate(certDecodedBlock)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return x509CertSubject(crt), nil
|
||||
}
|
||||
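A short sketch of how these client options are typically combined: URI-derived defaults from ApplyURI, overridden by explicit setters, with errors surfaced via Validate. The URI, app name, and pool size here are placeholders.

clientOpts := options.Client().
	ApplyURI("mongodb://localhost:27017/?replicaSet=rs0&ssl=true").
	SetAppName("example-app").
	SetMaxPoolSize(64).
	SetRetryWrites(true)

if err := clientOpts.Validate(); err != nil {
	// ApplyURI defers parse errors; they surface here (or when the client connects).
	log.Fatal(err)
}
// mongo.Connect(ctx, clientOpts) then consumes the validated options.
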

9 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go generated vendored Executable file
@@ -0,0 +1,9 @@
// +build go1.10

package options

import "crypto/x509"

func x509CertSubject(cert *x509.Certificate) string {
	return cert.Subject.String()
}


13 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go generated vendored Executable file
@@ -0,0 +1,13 @@
// +build !go1.10

package options

import (
	"crypto/x509"
)

// We don't support versions less than 1.10, but Evergreen needs to be able to compile the driver
// using version 1.8.
func x509CertSubject(cert *x509.Certificate) string {
	return ""
}


77 vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go generated vendored Executable file
@@ -0,0 +1,77 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
)
|
||||
|
||||
// CollectionOptions represent all possible options to configure a Collection.
|
||||
type CollectionOptions struct {
|
||||
ReadConcern *readconcern.ReadConcern // The read concern for operations in the collection.
|
||||
WriteConcern *writeconcern.WriteConcern // The write concern for operations in the collection.
|
||||
ReadPreference *readpref.ReadPref // The read preference for operations in the collection.
|
||||
Registry *bsoncodec.Registry // The registry to be used to construct BSON encoders and decoders for the collection.
|
||||
}
|
||||
|
||||
// Collection creates a new CollectionOptions instance
|
||||
func Collection() *CollectionOptions {
|
||||
return &CollectionOptions{}
|
||||
}
|
||||
|
||||
// SetReadConcern sets the read concern for the collection.
|
||||
func (c *CollectionOptions) SetReadConcern(rc *readconcern.ReadConcern) *CollectionOptions {
|
||||
c.ReadConcern = rc
|
||||
return c
|
||||
}
|
||||
|
||||
// SetWriteConcern sets the write concern for the collection.
|
||||
func (c *CollectionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *CollectionOptions {
|
||||
c.WriteConcern = wc
|
||||
return c
|
||||
}
|
||||
|
||||
// SetReadPreference sets the read preference for the collection.
|
||||
func (c *CollectionOptions) SetReadPreference(rp *readpref.ReadPref) *CollectionOptions {
|
||||
c.ReadPreference = rp
|
||||
return c
|
||||
}
|
||||
|
||||
// SetRegistry sets the bsoncodec Registry for the collection.
|
||||
func (c *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOptions {
|
||||
c.Registry = r
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeCollectionOptions combines the *CollectionOptions arguments into a single *CollectionOptions in a last one wins
|
||||
// fashion.
|
||||
func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions {
|
||||
c := Collection()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ReadConcern != nil {
|
||||
c.ReadConcern = opt.ReadConcern
|
||||
}
|
||||
if opt.WriteConcern != nil {
|
||||
c.WriteConcern = opt.WriteConcern
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
c.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
if opt.Registry != nil {
|
||||
c.Registry = opt.Registry
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||

81 vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go generated vendored Executable file
@@ -0,0 +1,81 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import "time"
|
||||
|
||||
// CountOptions represents all possible options to the Count() function.
|
||||
type CountOptions struct {
|
||||
Collation *Collation // Specifies a collation
|
||||
Hint interface{} // The index to use
|
||||
Limit *int64 // The maximum number of documents to count
|
||||
MaxTime *time.Duration // The maximum amount of time to allow the operation to run
|
||||
Skip *int64 // The number of documents to skip before counting
|
||||
}
|
||||
|
||||
// Count returns a pointer to a new CountOptions
|
||||
func Count() *CountOptions {
|
||||
return &CountOptions{}
|
||||
}
|
||||
|
||||
// SetCollation specifies a collation
|
||||
// Valid for server versions >= 3.4
|
||||
func (co *CountOptions) SetCollation(c *Collation) *CountOptions {
|
||||
co.Collation = c
|
||||
return co
|
||||
}
|
||||
|
||||
// SetHint specifies the index to use
|
||||
func (co *CountOptions) SetHint(h interface{}) *CountOptions {
|
||||
co.Hint = h
|
||||
return co
|
||||
}
|
||||
|
||||
// SetLimit specifies the maximum number of documents to count
|
||||
func (co *CountOptions) SetLimit(i int64) *CountOptions {
|
||||
co.Limit = &i
|
||||
return co
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the maximum amount of time to allow the operation to run
|
||||
func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions {
|
||||
co.MaxTime = &d
|
||||
return co
|
||||
}
|
||||
|
||||
// SetSkip specifies the number of documents to skip before counting
|
||||
func (co *CountOptions) SetSkip(i int64) *CountOptions {
|
||||
co.Skip = &i
|
||||
return co
|
||||
}
|
||||
|
||||
// MergeCountOptions combines the argued CountOptions into a single CountOptions in a last-one-wins fashion
|
||||
func MergeCountOptions(opts ...*CountOptions) *CountOptions {
|
||||
countOpts := Count()
|
||||
for _, co := range opts {
|
||||
if co == nil {
|
||||
continue
|
||||
}
|
||||
if co.Collation != nil {
|
||||
countOpts.Collation = co.Collation
|
||||
}
|
||||
if co.Hint != nil {
|
||||
countOpts.Hint = co.Hint
|
||||
}
|
||||
if co.Limit != nil {
|
||||
countOpts.Limit = co.Limit
|
||||
}
|
||||
if co.MaxTime != nil {
|
||||
countOpts.MaxTime = co.MaxTime
|
||||
}
|
||||
if co.Skip != nil {
|
||||
countOpts.Skip = co.Skip
|
||||
}
|
||||
}
|
||||
|
||||
return countOpts
|
||||
}
|
||||

77 vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go generated vendored Executable file
@@ -0,0 +1,77 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
)
|
||||
|
||||
// DatabaseOptions represent all possible options to configure a Database.
|
||||
type DatabaseOptions struct {
|
||||
ReadConcern *readconcern.ReadConcern // The read concern for operations in the database.
|
||||
WriteConcern *writeconcern.WriteConcern // The write concern for operations in the database.
|
||||
ReadPreference *readpref.ReadPref // The read preference for operations in the database.
|
||||
Registry *bsoncodec.Registry // The registry to be used to construct BSON encoders and decoders for the database.
|
||||
}
|
||||
|
||||
// Database creates a new DatabaseOptions instance
|
||||
func Database() *DatabaseOptions {
|
||||
return &DatabaseOptions{}
|
||||
}
|
||||
|
||||
// SetReadConcern sets the read concern for the database.
|
||||
func (d *DatabaseOptions) SetReadConcern(rc *readconcern.ReadConcern) *DatabaseOptions {
|
||||
d.ReadConcern = rc
|
||||
return d
|
||||
}
|
||||
|
||||
// SetWriteConcern sets the write concern for the database.
|
||||
func (d *DatabaseOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *DatabaseOptions {
|
||||
d.WriteConcern = wc
|
||||
return d
|
||||
}
|
||||
|
||||
// SetReadPreference sets the read preference for the database.
|
||||
func (d *DatabaseOptions) SetReadPreference(rp *readpref.ReadPref) *DatabaseOptions {
|
||||
d.ReadPreference = rp
|
||||
return d
|
||||
}
|
||||
|
||||
// SetRegistry sets the bsoncodec Registry for the database.
|
||||
func (d *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions {
|
||||
d.Registry = r
|
||||
return d
|
||||
}
|
||||
|
||||
// MergeDatabaseOptions combines the *DatabaseOptions arguments into a single *DatabaseOptions in a last one wins
|
||||
// fashion.
|
||||
func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions {
|
||||
d := Database()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ReadConcern != nil {
|
||||
d.ReadConcern = opt.ReadConcern
|
||||
}
|
||||
if opt.WriteConcern != nil {
|
||||
d.WriteConcern = opt.WriteConcern
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
d.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
if opt.Registry != nil {
|
||||
d.Registry = opt.Registry
|
||||
}
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||

39 vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go generated vendored Executable file
@@ -0,0 +1,39 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package options

// DeleteOptions represents all possible options to the DeleteOne() and DeleteMany() functions.
type DeleteOptions struct {
	Collation *Collation // Specifies a collation
}

// Delete returns a pointer to a new DeleteOptions
func Delete() *DeleteOptions {
	return &DeleteOptions{}
}

// SetCollation specifies a collation
// Valid for servers >= 3.4.
func (do *DeleteOptions) SetCollation(c *Collation) *DeleteOptions {
	do.Collation = c
	return do
}

// MergeDeleteOptions combines the argued DeleteOptions into a single DeleteOptions in a last-one-wins fashion
func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions {
	dOpts := Delete()
	for _, do := range opts {
		if do == nil {
			continue
		}
		if do.Collation != nil {
			dOpts.Collation = do.Collation
		}
	}

	return dOpts
}

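For completeness, a usage sketch for the delete options above, assuming an existing collection handle and context; the collation values are arbitrary:

delOpts := options.Delete().SetCollation(&options.Collation{Locale: "en_US", Strength: 2})
res, err := coll.DeleteMany(ctx, bson.D{{"name", "alice"}}, delOpts)
if err == nil {
	fmt.Println(res.DeletedCount)
}
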

51 vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go generated vendored Executable file
@@ -0,0 +1,51 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package options

import "time"

// DistinctOptions represents all possible options to the Distinct() function.
type DistinctOptions struct {
	Collation *Collation     // Specifies a collation
	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run
}

// Distinct returns a pointer to a new DistinctOptions
func Distinct() *DistinctOptions {
	return &DistinctOptions{}
}

// SetCollation specifies a collation
// Valid for server versions >= 3.4
func (do *DistinctOptions) SetCollation(c *Collation) *DistinctOptions {
	do.Collation = c
	return do
}

// SetMaxTime specifies the maximum amount of time to allow the operation to run
func (do *DistinctOptions) SetMaxTime(d time.Duration) *DistinctOptions {
	do.MaxTime = &d
	return do
}

// MergeDistinctOptions combines the argued DistinctOptions into a single DistinctOptions in a last-one-wins fashion
func MergeDistinctOptions(opts ...*DistinctOptions) *DistinctOptions {
	distinctOpts := Distinct()
	for _, do := range opts {
		if do == nil {
			continue
		}
		if do.Collation != nil {
			distinctOpts.Collation = do.Collation
		}
		if do.MaxTime != nil {
			distinctOpts.MaxTime = do.MaxTime
		}
	}

	return distinctOpts
}


42 vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go generated vendored Executable file
@@ -0,0 +1,42 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package options

import "time"

// EstimatedDocumentCountOptions represents all possible options to the EstimatedDocumentCount() function.
type EstimatedDocumentCountOptions struct {
	MaxTime *time.Duration // The maximum amount of time to allow the operation to run
}

// EstimatedDocumentCount returns a pointer to a new EstimatedDocumentCountOptions
func EstimatedDocumentCount() *EstimatedDocumentCountOptions {
	return &EstimatedDocumentCountOptions{}
}

// SetMaxTime specifies the maximum amount of time to allow the operation to run
func (eco *EstimatedDocumentCountOptions) SetMaxTime(d time.Duration) *EstimatedDocumentCountOptions {
	eco.MaxTime = &d
	return eco
}

// MergeEstimatedDocumentCountOptions combines the given *EstimatedDocumentCountOptions into a single
// *EstimatedDocumentCountOptions in a last one wins fashion.
func MergeEstimatedDocumentCountOptions(opts ...*EstimatedDocumentCountOptions) *EstimatedDocumentCountOptions {
	e := EstimatedDocumentCount()
	for _, opt := range opts {
		if opt == nil {
			continue
		}

		if opt.MaxTime != nil {
			e.MaxTime = opt.MaxTime
		}
	}

	return e
}

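A quick sketch contrasting this option set with the count pipeline earlier: EstimatedDocumentCount reads collection metadata, so MaxTime is the only knob and no filter is accepted (collection handle and context assumed):

n, err := coll.EstimatedDocumentCount(ctx,
	options.EstimatedDocumentCount().SetMaxTime(2*time.Second))
if err == nil {
	fmt.Printf("approximately %d documents\n", n)
}
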

693 vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go generated vendored Executable file
@@ -0,0 +1,693 @@
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// FindOptions represent all possible options to the Find() function.
|
||||
type FindOptions struct {
|
||||
AllowPartialResults *bool // If true, allows partial results to be returned if some shards are down.
|
||||
BatchSize *int32 // Specifies the number of documents to return in every batch.
|
||||
Collation *Collation // Specifies a collation to be used
|
||||
Comment *string // Specifies a string to help trace the operation through the database.
|
||||
CursorType *CursorType // Specifies the type of cursor to use
|
||||
Hint interface{} // Specifies the index to use.
|
||||
Limit *int64 // Sets a limit on the number of results to return.
|
||||
Max interface{} // Sets an exclusive upper bound for a specific index
|
||||
MaxAwaitTime *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
|
||||
MaxTime *time.Duration // Specifies the maximum amount of time to allow the query to run.
|
||||
Min interface{} // Specifies the inclusive lower bound for a specific index.
|
||||
NoCursorTimeout *bool // If true, prevents cursors from timing out after an inactivity period.
|
||||
OplogReplay *bool // Adds an option for internal use only and should not be set.
|
||||
Projection interface{} // Limits the fields returned for all documents.
|
||||
ReturnKey *bool // If true, only returns index keys for all result documents.
|
||||
ShowRecordID *bool // If true, a $recordId field with the record identifier will be added to the returned documents.
|
||||
Skip *int64 // Specifies the number of documents to skip before returning
|
||||
Snapshot *bool // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
|
||||
Sort interface{} // Specifies the order in which to return results.
|
||||
}
|
||||
|
||||
// Find creates a new FindOptions instance.
|
||||
func Find() *FindOptions {
|
||||
return &FindOptions{}
|
||||
}
|
||||
|
||||
// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
|
||||
// For server versions < 3.2, this defaults to false.
|
||||
func (f *FindOptions) SetAllowPartialResults(b bool) *FindOptions {
|
||||
f.AllowPartialResults = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetBatchSize sets the number of documents to return in each batch.
|
||||
func (f *FindOptions) SetBatchSize(i int32) *FindOptions {
|
||||
f.BatchSize = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the Find operation.
|
||||
// Valid for server versions >= 3.4
|
||||
func (f *FindOptions) SetCollation(collation *Collation) *FindOptions {
|
||||
f.Collation = collation
|
||||
return f
|
||||
}
|
||||
|
||||
// SetComment specifies a string to help trace the operation through the database.
|
||||
func (f *FindOptions) SetComment(comment string) *FindOptions {
|
||||
f.Comment = &comment
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCursorType specifies the type of cursor to use.
|
||||
func (f *FindOptions) SetCursorType(ct CursorType) *FindOptions {
|
||||
f.CursorType = &ct
|
||||
return f
|
||||
}
|
||||
|
||||
// SetHint specifies the index to use.
|
||||
func (f *FindOptions) SetHint(hint interface{}) *FindOptions {
|
||||
f.Hint = hint
|
||||
return f
|
||||
}
|
||||
|
||||
// SetLimit specifies a limit on the number of results.
|
||||
// A negative limit implies that only 1 batch should be returned.
|
||||
func (f *FindOptions) SetLimit(i int64) *FindOptions {
|
||||
f.Limit = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMax specifies an exclusive upper bound for a specific index.
|
||||
func (f *FindOptions) SetMax(max interface{}) *FindOptions {
|
||||
f.Max = max
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
|
||||
// If the cursor type is not TailableAwait, this option is ignored.
|
||||
// For server versions < 3.2, this option is ignored.
|
||||
func (f *FindOptions) SetMaxAwaitTime(d time.Duration) *FindOptions {
|
||||
f.MaxAwaitTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *FindOptions) SetMaxTime(d time.Duration) *FindOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMin specifies the inclusive lower bound for a specific index.
|
||||
func (f *FindOptions) SetMin(min interface{}) *FindOptions {
|
||||
f.Min = min
|
||||
return f
|
||||
}
|
||||
|
||||
// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
|
||||
// For server versions < 3.2, this defaults to false.
|
||||
func (f *FindOptions) SetNoCursorTimeout(b bool) *FindOptions {
|
||||
f.NoCursorTimeout = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetOplogReplay adds an option for internal use only and should not be set.
|
||||
// For server versions < 3.2, this defaults to false.
|
||||
func (f *FindOptions) SetOplogReplay(b bool) *FindOptions {
|
||||
f.OplogReplay = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetProjection adds an option to limit the fields returned for all documents.
|
||||
func (f *FindOptions) SetProjection(projection interface{}) *FindOptions {
|
||||
f.Projection = projection
|
||||
return f
|
||||
}
|
||||
|
||||
// SetReturnKey adds an option to only return index keys for all result documents.
|
||||
func (f *FindOptions) SetReturnKey(b bool) *FindOptions {
|
||||
f.ReturnKey = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
|
||||
// If true, a $recordId field will be added to each returned document.
|
||||
func (f *FindOptions) SetShowRecordID(b bool) *FindOptions {
|
||||
f.ShowRecordID = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSkip specifies the number of documents to skip before returning.
|
||||
// For server versions < 3.2, this defaults to 0.
|
||||
func (f *FindOptions) SetSkip(i int64) *FindOptions {
|
||||
f.Skip = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
|
||||
func (f *FindOptions) SetSnapshot(b bool) *FindOptions {
|
||||
f.Snapshot = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *FindOptions) SetSort(sort interface{}) *FindOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeFindOptions combines the argued FindOptions into a single FindOptions in a last-one-wins fashion
|
||||
func MergeFindOptions(opts ...*FindOptions) *FindOptions {
|
||||
fo := Find()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.AllowPartialResults != nil {
|
||||
fo.AllowPartialResults = opt.AllowPartialResults
|
||||
}
|
||||
if opt.BatchSize != nil {
|
||||
fo.BatchSize = opt.BatchSize
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
fo.Collation = opt.Collation
|
||||
}
|
||||
if opt.Comment != nil {
|
||||
fo.Comment = opt.Comment
|
||||
}
|
||||
if opt.CursorType != nil {
|
||||
fo.CursorType = opt.CursorType
|
||||
}
|
||||
if opt.Hint != nil {
|
||||
fo.Hint = opt.Hint
|
||||
}
|
||||
if opt.Limit != nil {
|
||||
fo.Limit = opt.Limit
|
||||
}
|
||||
if opt.Max != nil {
|
||||
fo.Max = opt.Max
|
||||
}
|
||||
if opt.MaxAwaitTime != nil {
|
||||
fo.MaxAwaitTime = opt.MaxAwaitTime
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.Min != nil {
|
||||
fo.Min = opt.Min
|
||||
}
|
||||
if opt.NoCursorTimeout != nil {
|
||||
fo.NoCursorTimeout = opt.NoCursorTimeout
|
||||
}
|
||||
if opt.OplogReplay != nil {
|
||||
fo.OplogReplay = opt.OplogReplay
|
||||
}
|
||||
if opt.Projection != nil {
|
||||
fo.Projection = opt.Projection
|
||||
}
|
||||
if opt.ReturnKey != nil {
|
||||
fo.ReturnKey = opt.ReturnKey
|
||||
}
|
||||
if opt.ShowRecordID != nil {
|
||||
fo.ShowRecordID = opt.ShowRecordID
|
||||
}
|
||||
if opt.Skip != nil {
|
||||
fo.Skip = opt.Skip
|
||||
}
|
||||
if opt.Snapshot != nil {
|
||||
fo.Snapshot = opt.Snapshot
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
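// Illustrative usage sketch (not part of the vendored file): the fluent
// setters return the receiver, so a FindOptions can be built in a chain and
// several instances can be collapsed with MergeFindOptions in a
// last-one-wins fashion. The values are arbitrary examples.
package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	base := options.Find().
		SetBatchSize(100).
		SetMaxTime(10 * time.Second)
	perCall := options.Find().SetLimit(25)

	// nil entries are skipped by the merge.
	merged := options.MergeFindOptions(base, perCall, nil)
	fmt.Println(*merged.BatchSize, *merged.Limit) // 100 25
}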
|
||||
// FindOneOptions represent all possible options to the FindOne() function.
|
||||
type FindOneOptions struct {
|
||||
AllowPartialResults *bool // If true, allows partial results to be returned if some shards are down.
|
||||
BatchSize *int32 // Specifies the number of documents to return in every batch.
|
||||
Collation *Collation // Specifies a collation to be used
|
||||
Comment *string // Specifies a string to help trace the operation through the database.
|
||||
CursorType *CursorType // Specifies the type of cursor to use
|
||||
Hint interface{} // Specifies the index to use.
|
||||
Max interface{} // Sets an exclusive upper bound for a specific index
|
||||
MaxAwaitTime *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
|
||||
MaxTime *time.Duration // Specifies the maximum amount of time to allow the query to run.
|
||||
Min interface{} // Specifies the inclusive lower bound for a specific index.
|
||||
NoCursorTimeout *bool // If true, prevents cursors from timing out after an inactivity period.
|
||||
OplogReplay *bool // Adds an option for internal use only and should not be set.
|
||||
Projection interface{} // Limits the fields returned for all documents.
|
||||
ReturnKey *bool // If true, only returns index keys for all result documents.
|
||||
ShowRecordID *bool // If true, a $recordId field with the record identifier will be added to the returned documents.
|
||||
Skip *int64 // Specifies the number of documents to skip before returning
|
||||
Snapshot *bool // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
|
||||
Sort interface{} // Specifies the order in which to return results.
|
||||
}
|
||||
|
||||
// FindOne creates a new FindOneOptions instance.
|
||||
func FindOne() *FindOneOptions {
|
||||
return &FindOneOptions{}
|
||||
}
|
||||
|
||||
// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
|
||||
func (f *FindOneOptions) SetAllowPartialResults(b bool) *FindOneOptions {
|
||||
f.AllowPartialResults = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetBatchSize sets the number of documents to return in each batch.
|
||||
func (f *FindOneOptions) SetBatchSize(i int32) *FindOneOptions {
|
||||
f.BatchSize = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the Find operation.
|
||||
func (f *FindOneOptions) SetCollation(collation *Collation) *FindOneOptions {
|
||||
f.Collation = collation
|
||||
return f
|
||||
}
|
||||
|
||||
// SetComment specifies a string to help trace the operation through the database.
|
||||
func (f *FindOneOptions) SetComment(comment string) *FindOneOptions {
|
||||
f.Comment = &comment
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCursorType specifies the type of cursor to use.
|
||||
func (f *FindOneOptions) SetCursorType(ct CursorType) *FindOneOptions {
|
||||
f.CursorType = &ct
|
||||
return f
|
||||
}
|
||||
|
||||
// SetHint specifies the index to use.
|
||||
func (f *FindOneOptions) SetHint(hint interface{}) *FindOneOptions {
|
||||
f.Hint = hint
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMax specifies an exclusive upper bound for a specific index.
|
||||
func (f *FindOneOptions) SetMax(max interface{}) *FindOneOptions {
|
||||
f.Max = max
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
|
||||
// For server versions < 3.2, this option is ignored.
|
||||
func (f *FindOneOptions) SetMaxAwaitTime(d time.Duration) *FindOneOptions {
|
||||
f.MaxAwaitTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *FindOneOptions) SetMaxTime(d time.Duration) *FindOneOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMin specifies the inclusive lower bound for a specific index.
|
||||
func (f *FindOneOptions) SetMin(min interface{}) *FindOneOptions {
|
||||
f.Min = min
|
||||
return f
|
||||
}
|
||||
|
||||
// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
|
||||
func (f *FindOneOptions) SetNoCursorTimeout(b bool) *FindOneOptions {
|
||||
f.NoCursorTimeout = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetOplogReplay adds an option for internal use only and should not be set.
|
||||
func (f *FindOneOptions) SetOplogReplay(b bool) *FindOneOptions {
|
||||
f.OplogReplay = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetProjection adds an option to limit the fields returned for all documents.
|
||||
func (f *FindOneOptions) SetProjection(projection interface{}) *FindOneOptions {
|
||||
f.Projection = projection
|
||||
return f
|
||||
}
|
||||
|
||||
// SetReturnKey adds an option to only return index keys for all result documents.
|
||||
func (f *FindOneOptions) SetReturnKey(b bool) *FindOneOptions {
|
||||
f.ReturnKey = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
|
||||
// If true, a $recordId field will be added to each returned document.
|
||||
func (f *FindOneOptions) SetShowRecordID(b bool) *FindOneOptions {
|
||||
f.ShowRecordID = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSkip specifies the number of documents to skip before returning.
|
||||
func (f *FindOneOptions) SetSkip(i int64) *FindOneOptions {
|
||||
f.Skip = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
|
||||
func (f *FindOneOptions) SetSnapshot(b bool) *FindOneOptions {
|
||||
f.Snapshot = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *FindOneOptions) SetSort(sort interface{}) *FindOneOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeFindOneOptions combines the argued FindOneOptions into a single FindOneOptions in a last-one-wins fashion
|
||||
func MergeFindOneOptions(opts ...*FindOneOptions) *FindOneOptions {
|
||||
fo := FindOne()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.AllowPartialResults != nil {
|
||||
fo.AllowPartialResults = opt.AllowPartialResults
|
||||
}
|
||||
if opt.BatchSize != nil {
|
||||
fo.BatchSize = opt.BatchSize
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
fo.Collation = opt.Collation
|
||||
}
|
||||
if opt.Comment != nil {
|
||||
fo.Comment = opt.Comment
|
||||
}
|
||||
if opt.CursorType != nil {
|
||||
fo.CursorType = opt.CursorType
|
||||
}
|
||||
if opt.Hint != nil {
|
||||
fo.Hint = opt.Hint
|
||||
}
|
||||
if opt.Max != nil {
|
||||
fo.Max = opt.Max
|
||||
}
|
||||
if opt.MaxAwaitTime != nil {
|
||||
fo.MaxAwaitTime = opt.MaxAwaitTime
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.Min != nil {
|
||||
fo.Min = opt.Min
|
||||
}
|
||||
if opt.NoCursorTimeout != nil {
|
||||
fo.NoCursorTimeout = opt.NoCursorTimeout
|
||||
}
|
||||
if opt.OplogReplay != nil {
|
||||
fo.OplogReplay = opt.OplogReplay
|
||||
}
|
||||
if opt.Projection != nil {
|
||||
fo.Projection = opt.Projection
|
||||
}
|
||||
if opt.ReturnKey != nil {
|
||||
fo.ReturnKey = opt.ReturnKey
|
||||
}
|
||||
if opt.ShowRecordID != nil {
|
||||
fo.ShowRecordID = opt.ShowRecordID
|
||||
}
|
||||
if opt.Skip != nil {
|
||||
fo.Skip = opt.Skip
|
||||
}
|
||||
if opt.Snapshot != nil {
|
||||
fo.Snapshot = opt.Snapshot
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
|
||||
// FindOneAndReplaceOptions represent all possible options to the FindOneAndReplace() function.
|
||||
type FindOneAndReplaceOptions struct {
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt out of document-level validation.
|
||||
Collation *Collation // Specifies a collation to be used
|
||||
MaxTime *time.Duration // Specifies the maximum amount of time to allow the query to run.
|
||||
Projection interface{} // Limits the fields returned for all documents.
|
||||
ReturnDocument *ReturnDocument // Specifies whether the original or updated document should be returned.
|
||||
Sort interface{} // Specifies the order in which to return results.
|
||||
Upsert *bool // If true, creates a new document if no document matches the query.
|
||||
}
|
||||
|
||||
// FindOneAndReplace creates a new FindOneAndReplaceOptions instance.
|
||||
func FindOneAndReplace() *FindOneAndReplaceOptions {
|
||||
return &FindOneAndReplaceOptions{}
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (f *FindOneAndReplaceOptions) SetBypassDocumentValidation(b bool) *FindOneAndReplaceOptions {
|
||||
f.BypassDocumentValidation = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the Find operation.
|
||||
func (f *FindOneAndReplaceOptions) SetCollation(collation *Collation) *FindOneAndReplaceOptions {
|
||||
f.Collation = collation
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *FindOneAndReplaceOptions) SetMaxTime(d time.Duration) *FindOneAndReplaceOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetProjection adds an option to limit the fields returned for all documents.
|
||||
func (f *FindOneAndReplaceOptions) SetProjection(projection interface{}) *FindOneAndReplaceOptions {
|
||||
f.Projection = projection
|
||||
return f
|
||||
}
|
||||
|
||||
// SetReturnDocument specifies whether the original or updated document should be returned.
|
||||
// If set to Before, the original document will be returned. If set to After, the updated document
|
||||
// will be returned.
|
||||
func (f *FindOneAndReplaceOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndReplaceOptions {
|
||||
f.ReturnDocument = &rd
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *FindOneAndReplaceOptions) SetSort(sort interface{}) *FindOneAndReplaceOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// SetUpsert specifies if a new document should be created if no document matches the query.
|
||||
func (f *FindOneAndReplaceOptions) SetUpsert(b bool) *FindOneAndReplaceOptions {
|
||||
f.Upsert = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeFindOneAndReplaceOptions combines the argued FindOneAndReplaceOptions into a single FindOneAndReplaceOptions in a last-one-wins fashion
|
||||
func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions {
|
||||
fo := FindOneAndReplace()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.BypassDocumentValidation != nil {
|
||||
fo.BypassDocumentValidation = opt.BypassDocumentValidation
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
fo.Collation = opt.Collation
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.Projection != nil {
|
||||
fo.Projection = opt.Projection
|
||||
}
|
||||
if opt.ReturnDocument != nil {
|
||||
fo.ReturnDocument = opt.ReturnDocument
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
if opt.Upsert != nil {
|
||||
fo.Upsert = opt.Upsert
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
|
||||
// FindOneAndUpdateOptions represent all possible options to the FindOneAndUpdate() function.
|
||||
type FindOneAndUpdateOptions struct {
|
||||
ArrayFilters *ArrayFilters // A set of filters specifying to which array elements an update should apply.
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt out of document-level validation.
|
||||
Collation *Collation // Specifies a collation to be used
|
||||
MaxTime *time.Duration // Specifies the maximum amount of time to allow the query to run.
|
||||
Projection interface{} // Limits the fields returned for all documents.
|
||||
ReturnDocument *ReturnDocument // Specifies whether the original or updated document should be returned.
|
||||
Sort interface{} // Specifies the order in which to return results.
|
||||
Upsert *bool // If true, creates a new document if no document matches the query.
|
||||
}
|
||||
|
||||
// FindOneAndUpdate creates a new FindOneAndUpdateOptions instance.
|
||||
func FindOneAndUpdate() *FindOneAndUpdateOptions {
|
||||
return &FindOneAndUpdateOptions{}
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
|
||||
func (f *FindOneAndUpdateOptions) SetBypassDocumentValidation(b bool) *FindOneAndUpdateOptions {
|
||||
f.BypassDocumentValidation = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetArrayFilters specifies a set of filters, which determine to which array elements an update should apply.
|
||||
func (f *FindOneAndUpdateOptions) SetArrayFilters(filters ArrayFilters) *FindOneAndUpdateOptions {
|
||||
f.ArrayFilters = &filters
|
||||
return f
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the Find operation.
|
||||
func (f *FindOneAndUpdateOptions) SetCollation(collation *Collation) *FindOneAndUpdateOptions {
|
||||
f.Collation = collation
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *FindOneAndUpdateOptions) SetMaxTime(d time.Duration) *FindOneAndUpdateOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetProjection adds an option to limit the fields returned for all documents.
|
||||
func (f *FindOneAndUpdateOptions) SetProjection(projection interface{}) *FindOneAndUpdateOptions {
|
||||
f.Projection = projection
|
||||
return f
|
||||
}
|
||||
|
||||
// SetReturnDocument specifies whether the original or updated document should be returned.
|
||||
// If set to Before, the original document will be returned. If set to After, the updated document
|
||||
// will be returned.
|
||||
func (f *FindOneAndUpdateOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndUpdateOptions {
|
||||
f.ReturnDocument = &rd
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *FindOneAndUpdateOptions) SetSort(sort interface{}) *FindOneAndUpdateOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// SetUpsert specifies if a new document should be created if no document matches the query.
|
||||
func (f *FindOneAndUpdateOptions) SetUpsert(b bool) *FindOneAndUpdateOptions {
|
||||
f.Upsert = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeFindOneAndUpdateOptions combines the argued FindOneAndUpdateOptions into a single FindOneAndUpdateOptions in a last-one-wins fashion
|
||||
func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions {
|
||||
fo := FindOneAndUpdate()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ArrayFilters != nil {
|
||||
fo.ArrayFilters = opt.ArrayFilters
|
||||
}
|
||||
if opt.BypassDocumentValidation != nil {
|
||||
fo.BypassDocumentValidation = opt.BypassDocumentValidation
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
fo.Collation = opt.Collation
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.Projection != nil {
|
||||
fo.Projection = opt.Projection
|
||||
}
|
||||
if opt.ReturnDocument != nil {
|
||||
fo.ReturnDocument = opt.ReturnDocument
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
if opt.Upsert != nil {
|
||||
fo.Upsert = opt.Upsert
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
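// Illustrative usage sketch (not part of the vendored file): a
// FindOneAndUpdateOptions carrying an array filter, requesting the
// post-update document, and enabling upsert. The filter document is an
// arbitrary example.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	opts := options.FindOneAndUpdate().
		SetArrayFilters(options.ArrayFilters{
			Filters: []interface{}{
				bson.D{{Key: "elem.qty", Value: bson.D{{Key: "$gt", Value: 10}}}},
			},
		}).
		SetReturnDocument(options.After).
		SetUpsert(true)

	fmt.Println(*opts.Upsert, *opts.ReturnDocument == options.After) // true true
}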
|
||||
// FindOneAndDeleteOptions represent all possible options to the FindOneAndDelete() function.
|
||||
type FindOneAndDeleteOptions struct {
|
||||
Collation *Collation // Specifies a collation to be used
|
||||
MaxTime *time.Duration // Specifies the maximum amount of time to allow the query to run.
|
||||
Projection interface{} // Limits the fields returned for all documents.
|
||||
Sort interface{} // Specifies the order in which to return results.
|
||||
}
|
||||
|
||||
// FindOneAndDelete creates a new FindOneAndDeleteOptions instance.
|
||||
func FindOneAndDelete() *FindOneAndDeleteOptions {
|
||||
return &FindOneAndDeleteOptions{}
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the Find operation.
|
||||
// Valid for server versions >= 3.4
|
||||
func (f *FindOneAndDeleteOptions) SetCollation(collation *Collation) *FindOneAndDeleteOptions {
|
||||
f.Collation = collation
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *FindOneAndDeleteOptions) SetMaxTime(d time.Duration) *FindOneAndDeleteOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetProjection adds an option to limit the fields returned for all documents.
|
||||
func (f *FindOneAndDeleteOptions) SetProjection(projection interface{}) *FindOneAndDeleteOptions {
|
||||
f.Projection = projection
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *FindOneAndDeleteOptions) SetSort(sort interface{}) *FindOneAndDeleteOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeFindOneAndDeleteOptions combines the argued FindOneAndDeleteOptions into a single FindOneAndDeleteOptions in a last-one-wins fashion
|
||||
func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions {
|
||||
fo := FindOneAndDelete()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
fo.Collation = opt.Collation
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.Projection != nil {
|
||||
fo.Projection = opt.Projection
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
274
vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go
generated
vendored
Executable file
@@ -0,0 +1,274 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
)
|
||||
|
||||
// DefaultName is the default name for a GridFS bucket.
|
||||
var DefaultName = "fs"
|
||||
|
||||
// DefaultChunkSize is the default size of each file chunk in bytes.
|
||||
var DefaultChunkSize int32 = 255 * 1024 // 255 KiB
|
||||
|
||||
// DefaultRevision is the default revision number for a download by name operation.
|
||||
var DefaultRevision int32 = -1
|
||||
|
||||
// BucketOptions represents all possible options to configure a GridFS bucket.
|
||||
type BucketOptions struct {
|
||||
Name *string // The bucket name. Defaults to "fs".
|
||||
ChunkSizeBytes *int32 // The chunk size in bytes. Defaults to 255KB.
|
||||
WriteConcern *writeconcern.WriteConcern // The write concern for the bucket. Defaults to the write concern of the database.
|
||||
ReadConcern *readconcern.ReadConcern // The read concern for the bucket. Defaults to the read concern of the database.
|
||||
ReadPreference *readpref.ReadPref // The read preference for the bucket. Defaults to the read preference of the database.
|
||||
}
|
||||
|
||||
// GridFSBucket creates a new *BucketOptions
|
||||
func GridFSBucket() *BucketOptions {
|
||||
return &BucketOptions{
|
||||
Name: &DefaultName,
|
||||
ChunkSizeBytes: &DefaultChunkSize,
|
||||
}
|
||||
}
|
||||
|
||||
// SetName sets the name for the bucket. Defaults to "fs" if not set.
|
||||
func (b *BucketOptions) SetName(name string) *BucketOptions {
|
||||
b.Name = &name
|
||||
return b
|
||||
}
|
||||
|
||||
// SetChunkSizeBytes sets the chunk size in bytes for the bucket. Defaults to 255KB if not set.
|
||||
func (b *BucketOptions) SetChunkSizeBytes(i int32) *BucketOptions {
|
||||
b.ChunkSizeBytes = &i
|
||||
return b
|
||||
}
|
||||
|
||||
// SetWriteConcern sets the write concern for the bucket.
|
||||
func (b *BucketOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *BucketOptions {
|
||||
b.WriteConcern = wc
|
||||
return b
|
||||
}
|
||||
|
||||
// SetReadConcern sets the read concern for the bucket.
|
||||
func (b *BucketOptions) SetReadConcern(rc *readconcern.ReadConcern) *BucketOptions {
|
||||
b.ReadConcern = rc
|
||||
return b
|
||||
}
|
||||
|
||||
// SetReadPreference sets the read preference for the bucket.
|
||||
func (b *BucketOptions) SetReadPreference(rp *readpref.ReadPref) *BucketOptions {
|
||||
b.ReadPreference = rp
|
||||
return b
|
||||
}
|
||||
|
||||
// MergeBucketOptions combines the given *BucketOptions into a single *BucketOptions.
|
||||
// If the name or chunk size is not set in any of the given *BucketOptions, the resulting *BucketOptions will have
|
||||
// name "fs" and chunk size 255KB.
|
||||
func MergeBucketOptions(opts ...*BucketOptions) *BucketOptions {
|
||||
b := GridFSBucket()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.Name != nil {
|
||||
b.Name = opt.Name
|
||||
}
|
||||
if opt.ChunkSizeBytes != nil {
|
||||
b.ChunkSizeBytes = opt.ChunkSizeBytes
|
||||
}
|
||||
if opt.WriteConcern != nil {
|
||||
b.WriteConcern = opt.WriteConcern
|
||||
}
|
||||
if opt.ReadConcern != nil {
|
||||
b.ReadConcern = opt.ReadConcern
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
b.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
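// Illustrative usage sketch (not part of the vendored file): GridFSBucket()
// pre-populates the default name ("fs") and chunk size, so a merge only
// overrides what callers explicitly set. The bucket name and chunk size are
// arbitrary examples.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	custom := options.GridFSBucket().
		SetName("images").
		SetChunkSizeBytes(1024 * 1024)

	merged := options.MergeBucketOptions(nil, custom)
	fmt.Println(*merged.Name, *merged.ChunkSizeBytes) // images 1048576
}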
|
||||
// UploadOptions represents all possible options for a GridFS upload operation. If a registry is nil, bson.DefaultRegistry
|
||||
// will be used when converting the Metadata interface to BSON.
|
||||
type UploadOptions struct {
|
||||
ChunkSizeBytes *int32 // Chunk size in bytes. Defaults to the chunk size of the bucket.
|
||||
Metadata interface{} // User data for the 'metadata' field of the files collection document.
|
||||
Registry *bsoncodec.Registry // The registry to use for converting filters. Defaults to bson.DefaultRegistry.
|
||||
}
|
||||
|
||||
// GridFSUpload creates a new *UploadOptions
|
||||
func GridFSUpload() *UploadOptions {
|
||||
return &UploadOptions{Registry: bson.DefaultRegistry}
|
||||
}
|
||||
|
||||
// SetChunkSizeBytes sets the chunk size in bytes for the upload. Defaults to 255KB if not set.
|
||||
func (u *UploadOptions) SetChunkSizeBytes(i int32) *UploadOptions {
|
||||
u.ChunkSizeBytes = &i
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMetadata specifies the metadata for the upload.
|
||||
func (u *UploadOptions) SetMetadata(doc interface{}) *UploadOptions {
|
||||
u.Metadata = doc
|
||||
return u
|
||||
}
|
||||
|
||||
// MergeUploadOptions combines the given *UploadOptions into a single *UploadOptions.
|
||||
// If the chunk size is not set in any of the given *UploadOptions, the resulting *UploadOptions will have chunk size
|
||||
// 255KB.
|
||||
func MergeUploadOptions(opts ...*UploadOptions) *UploadOptions {
|
||||
u := GridFSUpload()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ChunkSizeBytes != nil {
|
||||
u.ChunkSizeBytes = opt.ChunkSizeBytes
|
||||
}
|
||||
if opt.Metadata != nil {
|
||||
u.Metadata = opt.Metadata
|
||||
}
|
||||
if opt.Registry != nil {
|
||||
u.Registry = opt.Registry
|
||||
}
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// NameOptions represents all options that can be used for a GridFS download by name operation.
|
||||
type NameOptions struct {
|
||||
Revision *int32 // Which revision (documents with the same filename and different uploadDate). Defaults to -1 (the most recent revision).
|
||||
}
|
||||
|
||||
// GridFSName creates a new *NameOptions
|
||||
func GridFSName() *NameOptions {
|
||||
return &NameOptions{}
|
||||
}
|
||||
|
||||
// SetRevision specifies which revision of the file to retrieve. Defaults to -1.
|
||||
// * Revision numbers are defined as follows:
|
||||
// * 0 = the original stored file
|
||||
// * 1 = the first revision
|
||||
// * 2 = the second revision
|
||||
// * etc…
|
||||
// * -2 = the second most recent revision
|
||||
// * -1 = the most recent revision
|
||||
func (n *NameOptions) SetRevision(r int32) *NameOptions {
|
||||
n.Revision = &r
|
||||
return n
|
||||
}
|
||||
|
||||
// MergeNameOptions combines the given *NameOptions into a single *NameOptions in a last one wins fashion.
|
||||
func MergeNameOptions(opts ...*NameOptions) *NameOptions {
|
||||
n := GridFSName()
|
||||
n.Revision = &DefaultRevision
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.Revision != nil {
|
||||
n.Revision = opt.Revision
|
||||
}
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
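// Illustrative usage sketch (not part of the vendored file): selecting a file
// revision for a download-by-name operation. Revision 0 is the originally
// stored file; with no explicit revision the merge falls back to
// DefaultRevision (-1, the most recent revision).
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	original := options.GridFSName().SetRevision(0)
	merged := options.MergeNameOptions(original, nil)
	fmt.Println(*merged.Revision) // 0

	fmt.Println(*options.MergeNameOptions().Revision) // -1
}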
|
||||
// GridFSFindOptions represents all options for a GridFS find operation.
|
||||
type GridFSFindOptions struct {
|
||||
BatchSize *int32
|
||||
Limit *int32
|
||||
MaxTime *time.Duration
|
||||
NoCursorTimeout *bool
|
||||
Skip *int32
|
||||
Sort interface{}
|
||||
}
|
||||
|
||||
// GridFSFind creates a new GridFSFindOptions instance.
|
||||
func GridFSFind() *GridFSFindOptions {
|
||||
return &GridFSFindOptions{}
|
||||
}
|
||||
|
||||
// SetBatchSize sets the number of documents to return in each batch.
|
||||
func (f *GridFSFindOptions) SetBatchSize(i int32) *GridFSFindOptions {
|
||||
f.BatchSize = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetLimit specifies a limit on the number of results.
|
||||
// A negative limit implies that only 1 batch should be returned.
|
||||
func (f *GridFSFindOptions) SetLimit(i int32) *GridFSFindOptions {
|
||||
f.Limit = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the max time to allow the query to run.
|
||||
func (f *GridFSFindOptions) SetMaxTime(d time.Duration) *GridFSFindOptions {
|
||||
f.MaxTime = &d
|
||||
return f
|
||||
}
|
||||
|
||||
// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
|
||||
func (f *GridFSFindOptions) SetNoCursorTimeout(b bool) *GridFSFindOptions {
|
||||
f.NoCursorTimeout = &b
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSkip specifies the number of documents to skip before returning.
|
||||
func (f *GridFSFindOptions) SetSkip(i int32) *GridFSFindOptions {
|
||||
f.Skip = &i
|
||||
return f
|
||||
}
|
||||
|
||||
// SetSort specifies the order in which to return documents.
|
||||
func (f *GridFSFindOptions) SetSort(sort interface{}) *GridFSFindOptions {
|
||||
f.Sort = sort
|
||||
return f
|
||||
}
|
||||
|
||||
// MergeGridFSFindOptions combines the argued GridFSFindOptions into a single GridFSFindOptions in a last-one-wins fashion
|
||||
func MergeGridFSFindOptions(opts ...*GridFSFindOptions) *GridFSFindOptions {
|
||||
fo := GridFSFind()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.BatchSize != nil {
|
||||
fo.BatchSize = opt.BatchSize
|
||||
}
|
||||
if opt.Limit != nil {
|
||||
fo.Limit = opt.Limit
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
fo.MaxTime = opt.MaxTime
|
||||
}
|
||||
if opt.NoCursorTimeout != nil {
|
||||
fo.NoCursorTimeout = opt.NoCursorTimeout
|
||||
}
|
||||
if opt.Skip != nil {
|
||||
fo.Skip = opt.Skip
|
||||
}
|
||||
if opt.Sort != nil {
|
||||
fo.Sort = opt.Sort
|
||||
}
|
||||
}
|
||||
|
||||
return fo
|
||||
}
|
||||
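// Illustrative usage sketch (not part of the vendored file): GridFSFindOptions
// mirrors the plain find options for queries against the files collection;
// later options win on merge. The values are arbitrary examples.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	a := options.GridFSFind().SetBatchSize(50).SetSkip(10)
	b := options.GridFSFind().SetSkip(20)

	merged := options.MergeGridFSFindOptions(a, b)
	fmt.Println(*merged.BatchSize, *merged.Skip) // 50 20
}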
336
vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go
generated
vendored
Executable file
@@ -0,0 +1,336 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// CreateIndexesOptions represents all possible options for the CreateOne() and CreateMany() functions.
|
||||
type CreateIndexesOptions struct {
|
||||
MaxTime *time.Duration // The maximum amount of time to allow the query to run.
|
||||
}
|
||||
|
||||
// CreateIndexes creates a new CreateIndexesOptions instance.
|
||||
func CreateIndexes() *CreateIndexesOptions {
|
||||
return &CreateIndexesOptions{}
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the maximum amount of time to allow the query to run.
|
||||
func (c *CreateIndexesOptions) SetMaxTime(d time.Duration) *CreateIndexesOptions {
|
||||
c.MaxTime = &d
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeCreateIndexesOptions combines the given *CreateIndexesOptions into a single *CreateIndexesOptions in a last one
|
||||
// wins fashion.
|
||||
func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions {
|
||||
c := CreateIndexes()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
c.MaxTime = opt.MaxTime
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// DropIndexesOptions represents all possible options for the DropIndexes() function.
|
||||
type DropIndexesOptions struct {
|
||||
MaxTime *time.Duration
|
||||
}
|
||||
|
||||
// DropIndexes creates a new DropIndexesOptions instance.
|
||||
func DropIndexes() *DropIndexesOptions {
|
||||
return &DropIndexesOptions{}
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the maximum amount of time to allow the query to run.
|
||||
func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOptions {
|
||||
d.MaxTime = &duration
|
||||
return d
|
||||
}
|
||||
|
||||
// MergeDropIndexesOptions combines the given *DropIndexesOptions into a single *DropIndexesOptions in a last one
|
||||
// wins fashion.
|
||||
func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions {
|
||||
c := DropIndexes()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
c.MaxTime = opt.MaxTime
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// ListIndexesOptions represents all possible options for the ListIndexes() function.
|
||||
type ListIndexesOptions struct {
|
||||
BatchSize *int32
|
||||
MaxTime *time.Duration
|
||||
}
|
||||
|
||||
// ListIndexes creates a new ListIndexesOptions instance.
|
||||
func ListIndexes() *ListIndexesOptions {
|
||||
return &ListIndexesOptions{}
|
||||
}
|
||||
|
||||
// SetBatchSize specifies the number of documents to return in every batch.
|
||||
func (l *ListIndexesOptions) SetBatchSize(i int32) *ListIndexesOptions {
|
||||
l.BatchSize = &i
|
||||
return l
|
||||
}
|
||||
|
||||
// SetMaxTime specifies the maximum amount of time to allow the query to run.
|
||||
func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions {
|
||||
l.MaxTime = &d
|
||||
return l
|
||||
}
|
||||
|
||||
// MergeListIndexesOptions combines the given *ListIndexesOptions into a single *ListIndexesOptions in a last one
|
||||
// wins fashion.
|
||||
func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions {
|
||||
c := ListIndexes()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.MaxTime != nil {
|
||||
c.MaxTime = opt.MaxTime
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// IndexOptions represents all possible options to configure a new index.
|
||||
type IndexOptions struct {
|
||||
Background *bool
|
||||
ExpireAfterSeconds *int32
|
||||
Name *string
|
||||
Sparse *bool
|
||||
StorageEngine interface{}
|
||||
Unique *bool
|
||||
Version *int32
|
||||
DefaultLanguage *string
|
||||
LanguageOverride *string
|
||||
TextVersion *int32
|
||||
Weights interface{}
|
||||
SphereVersion *int32
|
||||
Bits *int32
|
||||
Max *float64
|
||||
Min *float64
|
||||
BucketSize *int32
|
||||
PartialFilterExpression interface{}
|
||||
Collation *Collation
|
||||
WildcardProjection interface{}
|
||||
}
|
||||
|
||||
// Index creates a new *IndexOptions
|
||||
func Index() *IndexOptions {
|
||||
return &IndexOptions{}
|
||||
}
|
||||
|
||||
// SetBackground sets the background option. If true, the server will create the index in the background and not block
|
||||
// other tasks
|
||||
func (i *IndexOptions) SetBackground(background bool) *IndexOptions {
|
||||
i.Background = &background
|
||||
return i
|
||||
}
|
||||
|
||||
// SetExpireAfterSeconds specifies the number of seconds for a document to remain in a collection.
|
||||
func (i *IndexOptions) SetExpireAfterSeconds(seconds int32) *IndexOptions {
|
||||
i.ExpireAfterSeconds = &seconds
|
||||
return i
|
||||
}
|
||||
|
||||
// SetName specifies a name for the index.
|
||||
// If not set, a name will be generated in the format "[field]_[direction]".
|
||||
// If multiple indexes are created for the same key pattern with different collations, a name must be provided to avoid
|
||||
// ambiguity.
|
||||
func (i *IndexOptions) SetName(name string) *IndexOptions {
|
||||
i.Name = &name
|
||||
return i
|
||||
}
|
||||
|
||||
// SetSparse sets the sparse option.
|
||||
// If true, the index will only reference documents with the specified field in the index.
|
||||
func (i *IndexOptions) SetSparse(sparse bool) *IndexOptions {
|
||||
i.Sparse = &sparse
|
||||
return i
|
||||
}
|
||||
|
||||
// SetStorageEngine specifies the storage engine to use.
|
||||
// Valid for server versions >= 3.0
|
||||
func (i *IndexOptions) SetStorageEngine(engine interface{}) *IndexOptions {
|
||||
i.StorageEngine = engine
|
||||
return i
|
||||
}
|
||||
|
||||
// SetUnique forces the index to be unique.
|
||||
func (i *IndexOptions) SetUnique(unique bool) *IndexOptions {
|
||||
i.Unique = &unique
|
||||
return i
|
||||
}
|
||||
|
||||
// SetVersion specifies the index version number, either 0 or 1.
|
||||
func (i *IndexOptions) SetVersion(version int32) *IndexOptions {
|
||||
i.Version = &version
|
||||
return i
|
||||
}
|
||||
|
||||
// SetDefaultLanguage specifies the default language for text indexes.
|
||||
// If not set, this will default to english.
|
||||
func (i *IndexOptions) SetDefaultLanguage(language string) *IndexOptions {
|
||||
i.DefaultLanguage = &language
|
||||
return i
|
||||
}
|
||||
|
||||
// SetLanguageOverride specifies the field in the document to override the language.
|
||||
func (i *IndexOptions) SetLanguageOverride(override string) *IndexOptions {
|
||||
i.LanguageOverride = &override
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTextVersion specifies the text index version number.
|
||||
// MongoDB version 2.4 can only support version 1.
|
||||
// MongoDB versions 2.6 and higher can support versions 1 or 2.
|
||||
func (i *IndexOptions) SetTextVersion(version int32) *IndexOptions {
|
||||
i.TextVersion = &version
|
||||
return i
|
||||
}
|
||||
|
||||
// SetWeights specifies fields in the index and their corresponding weight values.
|
||||
func (i *IndexOptions) SetWeights(weights interface{}) *IndexOptions {
|
||||
i.Weights = weights
|
||||
return i
|
||||
}
|
||||
|
||||
// SetSphereVersion specifies the 2dsphere index version number.
|
||||
// MongoDB version 2.4 can only support version 1.
|
||||
// MongoDB versions 2.6 and higher can support versions 1 or 2.
|
||||
func (i *IndexOptions) SetSphereVersion(version int32) *IndexOptions {
|
||||
i.SphereVersion = &version
|
||||
return i
|
||||
}
|
||||
|
||||
// SetBits specifies the precision of the stored geo hash in the 2d index, from 1 to 32.
|
||||
func (i *IndexOptions) SetBits(bits int32) *IndexOptions {
|
||||
i.Bits = &bits
|
||||
return i
|
||||
}
|
||||
|
||||
// SetMax specifies the maximum boundary for latitude and longitude in the 2d index.
|
||||
func (i *IndexOptions) SetMax(max float64) *IndexOptions {
|
||||
i.Max = &max
|
||||
return i
|
||||
}
|
||||
|
||||
// SetMin specifies the minimum boundary for latitude and longitude in the 2d index.
|
||||
func (i *IndexOptions) SetMin(min float64) *IndexOptions {
|
||||
i.Min = &min
|
||||
return i
|
||||
}
|
||||
|
||||
// SetBucketSize specifies number of units within which to group the location values in a geo haystack index.
|
||||
func (i *IndexOptions) SetBucketSize(bucketSize int32) *IndexOptions {
|
||||
i.BucketSize = &bucketSize
|
||||
return i
|
||||
}
|
||||
|
||||
// SetPartialFilterExpression specifies a filter for use in a partial index. Only documents that match the filter
|
||||
// expression are included in the index.
|
||||
func (i *IndexOptions) SetPartialFilterExpression(expression interface{}) *IndexOptions {
|
||||
i.PartialFilterExpression = expression
|
||||
return i
|
||||
}
|
||||
|
||||
// SetCollation specifies a Collation to use for the operation.
|
||||
// Valid for server versions >= 3.4
|
||||
func (i *IndexOptions) SetCollation(collation *Collation) *IndexOptions {
|
||||
i.Collation = collation
|
||||
return i
|
||||
}
|
||||
|
||||
// SetWildcardProjection specifies a wildcard projection for a wildcard index.
|
||||
func (i *IndexOptions) SetWildcardProjection(wildcardProjection interface{}) *IndexOptions {
|
||||
i.WildcardProjection = wildcardProjection
|
||||
return i
|
||||
}
|
||||
|
||||
// MergeIndexOptions combines the given *IndexOptions into a single *IndexOptions in a last one wins fashion.
|
||||
func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions {
|
||||
i := Index()
|
||||
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
continue
}
if opt.Background != nil {
|
||||
i.Background = opt.Background
|
||||
}
|
||||
if opt.ExpireAfterSeconds != nil {
|
||||
i.ExpireAfterSeconds = opt.ExpireAfterSeconds
|
||||
}
|
||||
if opt.Name != nil {
|
||||
i.Name = opt.Name
|
||||
}
|
||||
if opt.Sparse != nil {
|
||||
i.Sparse = opt.Sparse
|
||||
}
|
||||
if opt.StorageEngine != nil {
|
||||
i.StorageEngine = opt.StorageEngine
|
||||
}
|
||||
if opt.Unique != nil {
|
||||
i.Unique = opt.Unique
|
||||
}
|
||||
if opt.Version != nil {
|
||||
i.Version = opt.Version
|
||||
}
|
||||
if opt.DefaultLanguage != nil {
|
||||
i.DefaultLanguage = opt.DefaultLanguage
|
||||
}
|
||||
if opt.LanguageOverride != nil {
|
||||
i.LanguageOverride = opt.LanguageOverride
|
||||
}
|
||||
if opt.TextVersion != nil {
|
||||
i.TextVersion = opt.TextVersion
|
||||
}
|
||||
if opt.Weights != nil {
|
||||
i.Weights = opt.Weights
|
||||
}
|
||||
if opt.SphereVersion != nil {
|
||||
i.SphereVersion = opt.SphereVersion
|
||||
}
|
||||
if opt.Bits != nil {
|
||||
i.Bits = opt.Bits
|
||||
}
|
||||
if opt.Max != nil {
|
||||
i.Max = opt.Max
|
||||
}
|
||||
if opt.Min != nil {
|
||||
i.Min = opt.Min
|
||||
}
|
||||
if opt.BucketSize != nil {
|
||||
i.BucketSize = opt.BucketSize
|
||||
}
|
||||
if opt.PartialFilterExpression != nil {
|
||||
i.PartialFilterExpression = opt.PartialFilterExpression
|
||||
}
|
||||
if opt.Collation != nil {
|
||||
i.Collation = opt.Collation
|
||||
}
|
||||
if opt.WildcardProjection != nil {
|
||||
i.WildcardProjection = opt.WildcardProjection
|
||||
}
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
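// Illustrative usage sketch (not part of the vendored file): an IndexOptions
// describing a named, unique index whose documents expire after one hour.
// The index name and expiry are arbitrary examples.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	idx := options.Index().
		SetName("session_expiry").
		SetUnique(true).
		SetExpireAfterSeconds(3600)

	fmt.Println(*idx.Name, *idx.Unique, *idx.ExpireAfterSeconds) // session_expiry true 3600
}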
84
vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go
generated
vendored
Executable file
@@ -0,0 +1,84 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
// InsertOneOptions represents all possible options to the InsertOne() function.
|
||||
type InsertOneOptions struct {
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
|
||||
}
|
||||
|
||||
// InsertOne returns a pointer to a new InsertOneOptions
|
||||
func InsertOne() *InsertOneOptions {
|
||||
return &InsertOneOptions{}
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation allows the write to opt-out of document level validation.
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (ioo *InsertOneOptions) SetBypassDocumentValidation(b bool) *InsertOneOptions {
|
||||
ioo.BypassDocumentValidation = &b
|
||||
return ioo
|
||||
}
|
||||
|
||||
// MergeInsertOneOptions combines the argued InsertOneOptions into a single InsertOneOptions in a last-one-wins fashion
|
||||
func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions {
|
||||
ioOpts := InsertOne()
|
||||
for _, ioo := range opts {
|
||||
if ioo == nil {
|
||||
continue
|
||||
}
|
||||
if ioo.BypassDocumentValidation != nil {
|
||||
ioOpts.BypassDocumentValidation = ioo.BypassDocumentValidation
|
||||
}
|
||||
}
|
||||
|
||||
return ioOpts
|
||||
}
|
||||
|
||||
// InsertManyOptions represents all possible options to the InsertMany() function.
|
||||
type InsertManyOptions struct {
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
|
||||
Ordered *bool // If true, when an insert fails, return without performing the remaining inserts. Defaults to true.
|
||||
}
|
||||
|
||||
// InsertMany returns a pointer to a new InsertManyOptions
|
||||
func InsertMany() *InsertManyOptions {
|
||||
return &InsertManyOptions{
|
||||
Ordered: &DefaultOrdered,
|
||||
}
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation allows the write to opt-out of document level validation.
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (imo *InsertManyOptions) SetBypassDocumentValidation(b bool) *InsertManyOptions {
|
||||
imo.BypassDocumentValidation = &b
|
||||
return imo
|
||||
}
|
||||
|
||||
// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
|
||||
// remaining writes. Defaults to true.
|
||||
func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions {
|
||||
imo.Ordered = &b
|
||||
return imo
|
||||
}
|
||||
|
||||
// MergeInsertManyOptions combines the argued InsertManyOptions into a single InsertManyOptions in a last-one-wins fashion
|
||||
func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions {
|
||||
imOpts := InsertMany()
|
||||
for _, imo := range opts {
|
||||
if imo == nil {
|
||||
continue
|
||||
}
|
||||
if imo.BypassDocumentValidation != nil {
|
||||
imOpts.BypassDocumentValidation = imo.BypassDocumentValidation
|
||||
}
|
||||
if imo.Ordered != nil {
|
||||
imOpts.Ordered = imo.Ordered
|
||||
}
|
||||
}
|
||||
|
||||
return imOpts
|
||||
}
|
||||
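// Illustrative usage sketch (not part of the vendored file): InsertMany()
// starts with Ordered defaulted to true, so only an explicit
// SetOrdered(false) changes the merged result.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	unordered := options.InsertMany().SetOrdered(false)
	merged := options.MergeInsertManyOptions(nil, unordered)
	fmt.Println(*merged.Ordered) // false
}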
39
vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go
generated
vendored
Executable file
@@ -0,0 +1,39 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
// ListCollectionsOptions represents all possible options for a listCollections command.
|
||||
type ListCollectionsOptions struct {
|
||||
NameOnly *bool // If true, only the collection names will be returned.
|
||||
}
|
||||
|
||||
// ListCollections creates a new *ListCollectionsOptions
|
||||
func ListCollections() *ListCollectionsOptions {
|
||||
return &ListCollectionsOptions{}
|
||||
}
|
||||
|
||||
// SetNameOnly specifies whether to return only the collection names.
|
||||
func (lc *ListCollectionsOptions) SetNameOnly(b bool) *ListCollectionsOptions {
|
||||
lc.NameOnly = &b
|
||||
return lc
|
||||
}
|
||||
|
||||
// MergeListCollectionsOptions combines the given *ListCollectionsOptions into a single *ListCollectionsOptions in a
|
||||
// last one wins fashion.
|
||||
func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions {
|
||||
lc := ListCollections()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.NameOnly != nil {
|
||||
lc.NameOnly = opt.NameOnly
|
||||
}
|
||||
}
|
||||
|
||||
return lc
|
||||
}
|
||||
39
vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go
generated
vendored
Executable file
@@ -0,0 +1,39 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
// ListDatabasesOptions represents all possible options for a listDatabases command.
|
||||
type ListDatabasesOptions struct {
|
||||
NameOnly *bool // If true, only the database names will be returned.
|
||||
}
|
||||
|
||||
// ListDatabases creates a new *ListDatabasesOptions
|
||||
func ListDatabases() *ListDatabasesOptions {
|
||||
return &ListDatabasesOptions{}
|
||||
}
|
||||
|
||||
// SetNameOnly specifies whether to return only the database names.
|
||||
func (ld *ListDatabasesOptions) SetNameOnly(b bool) *ListDatabasesOptions {
|
||||
ld.NameOnly = &b
|
||||
return ld
|
||||
}
|
||||
|
||||
// MergeListDatabasesOptions combines the given *ListDatabasesOptions into a single *ListDatabasesOptions in a last one
|
||||
// wins fashion.
|
||||
func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions {
|
||||
ld := ListDatabases()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.NameOnly != nil {
|
||||
ld.NameOnly = opt.NameOnly
|
||||
}
|
||||
}
|
||||
|
||||
return ld
|
||||
}
|
||||
161
vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go
generated
vendored
Executable file
@@ -0,0 +1,161 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// Collation allows users to specify language-specific rules for string comparison, such as
|
||||
// rules for lettercase and accent marks.
|
||||
type Collation struct {
|
||||
Locale string `bson:",omitempty"` // The locale
|
||||
CaseLevel bool `bson:",omitempty"` // The case level
|
||||
CaseFirst string `bson:",omitempty"` // The case ordering
|
||||
Strength int `bson:",omitempty"` // The number of comparison levels to use
|
||||
NumericOrdering bool `bson:",omitempty"` // Whether to order numbers based on numerical order and not collation order
|
||||
Alternate string `bson:",omitempty"` // Whether spaces and punctuation are considered base characters
|
||||
MaxVariable string `bson:",omitempty"` // Which characters are affected by alternate: "shifted"
|
||||
Normalization bool `bson:",omitempty"` // Causes text to be normalized into Unicode NFD
|
||||
Backwards bool `bson:",omitempty"` // Causes secondary differences to be considered in reverse order, as it is done in the French language
|
||||
}
|
||||
|
||||
// ToDocument converts the Collation to a bson.Raw.
|
||||
func (co *Collation) ToDocument() bson.Raw {
|
||||
idx, doc := bsoncore.AppendDocumentStart(nil)
|
||||
if co.Locale != "" {
|
||||
doc = bsoncore.AppendStringElement(doc, "locale", co.Locale)
|
||||
}
|
||||
if co.CaseLevel {
|
||||
doc = bsoncore.AppendBooleanElement(doc, "caseLevel", true)
|
||||
}
|
||||
if co.CaseFirst != "" {
|
||||
doc = bsoncore.AppendStringElement(doc, "caseFirst", co.CaseFirst)
|
||||
}
|
||||
if co.Strength != 0 {
|
||||
doc = bsoncore.AppendInt32Element(doc, "strength", int32(co.Strength))
|
||||
}
|
||||
if co.NumericOrdering {
|
||||
doc = bsoncore.AppendBooleanElement(doc, "numericOrdering", true)
|
||||
}
|
||||
if co.Alternate != "" {
|
||||
doc = bsoncore.AppendStringElement(doc, "alternate", co.Alternate)
|
||||
}
|
||||
if co.MaxVariable != "" {
|
||||
doc = bsoncore.AppendStringElement(doc, "maxVariable", co.MaxVariable)
|
||||
}
|
||||
if co.Normalization {
|
||||
doc = bsoncore.AppendBooleanElement(doc, "normalization", true)
|
||||
}
|
||||
if co.Backwards {
|
||||
doc = bsoncore.AppendBooleanElement(doc, "backwards", true)
|
||||
}
|
||||
doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
|
||||
return doc
|
||||
}
|
||||
|
||||
// CursorType specifies whether a cursor should close when the last data is retrieved. See
|
||||
// NonTailable, Tailable, and TailableAwait.
|
||||
type CursorType int8
|
||||
|
||||
const (
|
||||
// NonTailable specifies that a cursor should close after retrieving the last data.
|
||||
NonTailable CursorType = iota
|
||||
// Tailable specifies that a cursor should not close when the last data is retrieved and can be resumed later.
|
||||
Tailable
|
||||
// TailableAwait specifies that a cursor should not close when the last data is retrieved and
|
||||
// that it should block for a certain amount of time for new data before returning no data.
|
||||
TailableAwait
|
||||
)
|
||||
|
||||
// ReturnDocument specifies whether a findAndUpdate operation should return the document as it was
|
||||
// before the update or as it is after the update.
|
||||
type ReturnDocument int8
|
||||
|
||||
const (
|
||||
// Before specifies that findAndUpdate should return the document as it was before the update.
|
||||
Before ReturnDocument = iota
|
||||
// After specifies that findAndUpdate should return the document as it is after the update.
|
||||
After
|
||||
)
|
||||
|
||||
// FullDocument specifies whether a change stream should include a copy of the entire document that was changed from
|
||||
// some time after the change occurred.
|
||||
type FullDocument string
|
||||
|
||||
const (
|
||||
// Default does not include a document copy
|
||||
Default FullDocument = "default"
|
||||
// UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that
|
||||
// was changed
|
||||
UpdateLookup FullDocument = "updateLookup"
|
||||
)
|
||||
|
||||
// ArrayFilters is used to hold filters for the array filters CRUD option. If a registry is nil, bson.DefaultRegistry
|
||||
// will be used when converting the filter interfaces to BSON.
|
||||
type ArrayFilters struct {
|
||||
Registry *bsoncodec.Registry // The registry to use for converting filters. Defaults to bson.DefaultRegistry.
|
||||
Filters []interface{} // The filters to apply
|
||||
}
|
||||
|
||||
// ToArray builds a []bson.Raw from the provided ArrayFilters.
|
||||
func (af *ArrayFilters) ToArray() ([]bson.Raw, error) {
|
||||
registry := af.Registry
|
||||
if registry == nil {
|
||||
registry = bson.DefaultRegistry
|
||||
}
|
||||
filters := make([]bson.Raw, 0, len(af.Filters))
|
||||
for _, f := range af.Filters {
|
||||
filter, err := bson.MarshalWithRegistry(registry, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters = append(filters, filter)
|
||||
}
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
// ToArrayDocument builds a BSON array for the array filters CRUD option. If the registry for af is nil,
|
||||
// bson.DefaultRegistry will be used when converting the filter interfaces to BSON.
|
||||
func (af *ArrayFilters) ToArrayDocument() (bson.Raw, error) {
|
||||
registry := af.Registry
|
||||
if registry == nil {
|
||||
registry = bson.DefaultRegistry
|
||||
}
|
||||
|
||||
idx, arr := bsoncore.AppendArrayStart(nil)
|
||||
for i, f := range af.Filters {
|
||||
filter, err := bson.MarshalWithRegistry(registry, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
arr = bsoncore.AppendDocumentElement(arr, strconv.Itoa(i), filter)
|
||||
}
|
||||
arr, _ = bsoncore.AppendArrayEnd(arr, idx)
|
||||
return arr, nil
|
||||
}
|
||||
|
||||
// MarshalError is returned when attempting to transform a value into a document
|
||||
// results in an error.
|
||||
type MarshalError struct {
|
||||
Value interface{}
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (me MarshalError) Error() string {
|
||||
return fmt.Sprintf("cannot transform type %s to a bson.Raw", reflect.TypeOf(me.Value))
|
||||
}
|
||||
|
||||
var defaultRegistry = bson.DefaultRegistry
|
||||
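A short sketch of the Collation and ArrayFilters helpers defined above (illustrative only; output is BSON rendered as extended JSON):

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// ToDocument only appends the Collation fields that were actually set.
	coll := &options.Collation{Locale: "en_US", Strength: 2}
	fmt.Println(coll.ToDocument()) // prints the collation document as extended JSON

	// ArrayFilters falls back to bson.DefaultRegistry when Registry is nil.
	af := options.ArrayFilters{
		Filters: []interface{}{bson.D{{Key: "elem.qty", Value: bson.D{{Key: "$gt", Value: 10}}}}},
	}
	arr, err := af.ToArrayDocument()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(arr)
}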
60
vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go
generated
vendored
Executable file
@@ -0,0 +1,60 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
// ReplaceOptions represents all possible options to the ReplaceOne() function.
|
||||
type ReplaceOptions struct {
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
|
||||
Collation *Collation // Specifies a collation
|
||||
Upsert *bool // When true, creates a new document if no document matches the query
|
||||
}
|
||||
|
||||
// Replace returns a pointer to a new ReplaceOptions
|
||||
func Replace() *ReplaceOptions {
|
||||
return &ReplaceOptions{}
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation allows the write to opt-out of document level validation.
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (ro *ReplaceOptions) SetBypassDocumentValidation(b bool) *ReplaceOptions {
|
||||
ro.BypassDocumentValidation = &b
|
||||
return ro
|
||||
}
|
||||
|
||||
// SetCollation specifies a collation.
|
||||
// Valid for servers >= 3.4
|
||||
func (ro *ReplaceOptions) SetCollation(c *Collation) *ReplaceOptions {
|
||||
ro.Collation = c
|
||||
return ro
|
||||
}
|
||||
|
||||
// SetUpsert allows the creation of a new document if no document matches the query
|
||||
func (ro *ReplaceOptions) SetUpsert(b bool) *ReplaceOptions {
|
||||
ro.Upsert = &b
|
||||
return ro
|
||||
}
|
||||
|
||||
// MergeReplaceOptions combines the argued ReplaceOptions into a single ReplaceOptions in a last-one-wins fashion
|
||||
func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions {
|
||||
rOpts := Replace()
|
||||
for _, ro := range opts {
|
||||
if ro == nil {
|
||||
continue
|
||||
}
|
||||
if ro.BypassDocumentValidation != nil {
|
||||
rOpts.BypassDocumentValidation = ro.BypassDocumentValidation
|
||||
}
|
||||
if ro.Collation != nil {
|
||||
rOpts.Collation = ro.Collation
|
||||
}
|
||||
if ro.Upsert != nil {
|
||||
rOpts.Upsert = ro.Upsert
|
||||
}
|
||||
}
|
||||
|
||||
return rOpts
|
||||
}
|
||||
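A sketch of how a variadic option slice is collapsed with MergeReplaceOptions, mirroring the pattern the driver's CRUD methods use internally; applyReplace is a hypothetical helper used only for illustration:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

// applyReplace is hypothetical; it only demonstrates the merge step.
func applyReplace(opts ...*options.ReplaceOptions) {
	ro := options.MergeReplaceOptions(opts...)
	if ro.Upsert != nil && *ro.Upsert {
		fmt.Println("upsert requested")
	}
	if ro.Collation != nil {
		fmt.Println("collation locale:", ro.Collation.Locale)
	}
}

func main() {
	applyReplace(
		options.Replace().SetCollation(&options.Collation{Locale: "fr"}),
		options.Replace().SetUpsert(true),
	)
}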
40
vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go
generated
vendored
Executable file
@@ -0,0 +1,40 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import "go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
|
||||
// RunCmdOptions represents all possible options for a runCommand operation.
|
||||
type RunCmdOptions struct {
|
||||
ReadPreference *readpref.ReadPref // The read preference for the operation.
|
||||
}
|
||||
|
||||
// RunCmd creates a new *RunCmdOptions
|
||||
func RunCmd() *RunCmdOptions {
|
||||
return &RunCmdOptions{}
|
||||
}
|
||||
|
||||
// SetReadPreference sets the read preference for the operation.
|
||||
func (rc *RunCmdOptions) SetReadPreference(rp *readpref.ReadPref) *RunCmdOptions {
|
||||
rc.ReadPreference = rp
|
||||
return rc
|
||||
}
|
||||
|
||||
// MergeRunCmdOptions combines the given *RunCmdOptions into one *RunCmdOptions in a last one wins fashion.
|
||||
func MergeRunCmdOptions(opts ...*RunCmdOptions) *RunCmdOptions {
|
||||
rc := RunCmd()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
rc.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
}
|
||||
|
||||
return rc
|
||||
}
|
||||
79
vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go
generated
vendored
Executable file
@@ -0,0 +1,79 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
)
|
||||
|
||||
// DefaultCausalConsistency is the default value for the CausalConsistency option.
|
||||
var DefaultCausalConsistency = true
|
||||
|
||||
// SessionOptions represents all possible options for creating a new session.
|
||||
type SessionOptions struct {
|
||||
CausalConsistency *bool // Specifies if reads should be causally consistent. Defaults to true.
|
||||
DefaultReadConcern *readconcern.ReadConcern // The default read concern for transactions started in the session.
|
||||
DefaultReadPreference *readpref.ReadPref // The default read preference for transactions started in the session.
|
||||
DefaultWriteConcern *writeconcern.WriteConcern // The default write concern for transactions started in the session.
|
||||
}
|
||||
|
||||
// Session creates a new *SessionOptions
|
||||
func Session() *SessionOptions {
|
||||
return &SessionOptions{
|
||||
CausalConsistency: &DefaultCausalConsistency,
|
||||
}
|
||||
}
|
||||
|
||||
// SetCausalConsistency specifies if a session should be causally consistent. Defaults to true.
|
||||
func (s *SessionOptions) SetCausalConsistency(b bool) *SessionOptions {
|
||||
s.CausalConsistency = &b
|
||||
return s
|
||||
}
|
||||
|
||||
// SetDefaultReadConcern sets the default read concern for transactions started in a session.
|
||||
func (s *SessionOptions) SetDefaultReadConcern(rc *readconcern.ReadConcern) *SessionOptions {
|
||||
s.DefaultReadConcern = rc
|
||||
return s
|
||||
}
|
||||
|
||||
// SetDefaultReadPreference sets the default read preference for transactions started in a session.
|
||||
func (s *SessionOptions) SetDefaultReadPreference(rp *readpref.ReadPref) *SessionOptions {
|
||||
s.DefaultReadPreference = rp
|
||||
return s
|
||||
}
|
||||
|
||||
// SetDefaultWriteConcern sets the default write concern for transactions started in a session.
|
||||
func (s *SessionOptions) SetDefaultWriteConcern(wc *writeconcern.WriteConcern) *SessionOptions {
|
||||
s.DefaultWriteConcern = wc
|
||||
return s
|
||||
}
|
||||
|
||||
// MergeSessionOptions combines the given *SessionOptions into a single *SessionOptions in a last one wins fashion.
|
||||
func MergeSessionOptions(opts ...*SessionOptions) *SessionOptions {
|
||||
s := Session()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.CausalConsistency != nil {
|
||||
s.CausalConsistency = opt.CausalConsistency
|
||||
}
|
||||
if opt.DefaultReadConcern != nil {
|
||||
s.DefaultReadConcern = opt.DefaultReadConcern
|
||||
}
|
||||
if opt.DefaultReadPreference != nil {
|
||||
s.DefaultReadPreference = opt.DefaultReadPreference
|
||||
}
|
||||
if opt.DefaultWriteConcern != nil {
|
||||
s.DefaultWriteConcern = opt.DefaultWriteConcern
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
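An illustrative sketch of the session options above: Session() pre-populates CausalConsistency with DefaultCausalConsistency, and each setter overrides a single field:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/readconcern"
	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	so := options.Session().
		SetDefaultReadConcern(readconcern.Majority()).
		SetDefaultReadPreference(readpref.Primary())
	fmt.Println(*so.CausalConsistency) // true (the default)

	// Explicitly opt out of causal consistency.
	so.SetCausalConsistency(false)
	fmt.Println(*so.CausalConsistency) // false
}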
65
vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go
generated
vendored
Executable file
@@ -0,0 +1,65 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
"go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
)
|
||||
|
||||
// TransactionOptions represents all possible options for starting a transaction.
|
||||
type TransactionOptions struct {
|
||||
ReadConcern *readconcern.ReadConcern // The read concern for the transaction. Defaults to the session's read concern.
|
||||
ReadPreference *readpref.ReadPref // The read preference for the transaction. Defaults to the session's read preference.
|
||||
WriteConcern *writeconcern.WriteConcern // The write concern for the transaction. Defaults to the session's write concern.
|
||||
}
|
||||
|
||||
// Transaction creates a new *TransactionOptions
|
||||
func Transaction() *TransactionOptions {
|
||||
return &TransactionOptions{}
|
||||
}
|
||||
|
||||
// SetReadConcern sets the read concern for the transaction.
|
||||
func (t *TransactionOptions) SetReadConcern(rc *readconcern.ReadConcern) *TransactionOptions {
|
||||
t.ReadConcern = rc
|
||||
return t
|
||||
}
|
||||
|
||||
// SetReadPreference sets the read preference for the transaction.
|
||||
func (t *TransactionOptions) SetReadPreference(rp *readpref.ReadPref) *TransactionOptions {
|
||||
t.ReadPreference = rp
|
||||
return t
|
||||
}
|
||||
|
||||
// SetWriteConcern sets the write concern for the transaction.
|
||||
func (t *TransactionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *TransactionOptions {
|
||||
t.WriteConcern = wc
|
||||
return t
|
||||
}
|
||||
|
||||
// MergeTransactionOptions combines the given *TransactionOptions into a single *TransactionOptions in a last one wins
|
||||
// fashion.
|
||||
func MergeTransactionOptions(opts ...*TransactionOptions) *TransactionOptions {
|
||||
t := Transaction()
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if opt.ReadConcern != nil {
|
||||
t.ReadConcern = opt.ReadConcern
|
||||
}
|
||||
if opt.ReadPreference != nil {
|
||||
t.ReadPreference = opt.ReadPreference
|
||||
}
|
||||
if opt.WriteConcern != nil {
|
||||
t.WriteConcern = opt.WriteConcern
|
||||
}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
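An illustrative sketch of building TransactionOptions; fields left nil fall back to the session-level defaults noted in the struct comments above:

package main

import (
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/readconcern"
	"go.mongodb.org/mongo-driver/mongo/writeconcern"
)

func main() {
	txnOpts := options.Transaction().
		SetReadConcern(readconcern.Snapshot()).
		SetWriteConcern(writeconcern.New(writeconcern.WMajority()))

	// txnOpts would be passed to Session.StartTransaction or
	// Session.WithTransaction (see session.go later in this diff).
	_ = txnOpts
}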
71
vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go
generated
vendored
Executable file
@@ -0,0 +1,71 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package options
|
||||
|
||||
// UpdateOptions represents all possible options to the UpdateOne() and UpdateMany() functions.
|
||||
type UpdateOptions struct {
|
||||
ArrayFilters *ArrayFilters // A set of filters specifying to which array elements an update should apply
|
||||
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
|
||||
Collation *Collation // Specifies a collation
|
||||
Upsert *bool // When true, creates a new document if no document matches the query
|
||||
}
|
||||
|
||||
// Update returns a pointer to a new UpdateOptions
|
||||
func Update() *UpdateOptions {
|
||||
return &UpdateOptions{}
|
||||
}
|
||||
|
||||
// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply
|
||||
// Valid for server versions >= 3.6.
|
||||
func (uo *UpdateOptions) SetArrayFilters(af ArrayFilters) *UpdateOptions {
|
||||
uo.ArrayFilters = &af
|
||||
return uo
|
||||
}
|
||||
|
||||
// SetBypassDocumentValidation allows the write to opt-out of document level validation.
|
||||
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
|
||||
func (uo *UpdateOptions) SetBypassDocumentValidation(b bool) *UpdateOptions {
|
||||
uo.BypassDocumentValidation = &b
|
||||
return uo
|
||||
}
|
||||
|
||||
// SetCollation specifies a collation.
|
||||
// Valid for server versions >= 3.4.
|
||||
func (uo *UpdateOptions) SetCollation(c *Collation) *UpdateOptions {
|
||||
uo.Collation = c
|
||||
return uo
|
||||
}
|
||||
|
||||
// SetUpsert allows the creation of a new document if no document matches the query
|
||||
func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions {
|
||||
uo.Upsert = &b
|
||||
return uo
|
||||
}
|
||||
|
||||
// MergeUpdateOptions combines the argued UpdateOptions into a single UpdateOptions in a last-one-wins fashion
|
||||
func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
|
||||
uOpts := Update()
|
||||
for _, uo := range opts {
|
||||
if uo == nil {
|
||||
continue
|
||||
}
|
||||
if uo.ArrayFilters != nil {
|
||||
uOpts.ArrayFilters = uo.ArrayFilters
|
||||
}
|
||||
if uo.BypassDocumentValidation != nil {
|
||||
uOpts.BypassDocumentValidation = uo.BypassDocumentValidation
|
||||
}
|
||||
if uo.Collation != nil {
|
||||
uOpts.Collation = uo.Collation
|
||||
}
|
||||
if uo.Upsert != nil {
|
||||
uOpts.Upsert = uo.Upsert
|
||||
}
|
||||
}
|
||||
|
||||
return uOpts
|
||||
}
|
||||
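An illustrative sketch of an UpdateOptions value combining upsert with array filters, the two fields most often used together for positional array updates:

package main

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	uo := options.Update().
		SetUpsert(true).
		SetArrayFilters(options.ArrayFilters{
			// Would match a $[elem] placeholder in an update document such as
			// {"$set": {"grades.$[elem]": 100}}.
			Filters: []interface{}{bson.D{{Key: "elem", Value: bson.D{{Key: "$lt", Value: 100}}}}},
		})
	_ = uo
}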
77
vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go
generated
vendored
Executable file
@@ -0,0 +1,77 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package readconcern // import "go.mongodb.org/mongo-driver/mongo/readconcern"
|
||||
|
||||
import (
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// ReadConcern for replica sets and replica set shards determines which data to return from a query.
|
||||
type ReadConcern struct {
|
||||
level string
|
||||
}
|
||||
|
||||
// Option is an option to provide when creating a ReadConcern.
|
||||
type Option func(concern *ReadConcern)
|
||||
|
||||
// Level creates an option that sets the level of a ReadConcern.
|
||||
func Level(level string) Option {
|
||||
return func(concern *ReadConcern) {
|
||||
concern.level = level
|
||||
}
|
||||
}
|
||||
|
||||
// Local specifies that the query should return the instance’s most recent data.
|
||||
func Local() *ReadConcern {
|
||||
return New(Level("local"))
|
||||
}
|
||||
|
||||
// Majority specifies that the query should return the instance’s most recent data acknowledged as
|
||||
// having been written to a majority of members in the replica set.
|
||||
func Majority() *ReadConcern {
|
||||
return New(Level("majority"))
|
||||
}
|
||||
|
||||
// Linearizable specifies that the query should return data that reflects all successful writes
|
||||
// issued with a write concern of "majority" and acknowledged prior to the start of the read operation.
|
||||
func Linearizable() *ReadConcern {
|
||||
return New(Level("linearizable"))
|
||||
}
|
||||
|
||||
// Available specifies that the query should return data from the instance with no guarantee
|
||||
// that the data has been written to a majority of the replica set members (i.e. may be rolled back).
|
||||
func Available() *ReadConcern {
|
||||
return New(Level("available"))
|
||||
}
|
||||
|
||||
// Snapshot is only available for operations within multi-document transactions.
|
||||
func Snapshot() *ReadConcern {
|
||||
return New(Level("snapshot"))
|
||||
}
|
||||
|
||||
// New constructs a new ReadConcern from the given options.
|
||||
func New(options ...Option) *ReadConcern {
|
||||
concern := &ReadConcern{}
|
||||
|
||||
for _, option := range options {
|
||||
option(concern)
|
||||
}
|
||||
|
||||
return concern
|
||||
}
|
||||
|
||||
// MarshalBSONValue implements the bson.ValueMarshaler interface.
|
||||
func (rc *ReadConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
||||
var elems []byte
|
||||
|
||||
if len(rc.level) > 0 {
|
||||
elems = bsoncore.AppendStringElement(elems, "level", rc.level)
|
||||
}
|
||||
|
||||
return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
|
||||
}
|
||||
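An illustrative sketch: the named constructors above are shorthand for New with the matching Level option, and MarshalBSONValue emits a document containing the level:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo/readconcern"
)

func main() {
	rc := readconcern.Majority() // equivalent to readconcern.New(readconcern.Level("majority"))

	_, data, err := rc.MarshalBSONValue()
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(data)) // {"level": "majority"} rendered as extended JSON
}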
56
vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go
generated
vendored
Executable file
@@ -0,0 +1,56 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package readpref
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Mode indicates the user's preference on reads.
|
||||
type Mode uint8
|
||||
|
||||
// Mode constants
|
||||
const (
|
||||
_ Mode = iota
|
||||
// PrimaryMode indicates that only a primary is
|
||||
// considered for reading. This is the default
|
||||
// mode.
|
||||
PrimaryMode
|
||||
// PrimaryPreferredMode indicates that if a primary
|
||||
// is available, use it; otherwise, eligible
|
||||
// secondaries will be considered.
|
||||
PrimaryPreferredMode
|
||||
// SecondaryMode indicates that only secondaries
|
||||
// should be considered.
|
||||
SecondaryMode
|
||||
// SecondaryPreferredMode indicates that only secondaries
|
||||
// should be considered when one is available. If none
|
||||
// are available, then a primary will be considered.
|
||||
SecondaryPreferredMode
|
||||
// NearestMode indicates that all primaries and secondaries
|
||||
// will be considered.
|
||||
NearestMode
|
||||
)
|
||||
|
||||
// ModeFromString returns the Mode corresponding to the given mode string.
|
||||
func ModeFromString(mode string) (Mode, error) {
|
||||
switch strings.ToLower(mode) {
|
||||
case "primary":
|
||||
return PrimaryMode, nil
|
||||
case "primarypreferred":
|
||||
return PrimaryPreferredMode, nil
|
||||
case "secondary":
|
||||
return SecondaryMode, nil
|
||||
case "secondarypreferred":
|
||||
return SecondaryPreferredMode, nil
|
||||
case "nearest":
|
||||
return NearestMode, nil
|
||||
}
|
||||
return Mode(0), fmt.Errorf("unknown read preference %v", mode)
|
||||
}
|
||||
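An illustrative sketch of ModeFromString, which is case-insensitive and returns an error for unrecognized values:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	m, err := readpref.ModeFromString("secondaryPreferred")
	fmt.Println(m == readpref.SecondaryPreferredMode, err) // true <nil>

	_, err = readpref.ModeFromString("bogus")
	fmt.Println(err) // unknown read preference bogus
}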
60
vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go
generated
vendored
Executable file
@@ -0,0 +1,60 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package readpref
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/tag"
|
||||
)
|
||||
|
||||
// ErrInvalidTagSet indicates that an invalid set of tags was specified.
|
||||
var ErrInvalidTagSet = errors.New("an even number of tags must be specified")
|
||||
|
||||
// Option configures a read preference
|
||||
type Option func(*ReadPref) error
|
||||
|
||||
// WithMaxStaleness sets the maximum staleness a
|
||||
// server is allowed.
|
||||
func WithMaxStaleness(ms time.Duration) Option {
|
||||
return func(rp *ReadPref) error {
|
||||
rp.maxStaleness = ms
|
||||
rp.maxStalenessSet = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTags sets a single tag set used to match
|
||||
// a server. The last call to WithTags or WithTagSets
|
||||
// overrides all previous calls to either method.
|
||||
func WithTags(tags ...string) Option {
|
||||
return func(rp *ReadPref) error {
|
||||
length := len(tags)
|
||||
if length < 2 || length%2 != 0 {
|
||||
return ErrInvalidTagSet
|
||||
}
|
||||
|
||||
tagset := make(tag.Set, 0, length/2)
|
||||
|
||||
for i := 1; i < length; i += 2 {
|
||||
tagset = append(tagset, tag.Tag{Name: tags[i-1], Value: tags[i]})
|
||||
}
|
||||
|
||||
return WithTagSets(tagset)(rp)
|
||||
}
|
||||
}
|
||||
|
||||
// WithTagSets sets the tag sets used to match
|
||||
// a server. The last call to WithTags or WithTagSets
|
||||
// overrides all previous calls to either method.
|
||||
func WithTagSets(tagSets ...tag.Set) Option {
|
||||
return func(rp *ReadPref) error {
|
||||
rp.tagSets = tagSets
|
||||
return nil
|
||||
}
|
||||
}
|
||||
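An illustrative sketch of the read preference options: WithTags takes name/value pairs, so an odd-length or empty list surfaces ErrInvalidTagSet when New applies the option:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	rp, err := readpref.New(
		readpref.NearestMode,
		readpref.WithTags("dc", "east", "rack", "1"),
		readpref.WithMaxStaleness(90*time.Second),
	)
	if err != nil {
		panic(err)
	}

	ms, set := rp.MaxStaleness()
	fmt.Println(rp.Mode(), ms, set, rp.TagSets())
}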
99
vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go
generated
vendored
Executable file
@@ -0,0 +1,99 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package readpref // import "go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/tag"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidReadPreference = errors.New("can not specify tags or max staleness on primary")
|
||||
)
|
||||
|
||||
var primary = ReadPref{mode: PrimaryMode}
|
||||
|
||||
// Primary constructs a read preference with a PrimaryMode.
|
||||
func Primary() *ReadPref {
|
||||
return &primary
|
||||
}
|
||||
|
||||
// PrimaryPreferred constructs a read preference with a PrimaryPreferredMode.
|
||||
func PrimaryPreferred(opts ...Option) *ReadPref {
|
||||
// New only returns an error with a mode of Primary
|
||||
rp, _ := New(PrimaryPreferredMode, opts...)
|
||||
return rp
|
||||
}
|
||||
|
||||
// SecondaryPreferred constructs a read preference with a SecondaryPreferredMode.
|
||||
func SecondaryPreferred(opts ...Option) *ReadPref {
|
||||
// New only returns an error with a mode of Primary
|
||||
rp, _ := New(SecondaryPreferredMode, opts...)
|
||||
return rp
|
||||
}
|
||||
|
||||
// Secondary constructs a read preference with a SecondaryMode.
|
||||
func Secondary(opts ...Option) *ReadPref {
|
||||
// New only returns an error with a mode of Primary
|
||||
rp, _ := New(SecondaryMode, opts...)
|
||||
return rp
|
||||
}
|
||||
|
||||
// Nearest constructs a read preference with a NearestMode.
|
||||
func Nearest(opts ...Option) *ReadPref {
|
||||
// New only returns an error with a mode of Primary
|
||||
rp, _ := New(NearestMode, opts...)
|
||||
return rp
|
||||
}
|
||||
|
||||
// New creates a new ReadPref.
|
||||
func New(mode Mode, opts ...Option) (*ReadPref, error) {
|
||||
rp := &ReadPref{
|
||||
mode: mode,
|
||||
}
|
||||
|
||||
if mode == PrimaryMode && len(opts) != 0 {
|
||||
return nil, errInvalidReadPreference
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(rp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return rp, nil
|
||||
}
|
||||
|
||||
// ReadPref determines which servers are considered suitable for read operations.
|
||||
type ReadPref struct {
|
||||
maxStaleness time.Duration
|
||||
maxStalenessSet bool
|
||||
mode Mode
|
||||
tagSets []tag.Set
|
||||
}
|
||||
|
||||
// MaxStaleness is the maximum amount of time to allow
|
||||
// a server to be considered eligible for selection. The
|
||||
// second return value indicates if this value has been set.
|
||||
func (r *ReadPref) MaxStaleness() (time.Duration, bool) {
|
||||
return r.maxStaleness, r.maxStalenessSet
|
||||
}
|
||||
|
||||
// Mode indicates the mode of the read preference.
|
||||
func (r *ReadPref) Mode() Mode {
|
||||
return r.mode
|
||||
}
|
||||
|
||||
// TagSets are multiple tag sets indicating
|
||||
// which servers should be considered.
|
||||
func (r *ReadPref) TagSets() []tag.Set {
|
||||
return r.tagSets
|
||||
}
|
||||
140
vendor/go.mongodb.org/mongo-driver/mongo/results.go
generated
vendored
Executable file
@@ -0,0 +1,140 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
)
|
||||
|
||||
// BulkWriteResult holds the result of a bulk write operation.
|
||||
type BulkWriteResult struct {
|
||||
InsertedCount int64
|
||||
MatchedCount int64
|
||||
ModifiedCount int64
|
||||
DeletedCount int64
|
||||
UpsertedCount int64
|
||||
UpsertedIDs map[int64]interface{}
|
||||
}
|
||||
|
||||
// InsertOneResult is a result of an InsertOne operation.
|
||||
//
|
||||
// InsertedID will be a Go type that corresponds to a BSON type.
|
||||
type InsertOneResult struct {
|
||||
// The identifier that was inserted.
|
||||
InsertedID interface{}
|
||||
}
|
||||
|
||||
// InsertManyResult is a result of an InsertMany operation.
|
||||
type InsertManyResult struct {
|
||||
// Maps the indexes of inserted documents to their _id fields.
|
||||
InsertedIDs []interface{}
|
||||
}
|
||||
|
||||
// DeleteResult is a result of a DeleteOne operation.
|
||||
type DeleteResult struct {
|
||||
// The number of documents that were deleted.
|
||||
DeletedCount int64 `bson:"n"`
|
||||
}
|
||||
|
||||
// ListDatabasesResult is a result of a ListDatabases operation. Each specification
|
||||
// is a description of the databases on the server.
|
||||
type ListDatabasesResult struct {
|
||||
Databases []DatabaseSpecification
|
||||
TotalSize int64
|
||||
}
|
||||
|
||||
func newListDatabasesResultFromOperation(res operation.ListDatabasesResult) ListDatabasesResult {
|
||||
var ldr ListDatabasesResult
|
||||
ldr.Databases = make([]DatabaseSpecification, 0, len(res.Databases))
|
||||
for _, spec := range res.Databases {
|
||||
ldr.Databases = append(
|
||||
ldr.Databases,
|
||||
DatabaseSpecification{Name: spec.Name, SizeOnDisk: spec.SizeOnDisk, Empty: spec.Empty},
|
||||
)
|
||||
}
|
||||
ldr.TotalSize = res.TotalSize
|
||||
return ldr
|
||||
}
|
||||
|
||||
// DatabaseSpecification is the information for a single database returned
|
||||
// from a ListDatabases operation.
|
||||
type DatabaseSpecification struct {
|
||||
Name string
|
||||
SizeOnDisk int64
|
||||
Empty bool
|
||||
}
|
||||
|
||||
// UpdateResult is a result of an update operation.
|
||||
//
|
||||
// UpsertedID will be a Go type that corresponds to a BSON type.
|
||||
type UpdateResult struct {
|
||||
// The number of documents that matched the filter.
|
||||
MatchedCount int64
|
||||
// The number of documents that were modified.
|
||||
ModifiedCount int64
|
||||
// The number of documents that were upserted.
|
||||
UpsertedCount int64
|
||||
// The identifier of the inserted document if an upsert took place.
|
||||
UpsertedID interface{}
|
||||
}
|
||||
|
||||
// UnmarshalBSON implements the bson.Unmarshaler interface.
|
||||
func (result *UpdateResult) UnmarshalBSON(b []byte) error {
|
||||
elems, err := bson.Raw(b).Elements()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, elem := range elems {
|
||||
switch elem.Key() {
|
||||
case "n":
|
||||
switch elem.Value().Type {
|
||||
case bson.TypeInt32:
|
||||
result.MatchedCount = int64(elem.Value().Int32())
|
||||
case bson.TypeInt64:
|
||||
result.MatchedCount = elem.Value().Int64()
|
||||
default:
|
||||
return fmt.Errorf("Received invalid type for n, should be Int32 or Int64, received %s", elem.Value().Type)
|
||||
}
|
||||
case "nModified":
|
||||
switch elem.Value().Type {
|
||||
case bson.TypeInt32:
|
||||
result.ModifiedCount = int64(elem.Value().Int32())
|
||||
case bson.TypeInt64:
|
||||
result.ModifiedCount = elem.Value().Int64()
|
||||
default:
|
||||
return fmt.Errorf("Received invalid type for nModified, should be Int32 or Int64, received %s", elem.Value().Type)
|
||||
}
|
||||
case "upserted":
|
||||
switch elem.Value().Type {
|
||||
case bson.TypeArray:
|
||||
e, err := elem.Value().Array().IndexErr(0)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if e.Value().Type != bson.TypeEmbeddedDocument {
|
||||
break
|
||||
}
|
||||
var d struct {
|
||||
ID interface{} `bson:"_id"`
|
||||
}
|
||||
err = bson.Unmarshal(e.Value().Document(), &d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result.UpsertedID = d.ID
|
||||
default:
|
||||
return fmt.Errorf("Received invalid type for upserted, should be Array, received %s", elem.Value().Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
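An illustrative sketch of UpdateResult.UnmarshalBSON, which maps the server reply fields "n", "nModified" and "upserted" onto the exported counters above:

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

func main() {
	// A minimal fake server reply; a real reply comes from an update command.
	raw, err := bson.Marshal(bson.D{
		{Key: "n", Value: int32(1)},
		{Key: "nModified", Value: int32(1)},
	})
	if err != nil {
		log.Fatal(err)
	}

	var res mongo.UpdateResult
	if err := res.UnmarshalBSON(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.MatchedCount, res.ModifiedCount) // 1 1
}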
277
vendor/go.mongodb.org/mongo-driver/mongo/session.go
generated
vendored
Executable file
@@ -0,0 +1,277 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driverlegacy"
|
||||
"go.mongodb.org/mongo-driver/x/network/command"
|
||||
)
|
||||
|
||||
// ErrWrongClient is returned when a user attempts to pass in a session created by a different client than
|
||||
// the method call is using.
|
||||
var ErrWrongClient = errors.New("session was not created by this client")
|
||||
|
||||
var withTransactionTimeout = 120 * time.Second
|
||||
|
||||
// SessionContext is a hybrid interface. It combines a context.Context with
|
||||
// a mongo.Session. This type can be used as a regular context.Context or
|
||||
// Session type. It is not goroutine safe and should not be used in multiple goroutines concurrently.
|
||||
type SessionContext interface {
|
||||
context.Context
|
||||
Session
|
||||
}
|
||||
|
||||
type sessionContext struct {
|
||||
context.Context
|
||||
Session
|
||||
}
|
||||
|
||||
type sessionKey struct {
|
||||
}
|
||||
|
||||
// Session is the interface that represents a sequential set of operations executed.
|
||||
// Instances of this interface can be used to run transactions against the server
|
||||
// and to enable causally consistent behavior for applications.
|
||||
type Session interface {
|
||||
EndSession(context.Context)
|
||||
WithTransaction(ctx context.Context, fn func(sessCtx SessionContext) (interface{}, error), opts ...*options.TransactionOptions) (interface{}, error)
|
||||
StartTransaction(...*options.TransactionOptions) error
|
||||
AbortTransaction(context.Context) error
|
||||
CommitTransaction(context.Context) error
|
||||
ClusterTime() bson.Raw
|
||||
AdvanceClusterTime(bson.Raw) error
|
||||
OperationTime() *primitive.Timestamp
|
||||
AdvanceOperationTime(*primitive.Timestamp) error
|
||||
Client() *Client
|
||||
session()
|
||||
}
|
||||
|
||||
// sessionImpl represents a set of sequential operations executed by an application that are related in some way.
|
||||
type sessionImpl struct {
|
||||
clientSession *session.Client
|
||||
client *Client
|
||||
topo *topology.Topology
|
||||
didCommitAfterStart bool // true if commit was called after start with no other operations
|
||||
}
|
||||
|
||||
// EndSession ends the session.
|
||||
func (s *sessionImpl) EndSession(ctx context.Context) {
|
||||
if s.clientSession.TransactionInProgress() {
|
||||
// ignore all errors aborting during an end session
|
||||
_ = s.AbortTransaction(ctx)
|
||||
}
|
||||
s.clientSession.EndSession()
|
||||
}
|
||||
|
||||
// WithTransaction creates a transaction on this session and runs the given callback, retrying for
|
||||
// TransientTransactionError and UnknownTransactionCommitResult errors. The only way to provide a
|
||||
// session to a CRUD method is to invoke that CRUD method with the mongo.SessionContext within the
|
||||
// callback. The mongo.SessionContext can be used as a regular context, so methods like
|
||||
// context.WithDeadline and context.WithTimeout are supported.
|
||||
//
|
||||
// If the context.Context already has a mongo.Session attached, that mongo.Session will be replaced
|
||||
// with the one provided.
|
||||
//
|
||||
// The callback may be run multiple times due to retry attempts. Non-retryable and timed out errors
|
||||
// are returned from this function.
|
||||
func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(sessCtx SessionContext) (interface{}, error), opts ...*options.TransactionOptions) (interface{}, error) {
|
||||
timeout := time.NewTimer(withTransactionTimeout)
|
||||
defer timeout.Stop()
|
||||
var err error
|
||||
for {
|
||||
err = s.StartTransaction(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := fn(contextWithSession(ctx, s))
|
||||
if err != nil {
|
||||
if s.clientSession.TransactionRunning() {
|
||||
_ = s.AbortTransaction(ctx)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-timeout.C:
|
||||
return nil, err
|
||||
default:
|
||||
}
|
||||
|
||||
if cerr, ok := err.(CommandError); ok {
|
||||
if cerr.HasErrorLabel(command.TransientTransactionError) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
err = s.clientSession.CheckAbortTransaction()
|
||||
if err != nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
CommitLoop:
|
||||
for {
|
||||
err = s.CommitTransaction(ctx)
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-timeout.C:
|
||||
return res, err
|
||||
default:
|
||||
}
|
||||
|
||||
if cerr, ok := err.(CommandError); ok {
|
||||
if cerr.HasErrorLabel(command.UnknownTransactionCommitResult) {
|
||||
continue
|
||||
}
|
||||
if cerr.HasErrorLabel(command.TransientTransactionError) {
|
||||
break CommitLoop
|
||||
}
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartTransaction starts a transaction for this session.
|
||||
func (s *sessionImpl) StartTransaction(opts ...*options.TransactionOptions) error {
|
||||
err := s.clientSession.CheckStartTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.didCommitAfterStart = false
|
||||
|
||||
topts := options.MergeTransactionOptions(opts...)
|
||||
coreOpts := &session.TransactionOptions{
|
||||
ReadConcern: topts.ReadConcern,
|
||||
ReadPreference: topts.ReadPreference,
|
||||
WriteConcern: topts.WriteConcern,
|
||||
}
|
||||
|
||||
return s.clientSession.StartTransaction(coreOpts)
|
||||
}
|
||||
|
||||
// AbortTransaction aborts the session's transaction, returning any errors and error codes
|
||||
func (s *sessionImpl) AbortTransaction(ctx context.Context) error {
|
||||
err := s.clientSession.CheckAbortTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not run the abort command if the transaction is in the starting state
|
||||
if s.clientSession.TransactionStarting() || s.didCommitAfterStart {
|
||||
return s.clientSession.AbortTransaction()
|
||||
}
|
||||
|
||||
cmd := command.AbortTransaction{
|
||||
Session: s.clientSession,
|
||||
}
|
||||
|
||||
s.clientSession.Aborting = true
|
||||
_, err = driverlegacy.AbortTransaction(ctx, cmd, s.topo, description.WriteSelector())
|
||||
|
||||
_ = s.clientSession.AbortTransaction()
|
||||
return replaceErrors(err)
|
||||
}
|
||||
|
||||
// CommitTransaction commits the session's transaction.
|
||||
func (s *sessionImpl) CommitTransaction(ctx context.Context) error {
|
||||
err := s.clientSession.CheckCommitTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not run the commit command if the transaction is in the starting state
|
||||
if s.clientSession.TransactionStarting() || s.didCommitAfterStart {
|
||||
s.didCommitAfterStart = true
|
||||
return s.clientSession.CommitTransaction()
|
||||
}
|
||||
|
||||
if s.clientSession.TransactionCommitted() {
|
||||
s.clientSession.RetryingCommit = true
|
||||
}
|
||||
|
||||
var selector description.ServerSelectorFunc = func(t description.Topology, svrs []description.Server) ([]description.Server, error) {
|
||||
if s.clientSession.PinnedServer != nil {
|
||||
return s.clientSession.PinnedServer.SelectServer(t, svrs)
|
||||
}
|
||||
return description.WriteSelector().SelectServer(t, svrs)
|
||||
}
|
||||
|
||||
s.clientSession.Committing = true
|
||||
err = operation.NewCommitTransaction().
|
||||
Session(s.clientSession).ClusterClock(s.client.clock).Database("admin").Deployment(s.topo).
|
||||
WriteConcern(s.clientSession.CurrentWc).ServerSelector(selector).Retry(driver.RetryOncePerCommand).
|
||||
CommandMonitor(s.client.monitor).RecoveryToken(bsoncore.Document(s.clientSession.RecoveryToken)).Execute(ctx)
|
||||
s.clientSession.Committing = false
|
||||
commitErr := s.clientSession.CommitTransaction()
|
||||
|
||||
// We set the write concern to majority for subsequent calls to CommitTransaction.
|
||||
s.clientSession.UpdateCommitTransactionWriteConcern()
|
||||
|
||||
if err != nil {
|
||||
return replaceErrors(err)
|
||||
}
|
||||
return commitErr
|
||||
}
|
||||
|
||||
func (s *sessionImpl) ClusterTime() bson.Raw {
|
||||
return s.clientSession.ClusterTime
|
||||
}
|
||||
|
||||
func (s *sessionImpl) AdvanceClusterTime(d bson.Raw) error {
|
||||
return s.clientSession.AdvanceClusterTime(d)
|
||||
}
|
||||
|
||||
func (s *sessionImpl) OperationTime() *primitive.Timestamp {
|
||||
return s.clientSession.OperationTime
|
||||
}
|
||||
|
||||
func (s *sessionImpl) AdvanceOperationTime(ts *primitive.Timestamp) error {
|
||||
return s.clientSession.AdvanceOperationTime(ts)
|
||||
}
|
||||
|
||||
func (s *sessionImpl) Client() *Client {
|
||||
return s.client
|
||||
}
|
||||
|
||||
func (*sessionImpl) session() {
|
||||
}
|
||||
|
||||
// sessionFromContext checks for a sessionImpl in the argued context and returns the session if it
|
||||
// exists
|
||||
func sessionFromContext(ctx context.Context) *session.Client {
|
||||
s := ctx.Value(sessionKey{})
|
||||
if ses, ok := s.(*sessionImpl); ses != nil && ok {
|
||||
return ses.clientSession
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func contextWithSession(ctx context.Context, sess Session) SessionContext {
|
||||
return &sessionContext{
|
||||
Context: context.WithValue(ctx, sessionKey{}, sess),
|
||||
Session: sess,
|
||||
}
|
||||
}
|
||||
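An illustrative sketch of the WithTransaction flow implemented above. It assumes the driver's Client.StartSession, Database and Collection.InsertOne APIs, which are not part of this diff; the "bank"/"orders" names are placeholders:

package main

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/writeconcern"
)

func runTransfer(ctx context.Context, client *mongo.Client) (interface{}, error) {
	sess, err := client.StartSession(options.Session())
	if err != nil {
		return nil, err
	}
	defer sess.EndSession(ctx)

	txnOpts := options.Transaction().
		SetWriteConcern(writeconcern.New(writeconcern.WMajority()))

	// WithTransaction retries on TransientTransactionError and
	// UnknownTransactionCommitResult, as the implementation above shows,
	// and the callback must perform its operations through sessCtx.
	return sess.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (interface{}, error) {
		coll := client.Database("bank").Collection("orders")
		return coll.InsertOne(sessCtx, bson.D{{Key: "amount", Value: 100}})
	}, txnOpts)
}

func main() {
	// Connecting a real client is outside the scope of this sketch.
}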
93
vendor/go.mongodb.org/mongo-driver/mongo/single_result.go
generated
vendored
Executable file
@@ -0,0 +1,93 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsoncodec"
|
||||
)
|
||||
|
||||
// ErrNoDocuments is returned by Decode when an operation that returns a
|
||||
// SingleResult doesn't return any documents.
|
||||
var ErrNoDocuments = errors.New("mongo: no documents in result")
|
||||
|
||||
// SingleResult represents a single document returned from an operation. If
|
||||
// the operation returned an error, the Err method of SingleResult will
|
||||
// return that error.
|
||||
type SingleResult struct {
|
||||
err error
|
||||
cur *Cursor
|
||||
rdr bson.Raw
|
||||
reg *bsoncodec.Registry
|
||||
}
|
||||
|
||||
// Decode will attempt to decode the first document into v. If there was an
|
||||
// error from the operation that created this SingleResult then the error
|
||||
// will be returned. If there were no returned documents, ErrNoDocuments is
|
||||
// returned. If v is nil or is a typed nil, an error will be returned.
|
||||
func (sr *SingleResult) Decode(v interface{}) error {
|
||||
if sr.err != nil {
|
||||
return sr.err
|
||||
}
|
||||
if sr.reg == nil {
|
||||
return bson.ErrNilRegistry
|
||||
}
|
||||
|
||||
if sr.err = sr.setRdrContents(); sr.err != nil {
|
||||
return sr.err
|
||||
}
|
||||
return bson.UnmarshalWithRegistry(sr.reg, sr.rdr, v)
|
||||
}
|
||||
|
||||
// DecodeBytes will return a copy of the document as a bson.Raw. If there was an
|
||||
// error from the operation that created this SingleResult then the error
|
||||
// will be returned. If there were no returned documents, ErrNoDocuments is
|
||||
// returned.
|
||||
func (sr *SingleResult) DecodeBytes() (bson.Raw, error) {
|
||||
if sr.err != nil {
|
||||
return nil, sr.err
|
||||
}
|
||||
|
||||
if sr.err = sr.setRdrContents(); sr.err != nil {
|
||||
return nil, sr.err
|
||||
}
|
||||
return sr.rdr, nil
|
||||
}
|
||||
|
||||
// setRdrContents will set the contents of rdr by iterating the underlying cursor if necessary.
|
||||
func (sr *SingleResult) setRdrContents() error {
|
||||
switch {
|
||||
case sr.err != nil:
|
||||
return sr.err
|
||||
case sr.rdr != nil:
|
||||
return nil
|
||||
case sr.cur != nil:
|
||||
defer sr.cur.Close(context.TODO())
|
||||
if !sr.cur.Next(context.TODO()) {
|
||||
if err := sr.cur.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ErrNoDocuments
|
||||
}
|
||||
sr.rdr = sr.cur.Current
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrNoDocuments
|
||||
}
|
||||
|
||||
// Err will return the error from the operation that created this SingleResult.
|
||||
// If there was no error, nil is returned.
|
||||
func (sr *SingleResult) Err() error {
|
||||
sr.err = sr.setRdrContents()
|
||||
|
||||
return sr.err
|
||||
}
|
||||
7
vendor/go.mongodb.org/mongo-driver/mongo/util.go
generated
vendored
Executable file
@@ -0,0 +1,7 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package mongo
|
||||
216
vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go
generated
vendored
Executable file
@@ -0,0 +1,216 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package writeconcern // import "go.mongodb.org/mongo-driver/mongo/writeconcern"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// ErrInconsistent indicates that an inconsistent write concern was specified.
|
||||
var ErrInconsistent = errors.New("a write concern cannot have both w=0 and j=true")
|
||||
|
||||
// ErrEmptyWriteConcern indicates that a write concern has no fields set.
|
||||
var ErrEmptyWriteConcern = errors.New("a write concern must have at least one field set")
|
||||
|
||||
// ErrNegativeW indicates that a negative integer `w` field was specified.
|
||||
var ErrNegativeW = errors.New("write concern `w` field cannot be a negative number")
|
||||
|
||||
// ErrNegativeWTimeout indicates that a negative WTimeout was specified.
|
||||
var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative")
|
||||
|
||||
// WriteConcern describes the level of acknowledgement requested from MongoDB for write operations
|
||||
// to a standalone mongod or to replica sets or to sharded clusters.
|
||||
type WriteConcern struct {
|
||||
w interface{}
|
||||
j bool
|
||||
wTimeout time.Duration
|
||||
}
|
||||
|
||||
// Option is an option to provide when creating a WriteConcern.
|
||||
type Option func(concern *WriteConcern)
|
||||
|
||||
// New constructs a new WriteConcern.
|
||||
func New(options ...Option) *WriteConcern {
|
||||
concern := &WriteConcern{}
|
||||
|
||||
for _, option := range options {
|
||||
option(concern)
|
||||
}
|
||||
|
||||
return concern
|
||||
}
|
||||
|
||||
// W requests acknowledgement that write operations propagate to the specified number of mongod
|
||||
// instances.
|
||||
func W(w int) Option {
|
||||
return func(concern *WriteConcern) {
|
||||
concern.w = w
|
||||
}
|
||||
}
|
||||
|
||||
// WMajority requests acknowledgement that write operations propagate to the majority of mongod
|
||||
// instances.
|
||||
func WMajority() Option {
|
||||
return func(concern *WriteConcern) {
|
||||
concern.w = "majority"
|
||||
}
|
||||
}
|
||||
|
||||
// WTagSet requests acknowledgement that write operations propagate to the specified mongod
|
||||
// instance.
|
||||
func WTagSet(tag string) Option {
|
||||
return func(concern *WriteConcern) {
|
||||
concern.w = tag
|
||||
}
|
||||
}
|
||||
|
||||
// J requests acknowledgement from MongoDB that write operations are written to
|
||||
// the journal.
|
||||
func J(j bool) Option {
|
||||
return func(concern *WriteConcern) {
|
||||
concern.j = j
|
||||
}
|
||||
}
|
||||
|
||||
// WTimeout specifies a time limit for the write concern.
|
||||
func WTimeout(d time.Duration) Option {
|
||||
return func(concern *WriteConcern) {
|
||||
concern.wTimeout = d
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalBSONValue implements the bson.ValueMarshaler interface.
|
||||
func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
||||
if !wc.IsValid() {
|
||||
return bsontype.Type(0), nil, ErrInconsistent
|
||||
}
|
||||
|
||||
var elems []byte
|
||||
|
||||
if wc.w != nil {
|
||||
switch t := wc.w.(type) {
|
||||
case int:
|
||||
if t < 0 {
|
||||
return bsontype.Type(0), nil, ErrNegativeW
|
||||
}
|
||||
|
||||
elems = bsoncore.AppendInt32Element(elems, "w", int32(t))
|
||||
case string:
|
||||
elems = bsoncore.AppendStringElement(elems, "w", string(t))
|
||||
}
|
||||
}
|
||||
|
||||
if wc.j {
|
||||
elems = bsoncore.AppendBooleanElement(elems, "j", wc.j)
|
||||
}
|
||||
|
||||
if wc.wTimeout < 0 {
|
||||
return bsontype.Type(0), nil, ErrNegativeWTimeout
|
||||
}
|
||||
|
||||
if wc.wTimeout != 0 {
|
||||
elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.wTimeout/time.Millisecond))
|
||||
}
|
||||
|
||||
if len(elems) == 0 {
|
||||
return bsontype.Type(0), nil, ErrEmptyWriteConcern
|
||||
}
|
||||
return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
|
||||
}
|
||||
|
||||
// AcknowledgedValue returns true if a BSON RawValue for a write concern represents an acknowledged write concern.
|
||||
// The element's value must be a document representing a write concern.
|
||||
func AcknowledgedValue(rawv bson.RawValue) bool {
|
||||
doc, ok := bsoncore.Value{Type: rawv.Type, Data: rawv.Value}.DocumentOK()
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
val, err := doc.LookupErr("w")
|
||||
if err != nil {
|
||||
// key w not found --> acknowledged
|
||||
return true
|
||||
}
|
||||
|
||||
i32, ok := val.Int32OK()
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return i32 != 0
|
||||
}
|
||||
|
||||
// Acknowledged indicates whether or not a write with the given write concern will be acknowledged.
|
||||
func (wc *WriteConcern) Acknowledged() bool {
|
||||
if wc == nil || wc.j {
|
||||
return true
|
||||
}
|
||||
|
||||
switch v := wc.w.(type) {
|
||||
case int:
|
||||
if v == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsValid checks whether the write concern is valid.
|
||||
func (wc *WriteConcern) IsValid() bool {
|
||||
if !wc.j {
|
||||
return true
|
||||
}
|
||||
|
||||
switch v := wc.w.(type) {
|
||||
case int:
|
||||
if v == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// GetW returns the write concern w level.
|
||||
func (wc *WriteConcern) GetW() interface{} {
|
||||
return wc.w
|
||||
}
|
||||
|
||||
// GetJ returns the write concern journaling level.
|
||||
func (wc *WriteConcern) GetJ() bool {
|
||||
return wc.j
|
||||
}
|
||||
|
||||
// GetWTimeout returns the write concern timeout.
|
||||
func (wc *WriteConcern) GetWTimeout() time.Duration {
|
||||
return wc.wTimeout
|
||||
}
|
||||
|
||||
// WithOptions returns a copy of this WriteConcern with the options set.
|
||||
func (wc *WriteConcern) WithOptions(options ...Option) *WriteConcern {
|
||||
if wc == nil {
|
||||
return New(options...)
|
||||
}
|
||||
newWC := &WriteConcern{}
|
||||
*newWC = *wc
|
||||
|
||||
for _, option := range options {
|
||||
option(newWC)
|
||||
}
|
||||
|
||||
return newWC
|
||||
}
|
||||
|
||||
// AckWrite returns true if a write concern represents an acknowledged write
|
||||
func AckWrite(wc *WriteConcern) bool {
|
||||
return wc == nil || wc.Acknowledged()
|
||||
}
|
||||
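An illustrative sketch of composing a WriteConcern with the options above; an integer w of 0 without journaling is the unacknowledged case, and w=0 with j=true is rejected as inconsistent:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/mongo/writeconcern"
)

func main() {
	wc := writeconcern.New(
		writeconcern.WMajority(),
		writeconcern.J(true),
		writeconcern.WTimeout(5*time.Second),
	)
	fmt.Println(wc.Acknowledged(), wc.IsValid()) // true true

	fireAndForget := writeconcern.New(writeconcern.W(0))
	fmt.Println(fireAndForget.Acknowledged()) // false

	bad := writeconcern.New(writeconcern.W(0), writeconcern.J(true))
	fmt.Println(bad.IsValid()) // false
}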
57
vendor/go.mongodb.org/mongo-driver/tag/tag.go
generated
vendored
Executable file
@@ -0,0 +1,57 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package tag // import "go.mongodb.org/mongo-driver/tag"
|
||||
|
||||
// Tag is a name/value pair.
|
||||
type Tag struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
// NewTagSetFromMap creates a new tag set from a map.
|
||||
func NewTagSetFromMap(m map[string]string) Set {
|
||||
var set Set
|
||||
for k, v := range m {
|
||||
set = append(set, Tag{Name: k, Value: v})
|
||||
}
|
||||
|
||||
return set
|
||||
}
|
||||
|
||||
// NewTagSetsFromMaps creates new tag sets from maps.
|
||||
func NewTagSetsFromMaps(maps []map[string]string) []Set {
|
||||
sets := make([]Set, 0, len(maps))
|
||||
for _, m := range maps {
|
||||
sets = append(sets, NewTagSetFromMap(m))
|
||||
}
|
||||
return sets
|
||||
}
|
||||
|
||||
// Set is an ordered list of Tags.
|
||||
type Set []Tag
|
||||
|
||||
// Contains indicates whether the name/value pair exists in the tagset.
|
||||
func (ts Set) Contains(name, value string) bool {
|
||||
for _, t := range ts {
|
||||
if t.Name == name && t.Value == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ContainsAll indicates whether all the name/value pairs exist in the tagset.
|
||||
func (ts Set) ContainsAll(other []Tag) bool {
|
||||
for _, ot := range other {
|
||||
if !ts.Contains(ot.Name, ot.Value) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
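An illustrative sketch of the tag.Set helpers above; note that a set built from a map has no guaranteed order because Go map iteration is randomized:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/tag"
)

func main() {
	set := tag.NewTagSetFromMap(map[string]string{"dc": "ny", "rack": "r1"})

	fmt.Println(set.Contains("dc", "ny"))                                // true
	fmt.Println(set.ContainsAll([]tag.Tag{{Name: "rack", Value: "r1"}})) // true
	fmt.Println(set.Contains("dc", "sf"))                                // false
}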
10
vendor/go.mongodb.org/mongo-driver/version/version.go
generated
vendored
Executable file
@@ -0,0 +1,10 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package version // import "go.mongodb.org/mongo-driver/version"

// Driver is the current version of the driver.
var Driver = "v1.1.0+prerelease"
97 vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go generated vendored Executable file
@@ -0,0 +1,97 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsonx // import "go.mongodb.org/mongo-driver/x/bsonx"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
)
|
||||
|
||||
// ErrNilArray indicates that an operation was attempted on a nil *Array.
|
||||
var ErrNilArray = errors.New("array is nil")
|
||||
|
||||
// Arr represents an array in BSON.
|
||||
type Arr []Val
|
||||
|
||||
// String implements the fmt.Stringer interface.
|
||||
func (a Arr) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.Write([]byte("bson.Array["))
|
||||
for idx, val := range a {
|
||||
if idx > 0 {
|
||||
buf.Write([]byte(", "))
|
||||
}
|
||||
fmt.Fprintf(&buf, "%s", val)
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
|
||||
func (a Arr) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
||||
if a == nil {
|
||||
// TODO: Should we do this?
|
||||
return bsontype.Null, nil, nil
|
||||
}
|
||||
|
||||
idx, dst := bsoncore.ReserveLength(nil)
|
||||
for idx, value := range a {
|
||||
t, data, _ := value.MarshalBSONValue() // marshalBSONValue never returns an error.
|
||||
dst = append(dst, byte(t))
|
||||
dst = append(dst, strconv.Itoa(idx)...)
|
||||
dst = append(dst, 0x00)
|
||||
dst = append(dst, data...)
|
||||
}
|
||||
dst = append(dst, 0x00)
|
||||
dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
|
||||
return bsontype.Array, dst, nil
|
||||
}
|
||||
|
||||
// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface.
|
||||
func (a *Arr) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
|
||||
if a == nil {
|
||||
return ErrNilArray
|
||||
}
|
||||
*a = (*a)[:0]
|
||||
|
||||
elements, err := bsoncore.Document(data).Elements()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, elem := range elements {
|
||||
var val Val
|
||||
rawval := elem.Value()
|
||||
err = val.UnmarshalBSONValue(rawval.Type, rawval.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*a = append(*a, val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal compares this array to another, returning true if they are equal.
|
||||
func (a Arr) Equal(a2 Arr) bool {
|
||||
if len(a) != len(a2) {
|
||||
return false
|
||||
}
|
||||
for idx := range a {
|
||||
if !a[idx].Equal(a2[idx]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (Arr) idoc() {}
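A hedged sketch of round-tripping an Arr; the Val constructors used here (bsonx.String, bsonx.Int32) are assumed from the package's constructor file, which is not part of this excerpt:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx"
)

func main() {
	// An Arr marshals as a BSON document whose keys are the stringified
	// indices "0", "1", ... exactly as MarshalBSONValue above writes them.
	arr := bsonx.Arr{bsonx.String("a"), bsonx.Int32(42)} // assumed constructors

	t, data, err := arr.MarshalBSONValue()
	fmt.Println(t, len(data), err) // array <byte count> <nil>

	var roundTrip bsonx.Arr
	if err := roundTrip.UnmarshalBSONValue(t, data); err != nil {
		panic(err)
	}
	fmt.Println(roundTrip.Equal(arr)) // true
}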
|
||||
827 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go generated vendored Executable file
@@ -0,0 +1,827 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
// Package bsoncore contains functions that can be used to encode and decode BSON
|
||||
// elements and values to or from a slice of bytes. These functions are aimed at
|
||||
// allowing low level manipulation of BSON and can be used to build a higher
|
||||
// level BSON library.
|
||||
//
|
||||
// The Read* functions within this package return the values of the element and
|
||||
// a boolean indicating if the values are valid. A boolean was used instead of
|
||||
// an error because any error that would be returned would be the same: not
|
||||
// enough bytes. This library attempts to do no validation; it will only return
// false if there are not enough bytes for an item to be read. For example, the
// ReadDocument function checks the length; if that length is larger than the
// number of bytes available, it will return false. If there are enough bytes, it
// will return those bytes and true. It is the consumer's responsibility to
// validate those bytes.
|
||||
//
|
||||
// The Append* functions within this package will append the type value to the
|
||||
// given dst slice. If the slice has enough capacity, it will not grow the
|
||||
// slice. The Append*Element functions within this package operate in the same
|
||||
// way, but additionally append the BSON type and the key before the value.
|
||||
package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
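A small sketch of the Append*/Read* conventions described in the package comment; Value.StringValue comes from the package's value.go, which is not shown in this diff:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Build the elements of {"x": 3.14, "name": "mongo"} by hand, then wrap
	// them in a document (length prefix + elements + trailing null byte).
	var elems []byte
	elems = bsoncore.AppendDoubleElement(elems, "x", 3.14)
	elems = bsoncore.AppendStringElement(elems, "name", "mongo")
	doc := bsoncore.Document(bsoncore.BuildDocument(nil, elems))

	// The Read* helpers report success with a bool rather than an error.
	if _, _, ok := bsoncore.ReadLength(doc); !ok {
		panic("short document")
	}
	fmt.Println(doc.Lookup("name").StringValue()) // mongo
}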
|
||||
|
||||
// AppendType will append t to dst and return the extended buffer.
|
||||
func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) }
|
||||
|
||||
// AppendKey will append key to dst and return the extended buffer.
|
||||
func AppendKey(dst []byte, key string) []byte { return append(dst, key+string(0x00)...) }
|
||||
|
||||
// AppendHeader will append Type t and key to dst and return the extended
|
||||
// buffer.
|
||||
func AppendHeader(dst []byte, t bsontype.Type, key string) []byte {
|
||||
dst = AppendType(dst, t)
|
||||
dst = append(dst, key...)
|
||||
return append(dst, 0x00)
|
||||
// return append(AppendType(dst, t), key+string(0x00)...)
|
||||
}
|
||||
|
||||
// TODO(skriptble): All of the Read* functions should return src resliced to start just after what
|
||||
// was read.
|
||||
|
||||
// ReadType will return the first byte of the provided []byte as a type. If
|
||||
// there is no available byte, false is returned.
|
||||
func ReadType(src []byte) (bsontype.Type, []byte, bool) {
|
||||
if len(src) < 1 {
|
||||
return 0, src, false
|
||||
}
|
||||
return bsontype.Type(src[0]), src[1:], true
|
||||
}
|
||||
|
||||
// ReadKey will read a key from src. The 0x00 byte will not be present
|
||||
// in the returned string. If there are not enough bytes available, false is
|
||||
// returned.
|
||||
func ReadKey(src []byte) (string, []byte, bool) { return readcstring(src) }
|
||||
|
||||
// ReadKeyBytes will read a key from src as bytes. The 0x00 byte will
|
||||
// not be present in the returned string. If there are not enough bytes
|
||||
// available, false is returned.
|
||||
func ReadKeyBytes(src []byte) ([]byte, []byte, bool) { return readcstringbytes(src) }
|
||||
|
||||
// ReadHeader will read a type byte and a key from src. If both of these
|
||||
// values cannot be read, false is returned.
|
||||
func ReadHeader(src []byte) (t bsontype.Type, key string, rem []byte, ok bool) {
|
||||
t, rem, ok = ReadType(src)
|
||||
if !ok {
|
||||
return 0, "", src, false
|
||||
}
|
||||
key, rem, ok = ReadKey(rem)
|
||||
if !ok {
|
||||
return 0, "", src, false
|
||||
}
|
||||
|
||||
return t, key, rem, true
|
||||
}
|
||||
|
||||
// ReadHeaderBytes will read a type and a key from src and the remainder of the bytes
|
||||
// are returned as rem. If either the type or key cannot be read, ok will be false.
|
||||
func ReadHeaderBytes(src []byte) (header []byte, rem []byte, ok bool) {
|
||||
if len(src) < 1 {
|
||||
return nil, src, false
|
||||
}
|
||||
idx := bytes.IndexByte(src[1:], 0x00)
|
||||
if idx == -1 {
|
||||
return nil, src, false
|
||||
}
|
||||
return src[:idx], src[idx+1:], true
|
||||
}
|
||||
|
||||
// ReadElement reads the next full element from src. It returns the element, the remaining bytes in
|
||||
// the slice, and a boolean indicating if the read was successful.
|
||||
func ReadElement(src []byte) (Element, []byte, bool) {
|
||||
if len(src) < 1 {
|
||||
return nil, src, false
|
||||
}
|
||||
t := bsontype.Type(src[0])
|
||||
idx := bytes.IndexByte(src[1:], 0x00)
|
||||
if idx == -1 {
|
||||
return nil, src, false
|
||||
}
|
||||
length, ok := valueLength(src[idx+2:], t) // We add 2 here because we called IndexByte with src[1:]
|
||||
if !ok {
|
||||
return nil, src, false
|
||||
}
|
||||
elemLength := 1 + idx + 1 + int(length)
|
||||
if elemLength > len(src) {
|
||||
return nil, src, false
|
||||
}
|
||||
return src[:elemLength], src[elemLength:], true
|
||||
}
|
||||
|
||||
// AppendValueElement appends value to dst as an element using key as the element's key.
|
||||
func AppendValueElement(dst []byte, key string, value Value) []byte {
|
||||
dst = AppendHeader(dst, value.Type, key)
|
||||
dst = append(dst, value.Data...)
|
||||
return dst
|
||||
}
|
||||
|
||||
// ReadValue reads the next value as the provided types and returns a Value, the remaining bytes,
|
||||
// and a boolean indicating if the read was successful.
|
||||
func ReadValue(src []byte, t bsontype.Type) (Value, []byte, bool) {
|
||||
data, rem, ok := readValue(src, t)
|
||||
if !ok {
|
||||
return Value{}, src, false
|
||||
}
|
||||
return Value{Type: t, Data: data}, rem, true
|
||||
}
|
||||
|
||||
// AppendDouble will append f to dst and return the extended buffer.
|
||||
func AppendDouble(dst []byte, f float64) []byte {
|
||||
return appendu64(dst, math.Float64bits(f))
|
||||
}
|
||||
|
||||
// AppendDoubleElement will append a BSON double element using key and f to dst
|
||||
// and return the extended buffer.
|
||||
func AppendDoubleElement(dst []byte, key string, f float64) []byte {
|
||||
return AppendDouble(AppendHeader(dst, bsontype.Double, key), f)
|
||||
}
|
||||
|
||||
// ReadDouble will read a float64 from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadDouble(src []byte) (float64, []byte, bool) {
|
||||
bits, src, ok := readu64(src)
|
||||
if !ok {
|
||||
return 0, src, false
|
||||
}
|
||||
return math.Float64frombits(bits), src, true
|
||||
}
|
||||
|
||||
// AppendString will append s to dst and return the extended buffer.
|
||||
func AppendString(dst []byte, s string) []byte {
|
||||
return appendstring(dst, s)
|
||||
}
|
||||
|
||||
// AppendStringElement will append a BSON string element using key and val to dst
|
||||
// and return the extended buffer.
|
||||
func AppendStringElement(dst []byte, key, val string) []byte {
|
||||
return AppendString(AppendHeader(dst, bsontype.String, key), val)
|
||||
}
|
||||
|
||||
// ReadString will read a string from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadString(src []byte) (string, []byte, bool) {
|
||||
return readstring(src)
|
||||
}
|
||||
|
||||
// AppendDocumentStart reserves a document's length and returns the index where the length begins.
|
||||
// This index can later be used to write the length of the document.
|
||||
//
|
||||
// TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd.
|
||||
// AppendDocumentStart would handle calling ReserveLength and providing the index of the start of
|
||||
// the document. AppendDocumentEnd would handle taking that start index, adding the null byte,
|
||||
// calculating the length, and filling in the length at the start of the document.
|
||||
func AppendDocumentStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }
|
||||
|
||||
// AppendDocumentStartInline functions the same as AppendDocumentStart but takes a pointer to the
|
||||
// index int32 which allows this function to be used inline.
|
||||
func AppendDocumentStartInline(dst []byte, index *int32) []byte {
|
||||
idx, doc := AppendDocumentStart(dst)
|
||||
*index = idx
|
||||
return doc
|
||||
}
|
||||
|
||||
// AppendDocumentElementStart writes a document element header and then reserves the length bytes.
|
||||
func AppendDocumentElementStart(dst []byte, key string) (index int32, b []byte) {
|
||||
return AppendDocumentStart(AppendHeader(dst, bsontype.EmbeddedDocument, key))
|
||||
}
|
||||
|
||||
// AppendDocumentEnd writes the null byte for a document and updates the length of the document.
|
||||
// The index should be the beginning of the document's length bytes.
|
||||
func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) {
|
||||
if int(index) > len(dst)-4 {
|
||||
return dst, fmt.Errorf("not enough bytes available after index to write length")
|
||||
}
|
||||
dst = append(dst, 0x00)
|
||||
dst = UpdateLength(dst, index, int32(len(dst[index:])))
|
||||
return dst, nil
|
||||
}
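A sketch of the Start/End pairing described above: reserve the length bytes, append elements, then let AppendDocumentEnd write the terminating null byte and backfill the length:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Outer document: {"filter": {"ok": true}}
	outerIdx, dst := bsoncore.AppendDocumentStart(nil)

	innerIdx, dst := bsoncore.AppendDocumentElementStart(dst, "filter")
	dst = bsoncore.AppendBooleanElement(dst, "ok", true)
	dst, _ = bsoncore.AppendDocumentEnd(dst, innerIdx)

	dst, err := bsoncore.AppendDocumentEnd(dst, outerIdx)
	if err != nil {
		panic(err)
	}
	fmt.Println(bsoncore.Document(dst).Validate() == nil) // true
}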
|
||||
|
||||
// AppendDocument will append doc to dst and return the extended buffer.
|
||||
func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) }
|
||||
|
||||
// AppendDocumentElement will append a BSON embedded document element using key
|
||||
// and doc to dst and return the extended buffer.
|
||||
func AppendDocumentElement(dst []byte, key string, doc []byte) []byte {
|
||||
return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc)
|
||||
}
|
||||
|
||||
// BuildDocument will create a document with the given elements and will append it to dst.
|
||||
func BuildDocument(dst []byte, elems []byte) []byte {
|
||||
idx, dst := ReserveLength(dst)
|
||||
dst = append(dst, elems...)
|
||||
dst = append(dst, 0x00)
|
||||
dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
|
||||
return dst
|
||||
}
|
||||
|
||||
// BuildDocumentFromElements will create a document with the given slice of elements and will append
|
||||
// it to dst and return the extended buffer.
|
||||
func BuildDocumentFromElements(dst []byte, elems ...[]byte) []byte {
|
||||
idx, dst := ReserveLength(dst)
|
||||
for _, elem := range elems {
|
||||
dst = append(dst, elem...)
|
||||
}
|
||||
dst = append(dst, 0x00)
|
||||
dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
|
||||
return dst
|
||||
}
|
||||
|
||||
// ReadDocument will read a document from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadDocument(src []byte) (doc Document, rem []byte, ok bool) { return readLengthBytes(src) }
|
||||
|
||||
// AppendArrayStart appends the length bytes to an array and then returns the index of the start
|
||||
// of those length bytes.
|
||||
func AppendArrayStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }
|
||||
|
||||
// AppendArrayElementStart appends an array element header and then the length bytes for an array,
|
||||
// returning the index where the length starts.
|
||||
func AppendArrayElementStart(dst []byte, key string) (index int32, b []byte) {
|
||||
return AppendArrayStart(AppendHeader(dst, bsontype.Array, key))
|
||||
}
|
||||
|
||||
// AppendArrayEnd appends the null byte to an array and calculates the length, inserting that
|
||||
// calculated length starting at index.
|
||||
func AppendArrayEnd(dst []byte, index int32) ([]byte, error) { return AppendDocumentEnd(dst, index) }
|
||||
|
||||
// AppendArray will append arr to dst and return the extended buffer.
|
||||
func AppendArray(dst []byte, arr []byte) []byte { return append(dst, arr...) }
|
||||
|
||||
// AppendArrayElement will append a BSON array element using key and arr to dst
|
||||
// and return the extended buffer.
|
||||
func AppendArrayElement(dst []byte, key string, arr []byte) []byte {
|
||||
return AppendArray(AppendHeader(dst, bsontype.Array, key), arr)
|
||||
}
|
||||
|
||||
// BuildArray will append a BSON array to dst built from values.
|
||||
func BuildArray(dst []byte, values ...Value) []byte {
|
||||
idx, dst := ReserveLength(dst)
|
||||
for pos, val := range values {
|
||||
dst = AppendValueElement(dst, strconv.Itoa(pos), val)
|
||||
}
|
||||
dst = append(dst, 0x00)
|
||||
dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
|
||||
return dst
|
||||
}
|
||||
|
||||
// BuildArrayElement will create an array element using the provided values.
|
||||
func BuildArrayElement(dst []byte, key string, values ...Value) []byte {
|
||||
return BuildArray(AppendHeader(dst, bsontype.Array, key), values...)
|
||||
}
|
||||
|
||||
// ReadArray will read an array from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadArray(src []byte) (arr Document, rem []byte, ok bool) { return readLengthBytes(src) }
|
||||
|
||||
// AppendBinary will append subtype and b to dst and return the extended buffer.
|
||||
func AppendBinary(dst []byte, subtype byte, b []byte) []byte {
|
||||
if subtype == 0x02 {
|
||||
return appendBinarySubtype2(dst, subtype, b)
|
||||
}
|
||||
dst = append(appendLength(dst, int32(len(b))), subtype)
|
||||
return append(dst, b...)
|
||||
}
|
||||
|
||||
// AppendBinaryElement will append a BSON binary element using key, subtype, and
|
||||
// b to dst and return the extended buffer.
|
||||
func AppendBinaryElement(dst []byte, key string, subtype byte, b []byte) []byte {
|
||||
return AppendBinary(AppendHeader(dst, bsontype.Binary, key), subtype, b)
|
||||
}
|
||||
|
||||
// ReadBinary will read a subtype and bin from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadBinary(src []byte) (subtype byte, bin []byte, rem []byte, ok bool) {
|
||||
length, rem, ok := ReadLength(src)
|
||||
if !ok {
|
||||
return 0x00, nil, src, false
|
||||
}
|
||||
if len(rem) < 1 { // subtype
|
||||
return 0x00, nil, src, false
|
||||
}
|
||||
subtype, rem = rem[0], rem[1:]
|
||||
|
||||
if len(rem) < int(length) {
|
||||
return 0x00, nil, src, false
|
||||
}
|
||||
|
||||
if subtype == 0x02 {
|
||||
length, rem, ok = ReadLength(rem)
|
||||
if !ok || len(rem) < int(length) {
|
||||
return 0x00, nil, src, false
|
||||
}
|
||||
}
|
||||
|
||||
return subtype, rem[:length], rem[length:], true
|
||||
}
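A quick round trip through the binary helpers; subtype 0x00 is the generic case, while subtype 0x02 carries the extra embedded length handled by appendBinarySubtype2 and ReadBinary above:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	payload := []byte{0xDE, 0xAD, 0xBE, 0xEF}

	// Generic binary subtype 0x00: 4 length bytes, subtype byte, then payload.
	dst := bsoncore.AppendBinary(nil, 0x00, payload)

	subtype, bin, rem, ok := bsoncore.ReadBinary(dst)
	fmt.Println(ok, subtype, bytes.Equal(bin, payload), len(rem)) // true 0 true 0
}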
|
||||
|
||||
// AppendUndefinedElement will append a BSON undefined element using key to dst
|
||||
// and return the extended buffer.
|
||||
func AppendUndefinedElement(dst []byte, key string) []byte {
|
||||
return AppendHeader(dst, bsontype.Undefined, key)
|
||||
}
|
||||
|
||||
// AppendObjectID will append oid to dst and return the extended buffer.
|
||||
func AppendObjectID(dst []byte, oid primitive.ObjectID) []byte { return append(dst, oid[:]...) }
|
||||
|
||||
// AppendObjectIDElement will append a BSON ObjectID element using key and oid to dst
|
||||
// and return the extended buffer.
|
||||
func AppendObjectIDElement(dst []byte, key string, oid primitive.ObjectID) []byte {
|
||||
return AppendObjectID(AppendHeader(dst, bsontype.ObjectID, key), oid)
|
||||
}
|
||||
|
||||
// ReadObjectID will read an ObjectID from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadObjectID(src []byte) (primitive.ObjectID, []byte, bool) {
|
||||
if len(src) < 12 {
|
||||
return primitive.ObjectID{}, src, false
|
||||
}
|
||||
var oid primitive.ObjectID
|
||||
copy(oid[:], src[0:12])
|
||||
return oid, src[12:], true
|
||||
}
|
||||
|
||||
// AppendBoolean will append b to dst and return the extended buffer.
|
||||
func AppendBoolean(dst []byte, b bool) []byte {
|
||||
if b {
|
||||
return append(dst, 0x01)
|
||||
}
|
||||
return append(dst, 0x00)
|
||||
}
|
||||
|
||||
// AppendBooleanElement will append a BSON boolean element using key and b to dst
|
||||
// and return the extended buffer.
|
||||
func AppendBooleanElement(dst []byte, key string, b bool) []byte {
|
||||
return AppendBoolean(AppendHeader(dst, bsontype.Boolean, key), b)
|
||||
}
|
||||
|
||||
// ReadBoolean will read a bool from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadBoolean(src []byte) (bool, []byte, bool) {
|
||||
if len(src) < 1 {
|
||||
return false, src, false
|
||||
}
|
||||
|
||||
return src[0] == 0x01, src[1:], true
|
||||
}
|
||||
|
||||
// AppendDateTime will append dt to dst and return the extended buffer.
|
||||
func AppendDateTime(dst []byte, dt int64) []byte { return appendi64(dst, dt) }
|
||||
|
||||
// AppendDateTimeElement will append a BSON datetime element using key and dt to dst
|
||||
// and return the extended buffer.
|
||||
func AppendDateTimeElement(dst []byte, key string, dt int64) []byte {
|
||||
return AppendDateTime(AppendHeader(dst, bsontype.DateTime, key), dt)
|
||||
}
|
||||
|
||||
// ReadDateTime will read an int64 datetime from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadDateTime(src []byte) (int64, []byte, bool) { return readi64(src) }
|
||||
|
||||
// AppendTime will append time as a BSON DateTime to dst and return the extended buffer.
|
||||
func AppendTime(dst []byte, t time.Time) []byte {
|
||||
return AppendDateTime(dst, t.Unix()*1000+int64(t.Nanosecond()/1e6))
|
||||
}
|
||||
|
||||
// AppendTimeElement will append a BSON datetime element using key and t to dst
|
||||
// and return the extended buffer.
|
||||
func AppendTimeElement(dst []byte, key string, t time.Time) []byte {
|
||||
return AppendTime(AppendHeader(dst, bsontype.DateTime, key), t)
|
||||
}
|
||||
|
||||
// ReadTime will read an time.Time datetime from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadTime(src []byte) (time.Time, []byte, bool) {
|
||||
dt, rem, ok := readi64(src)
|
||||
return time.Unix(dt/1e3, dt%1e3*1e6), rem, ok
|
||||
}
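A worked example of the millisecond conversion used by AppendTime and ReadTime; nanosecond precision is truncated on the way in:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// BSON datetimes carry millisecond precision, so AppendTime drops anything
	// finer and ReadTime returns the same instant at millisecond resolution.
	in := time.Date(2019, 7, 1, 12, 30, 45, 123456789, time.UTC)

	buf := bsoncore.AppendTime(nil, in)
	out, _, ok := bsoncore.ReadTime(buf)

	fmt.Println(ok, out.UTC())                            // true 2019-07-01 12:30:45.123 +0000 UTC
	fmt.Println(in.Truncate(time.Millisecond).Equal(out)) // true
}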
|
||||
|
||||
// AppendNullElement will append a BSON null element using key to dst
|
||||
// and return the extended buffer.
|
||||
func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst, bsontype.Null, key) }
|
||||
|
||||
// AppendRegex will append pattern and options to dst and return the extended buffer.
|
||||
func AppendRegex(dst []byte, pattern, options string) []byte {
|
||||
return append(dst, pattern+string(0x00)+options+string(0x00)...)
|
||||
}
|
||||
|
||||
// AppendRegexElement will append a BSON regex element using key, pattern, and
|
||||
// options to dst and return the extended buffer.
|
||||
func AppendRegexElement(dst []byte, key, pattern, options string) []byte {
|
||||
return AppendRegex(AppendHeader(dst, bsontype.Regex, key), pattern, options)
|
||||
}
|
||||
|
||||
// ReadRegex will read a pattern and options from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadRegex(src []byte) (pattern, options string, rem []byte, ok bool) {
|
||||
pattern, rem, ok = readcstring(src)
|
||||
if !ok {
|
||||
return "", "", src, false
|
||||
}
|
||||
options, rem, ok = readcstring(rem)
|
||||
if !ok {
|
||||
return "", "", src, false
|
||||
}
|
||||
return pattern, options, rem, true
|
||||
}
|
||||
|
||||
// AppendDBPointer will append ns and oid to dst and return the extended buffer.
|
||||
func AppendDBPointer(dst []byte, ns string, oid primitive.ObjectID) []byte {
|
||||
return append(appendstring(dst, ns), oid[:]...)
|
||||
}
|
||||
|
||||
// AppendDBPointerElement will append a BSON DBPointer element using key, ns,
|
||||
// and oid to dst and return the extended buffer.
|
||||
func AppendDBPointerElement(dst []byte, key, ns string, oid primitive.ObjectID) []byte {
|
||||
return AppendDBPointer(AppendHeader(dst, bsontype.DBPointer, key), ns, oid)
|
||||
}
|
||||
|
||||
// ReadDBPointer will read a ns and oid from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadDBPointer(src []byte) (ns string, oid primitive.ObjectID, rem []byte, ok bool) {
|
||||
ns, rem, ok = readstring(src)
|
||||
if !ok {
|
||||
return "", primitive.ObjectID{}, src, false
|
||||
}
|
||||
oid, rem, ok = ReadObjectID(rem)
|
||||
if !ok {
|
||||
return "", primitive.ObjectID{}, src, false
|
||||
}
|
||||
return ns, oid, rem, true
|
||||
}
|
||||
|
||||
// AppendJavaScript will append js to dst and return the extended buffer.
|
||||
func AppendJavaScript(dst []byte, js string) []byte { return appendstring(dst, js) }
|
||||
|
||||
// AppendJavaScriptElement will append a BSON JavaScript element using key and
|
||||
// js to dst and return the extended buffer.
|
||||
func AppendJavaScriptElement(dst []byte, key, js string) []byte {
|
||||
return AppendJavaScript(AppendHeader(dst, bsontype.JavaScript, key), js)
|
||||
}
|
||||
|
||||
// ReadJavaScript will read a js string from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadJavaScript(src []byte) (js string, rem []byte, ok bool) { return readstring(src) }
|
||||
|
||||
// AppendSymbol will append symbol to dst and return the extended buffer.
|
||||
func AppendSymbol(dst []byte, symbol string) []byte { return appendstring(dst, symbol) }
|
||||
|
||||
// AppendSymbolElement will append a BSON symbol element using key and symbol to dst
|
||||
// and return the extended buffer.
|
||||
func AppendSymbolElement(dst []byte, key, symbol string) []byte {
|
||||
return AppendSymbol(AppendHeader(dst, bsontype.Symbol, key), symbol)
|
||||
}
|
||||
|
||||
// ReadSymbol will read a symbol string from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadSymbol(src []byte) (symbol string, rem []byte, ok bool) { return readstring(src) }
|
||||
|
||||
// AppendCodeWithScope will append code and scope to dst and return the extended buffer.
|
||||
func AppendCodeWithScope(dst []byte, code string, scope []byte) []byte {
|
||||
length := int32(4 + 4 + len(code) + 1 + len(scope)) // length of cws, length of code, code, 0x00, scope
|
||||
dst = appendLength(dst, length)
|
||||
|
||||
return append(appendstring(dst, code), scope...)
|
||||
}
|
||||
|
||||
// AppendCodeWithScopeElement will append a BSON code with scope element using
|
||||
// key, code, and scope to dst
|
||||
// and return the extended buffer.
|
||||
func AppendCodeWithScopeElement(dst []byte, key, code string, scope []byte) []byte {
|
||||
return AppendCodeWithScope(AppendHeader(dst, bsontype.CodeWithScope, key), code, scope)
|
||||
}
|
||||
|
||||
// ReadCodeWithScope will read code and scope from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadCodeWithScope(src []byte) (code string, scope []byte, rem []byte, ok bool) {
|
||||
length, rem, ok := ReadLength(src)
|
||||
if !ok || len(src) < int(length) {
|
||||
return "", nil, src, false
|
||||
}
|
||||
|
||||
code, rem, ok = readstring(rem)
|
||||
if !ok {
|
||||
return "", nil, src, false
|
||||
}
|
||||
|
||||
scope, rem, ok = ReadDocument(rem)
|
||||
if !ok {
|
||||
return "", nil, src, false
|
||||
}
|
||||
return code, scope, rem, true
|
||||
}
|
||||
|
||||
// AppendInt32 will append i32 to dst and return the extended buffer.
|
||||
func AppendInt32(dst []byte, i32 int32) []byte { return appendi32(dst, i32) }
|
||||
|
||||
// AppendInt32Element will append a BSON int32 element using key and i32 to dst
|
||||
// and return the extended buffer.
|
||||
func AppendInt32Element(dst []byte, key string, i32 int32) []byte {
|
||||
return AppendInt32(AppendHeader(dst, bsontype.Int32, key), i32)
|
||||
}
|
||||
|
||||
// ReadInt32 will read an int32 from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadInt32(src []byte) (int32, []byte, bool) { return readi32(src) }
|
||||
|
||||
// AppendTimestamp will append t and i to dst and return the extended buffer.
|
||||
func AppendTimestamp(dst []byte, t, i uint32) []byte {
|
||||
return appendu32(appendu32(dst, i), t) // i is the lower 4 bytes, t is the higher 4 bytes
|
||||
}
|
||||
|
||||
// AppendTimestampElement will append a BSON timestamp element using key, t, and
|
||||
// i to dst and return the extended buffer.
|
||||
func AppendTimestampElement(dst []byte, key string, t, i uint32) []byte {
|
||||
return AppendTimestamp(AppendHeader(dst, bsontype.Timestamp, key), t, i)
|
||||
}
|
||||
|
||||
// ReadTimestamp will read t and i from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadTimestamp(src []byte) (t, i uint32, rem []byte, ok bool) {
|
||||
i, rem, ok = readu32(src)
|
||||
if !ok {
|
||||
return 0, 0, src, false
|
||||
}
|
||||
t, rem, ok = readu32(rem)
|
||||
if !ok {
|
||||
return 0, 0, src, false
|
||||
}
|
||||
return t, i, rem, true
|
||||
}
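A small round trip showing the byte ordering noted in AppendTimestamp (increment in the low four bytes, seconds in the high four):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// AppendTimestamp writes i first, then t, so that i occupies the low
	// 4 bytes and t the high 4 bytes of the 8-byte timestamp.
	buf := bsoncore.AppendTimestamp(nil, 1565545664, 7)

	t, i, _, ok := bsoncore.ReadTimestamp(buf)
	fmt.Println(ok, t, i) // true 1565545664 7
}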
|
||||
|
||||
// AppendInt64 will append i64 to dst and return the extended buffer.
|
||||
func AppendInt64(dst []byte, i64 int64) []byte { return appendi64(dst, i64) }
|
||||
|
||||
// AppendInt64Element will append a BSON int64 element using key and i64 to dst
|
||||
// and return the extended buffer.
|
||||
func AppendInt64Element(dst []byte, key string, i64 int64) []byte {
|
||||
return AppendInt64(AppendHeader(dst, bsontype.Int64, key), i64)
|
||||
}
|
||||
|
||||
// ReadInt64 will read an int64 from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadInt64(src []byte) (int64, []byte, bool) { return readi64(src) }
|
||||
|
||||
// AppendDecimal128 will append d128 to dst and return the extended buffer.
|
||||
func AppendDecimal128(dst []byte, d128 primitive.Decimal128) []byte {
|
||||
high, low := d128.GetBytes()
|
||||
return appendu64(appendu64(dst, low), high)
|
||||
}
|
||||
|
||||
// AppendDecimal128Element will append a BSON primitive.Decimal128 element using key and
|
||||
// d128 to dst and return the extended buffer.
|
||||
func AppendDecimal128Element(dst []byte, key string, d128 primitive.Decimal128) []byte {
|
||||
return AppendDecimal128(AppendHeader(dst, bsontype.Decimal128, key), d128)
|
||||
}
|
||||
|
||||
// ReadDecimal128 will read a primitive.Decimal128 from src. If there are not enough bytes it
|
||||
// will return false.
|
||||
func ReadDecimal128(src []byte) (primitive.Decimal128, []byte, bool) {
|
||||
l, rem, ok := readu64(src)
|
||||
if !ok {
|
||||
return primitive.Decimal128{}, src, false
|
||||
}
|
||||
|
||||
h, rem, ok := readu64(rem)
|
||||
if !ok {
|
||||
return primitive.Decimal128{}, src, false
|
||||
}
|
||||
|
||||
return primitive.NewDecimal128(h, l), rem, true
|
||||
}
|
||||
|
||||
// AppendMaxKeyElement will append a BSON max key element using key to dst
|
||||
// and return the extended buffer.
|
||||
func AppendMaxKeyElement(dst []byte, key string) []byte {
|
||||
return AppendHeader(dst, bsontype.MaxKey, key)
|
||||
}
|
||||
|
||||
// AppendMinKeyElement will append a BSON min key element using key to dst
|
||||
// and return the extended buffer.
|
||||
func AppendMinKeyElement(dst []byte, key string) []byte {
|
||||
return AppendHeader(dst, bsontype.MinKey, key)
|
||||
}
|
||||
|
||||
// EqualValue will return true if the two values are equal.
|
||||
func EqualValue(t1, t2 bsontype.Type, v1, v2 []byte) bool {
|
||||
if t1 != t2 {
|
||||
return false
|
||||
}
|
||||
v1, _, ok := readValue(v1, t1)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
v2, _, ok = readValue(v2, t2)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1, v2)
|
||||
}
|
||||
|
||||
// valueLength will determine the length of the next value contained in src as if it
|
||||
// is type t. The returned bool will be false if there are not enough bytes in src for
|
||||
// a value of type t.
|
||||
func valueLength(src []byte, t bsontype.Type) (int32, bool) {
|
||||
var length int32
|
||||
ok := true
|
||||
switch t {
|
||||
case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
|
||||
length, _, ok = ReadLength(src)
|
||||
case bsontype.Binary:
|
||||
length, _, ok = ReadLength(src)
|
||||
length += 4 + 1 // binary length + subtype byte
|
||||
case bsontype.Boolean:
|
||||
length = 1
|
||||
case bsontype.DBPointer:
|
||||
length, _, ok = ReadLength(src)
|
||||
length += 4 + 12 // string length + ObjectID length
|
||||
case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
|
||||
length = 8
|
||||
case bsontype.Decimal128:
|
||||
length = 16
|
||||
case bsontype.Int32:
|
||||
length = 4
|
||||
case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
|
||||
length, _, ok = ReadLength(src)
|
||||
length += 4
|
||||
case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
|
||||
length = 0
|
||||
case bsontype.ObjectID:
|
||||
length = 12
|
||||
case bsontype.Regex:
|
||||
regex := bytes.IndexByte(src, 0x00)
|
||||
if regex < 0 {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
pattern := bytes.IndexByte(src[regex+1:], 0x00)
|
||||
if pattern < 0 {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
length = int32(int64(regex) + 1 + int64(pattern) + 1)
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
|
||||
return length, ok
|
||||
}
|
||||
|
||||
func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) {
|
||||
length, ok := valueLength(src, t)
|
||||
if !ok || int(length) > len(src) {
|
||||
return nil, src, false
|
||||
}
|
||||
|
||||
return src[:length], src[length:], true
|
||||
}
|
||||
|
||||
// ReserveLength reserves the space required for length and returns the index where to write the length
|
||||
// and the []byte with reserved space.
|
||||
func ReserveLength(dst []byte) (int32, []byte) {
|
||||
index := len(dst)
|
||||
return int32(index), append(dst, 0x00, 0x00, 0x00, 0x00)
|
||||
}
|
||||
|
||||
// UpdateLength updates the length at index with length and returns the []byte.
|
||||
func UpdateLength(dst []byte, index, length int32) []byte {
|
||||
dst[index] = byte(length)
|
||||
dst[index+1] = byte(length >> 8)
|
||||
dst[index+2] = byte(length >> 16)
|
||||
dst[index+3] = byte(length >> 24)
|
||||
return dst
|
||||
}
|
||||
|
||||
func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) }
|
||||
|
||||
func appendi32(dst []byte, i32 int32) []byte {
|
||||
return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24))
|
||||
}
|
||||
|
||||
// ReadLength reads an int32 length from src and returns the length and the remaining bytes. If
|
||||
// there aren't enough bytes to read a valid length, src is returned unmodified and the returned
|
||||
// bool will be false.
|
||||
func ReadLength(src []byte) (int32, []byte, bool) { return readi32(src) }
|
||||
|
||||
func readi32(src []byte) (int32, []byte, bool) {
|
||||
if len(src) < 4 {
|
||||
return 0, src, false
|
||||
}
|
||||
|
||||
return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true
|
||||
}
|
||||
|
||||
func appendi64(dst []byte, i64 int64) []byte {
|
||||
return append(dst,
|
||||
byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24),
|
||||
byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56),
|
||||
)
|
||||
}
|
||||
|
||||
func readi64(src []byte) (int64, []byte, bool) {
|
||||
if len(src) < 8 {
|
||||
return 0, src, false
|
||||
}
|
||||
i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 |
|
||||
int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56)
|
||||
return i64, src[8:], true
|
||||
}
|
||||
|
||||
func appendu32(dst []byte, u32 uint32) []byte {
|
||||
return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24))
|
||||
}
|
||||
|
||||
func readu32(src []byte) (uint32, []byte, bool) {
|
||||
if len(src) < 4 {
|
||||
return 0, src, false
|
||||
}
|
||||
|
||||
return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true
|
||||
}
|
||||
|
||||
func appendu64(dst []byte, u64 uint64) []byte {
|
||||
return append(dst,
|
||||
byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24),
|
||||
byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56),
|
||||
)
|
||||
}
|
||||
|
||||
func readu64(src []byte) (uint64, []byte, bool) {
|
||||
if len(src) < 8 {
|
||||
return 0, src, false
|
||||
}
|
||||
u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 |
|
||||
uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56)
|
||||
return u64, src[8:], true
|
||||
}
|
||||
|
||||
// keep in sync with readcstringbytes
|
||||
func readcstring(src []byte) (string, []byte, bool) {
|
||||
idx := bytes.IndexByte(src, 0x00)
|
||||
if idx < 0 {
|
||||
return "", src, false
|
||||
}
|
||||
return string(src[:idx]), src[idx+1:], true
|
||||
}
|
||||
|
||||
// keep in sync with readcstring
|
||||
func readcstringbytes(src []byte) ([]byte, []byte, bool) {
|
||||
idx := bytes.IndexByte(src, 0x00)
|
||||
if idx < 0 {
|
||||
return nil, src, false
|
||||
}
|
||||
return src[:idx], src[idx+1:], true
|
||||
}
|
||||
|
||||
func appendstring(dst []byte, s string) []byte {
|
||||
l := int32(len(s) + 1)
|
||||
dst = appendLength(dst, l)
|
||||
dst = append(dst, s...)
|
||||
return append(dst, 0x00)
|
||||
}
|
||||
|
||||
func readstring(src []byte) (string, []byte, bool) {
|
||||
l, rem, ok := ReadLength(src)
|
||||
if !ok {
|
||||
return "", src, false
|
||||
}
|
||||
if len(src[4:]) < int(l) {
|
||||
return "", src, false
|
||||
}
|
||||
|
||||
return string(rem[:l-1]), rem[l:], true
|
||||
}
|
||||
|
||||
// readLengthBytes attempts to read a length and that number of bytes. This
|
||||
// function requires that the length include the four bytes for itself.
|
||||
func readLengthBytes(src []byte) ([]byte, []byte, bool) {
|
||||
l, _, ok := ReadLength(src)
|
||||
if !ok {
|
||||
return nil, src, false
|
||||
}
|
||||
if len(src) < int(l) {
|
||||
return nil, src, false
|
||||
}
|
||||
return src[:l], src[l:], true
|
||||
}
|
||||
|
||||
func appendBinarySubtype2(dst []byte, subtype byte, b []byte) []byte {
|
||||
dst = appendLength(dst, int32(len(b)+4)) // The bytes we'll encode need to be 4 larger for the length bytes
|
||||
dst = append(dst, subtype)
|
||||
dst = appendLength(dst, int32(len(b)))
|
||||
return append(dst, b...)
|
||||
}
|
||||
399 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go generated vendored Executable file
@@ -0,0 +1,399 @@
|
||||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-stack/stack"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
// DocumentValidationError is an error type returned when attempting to validate a document.
|
||||
type DocumentValidationError string
|
||||
|
||||
func (dve DocumentValidationError) Error() string { return string(dve) }
|
||||
|
||||
// NewDocumentLengthError creates and returns an error for when the length of a document exceeds the
|
||||
// bytes available.
|
||||
func NewDocumentLengthError(length, rem int) error {
|
||||
return DocumentValidationError(
|
||||
fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem),
|
||||
)
|
||||
}
|
||||
|
||||
// InsufficientBytesError indicates that there were not enough bytes to read the next component.
|
||||
type InsufficientBytesError struct {
|
||||
Source []byte
|
||||
Remaining []byte
|
||||
Stack stack.CallStack
|
||||
}
|
||||
|
||||
// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document, remaining
|
||||
// bytes, and the current stack.
|
||||
func NewInsufficientBytesError(src, rem []byte) InsufficientBytesError {
|
||||
return InsufficientBytesError{Source: src, Remaining: rem, Stack: stack.Trace().TrimRuntime()}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (ibe InsufficientBytesError) Error() string {
|
||||
return "too few bytes to read next component"
|
||||
}
|
||||
|
||||
// ErrorStack returns a string representing the stack at the point where the error occurred.
|
||||
func (ibe InsufficientBytesError) ErrorStack() string {
|
||||
s := bytes.NewBufferString("too few bytes to read next component: [")
|
||||
|
||||
for i, call := range ibe.Stack {
|
||||
if i != 0 {
|
||||
s.WriteString(", ")
|
||||
}
|
||||
|
||||
// go vet doesn't like %k even though it's part of stack's API, so we move the format
|
||||
// string so it doesn't complain. (We also can't make it a constant, or go vet still
|
||||
// complains.)
|
||||
callFormat := "%k.%n %v"
|
||||
|
||||
s.WriteString(fmt.Sprintf(callFormat, call, call, call))
|
||||
}
|
||||
|
||||
s.WriteRune(']')
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Equal checks that err2 is also an InsufficientBytesError.
|
||||
func (ibe InsufficientBytesError) Equal(err2 error) bool {
|
||||
switch err2.(type) {
|
||||
case InsufficientBytesError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidDepthTraversalError is returned when attempting a recursive Lookup when one component of
|
||||
// the path is neither an embedded document nor an array.
|
||||
type InvalidDepthTraversalError struct {
|
||||
Key string
|
||||
Type bsontype.Type
|
||||
}
|
||||
|
||||
func (idte InvalidDepthTraversalError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"attempt to traverse into %s, but it's type is %s, not %s nor %s",
|
||||
idte.Key, idte.Type, bsontype.EmbeddedDocument, bsontype.Array,
|
||||
)
|
||||
}
|
||||
|
||||
// ErrMissingNull is returned when a document's last byte is not null.
|
||||
const ErrMissingNull DocumentValidationError = "document end is missing null byte"
|
||||
|
||||
// ErrNilReader indicates that an operation was attempted on a nil io.Reader.
|
||||
var ErrNilReader = errors.New("nil reader")
|
||||
|
||||
// ErrInvalidLength indicates that a length in a binary representation of a BSON document is invalid.
|
||||
var ErrInvalidLength = errors.New("document length is invalid")
|
||||
|
||||
// ErrEmptyKey indicates that no key was provided to a Lookup method.
|
||||
var ErrEmptyKey = errors.New("empty key provided")
|
||||
|
||||
// ErrElementNotFound indicates that an Element matching a certain condition does not exist.
|
||||
var ErrElementNotFound = errors.New("element not found")
|
||||
|
||||
// ErrOutOfBounds indicates that an index provided to access something was invalid.
|
||||
var ErrOutOfBounds = errors.New("out of bounds")
|
||||
|
||||
// Document is a raw bytes representation of a BSON document.
|
||||
type Document []byte
|
||||
|
||||
// Array is a raw bytes representation of a BSON array.
|
||||
type Array = Document
|
||||
|
||||
// NewDocumentFromReader reads a document from r. This function will only validate that the length is
|
||||
// correct and that the document ends with a null byte.
|
||||
func NewDocumentFromReader(r io.Reader) (Document, error) {
|
||||
if r == nil {
|
||||
return nil, ErrNilReader
|
||||
}
|
||||
|
||||
var lengthBytes [4]byte
|
||||
|
||||
// ReadFull guarantees that we will have read at least len(lengthBytes) if err == nil
|
||||
_, err := io.ReadFull(r, lengthBytes[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
length, _, _ := readi32(lengthBytes[:]) // ignore ok since we always have enough bytes to read a length
|
||||
if length < 0 {
|
||||
return nil, ErrInvalidLength
|
||||
}
|
||||
document := make([]byte, length)
|
||||
|
||||
copy(document, lengthBytes[:])
|
||||
|
||||
_, err = io.ReadFull(r, document[4:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if document[length-1] != 0x00 {
|
||||
return nil, ErrMissingNull
|
||||
}
|
||||
|
||||
return document, nil
|
||||
}
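A sketch of reading a document back through an io.Reader; Value.Int32 is assumed from the package's value.go, which is not part of this excerpt:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Serialize {"ok": 1} and read it back through an io.Reader.
	raw := bsoncore.BuildDocumentFromElements(nil,
		bsoncore.AppendInt32Element(nil, "ok", 1),
	)

	doc, err := bsoncore.NewDocumentFromReader(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}
	// Only the length prefix and the trailing null byte were checked above;
	// element-level checks are a separate Validate call.
	fmt.Println(doc.Validate() == nil, doc.Lookup("ok").Int32()) // true 1
}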
|
||||
|
||||
// Lookup searches the document, potentially recursively, for the given key. If there are multiple
|
||||
// keys provided, this method will recurse down, as long as the top and intermediate nodes are
|
||||
// either documents or arrays. If an error occurs or if the value doesn't exist, an empty Value is
|
||||
// returned.
|
||||
func (d Document) Lookup(key ...string) Value {
|
||||
val, _ := d.LookupErr(key...)
|
||||
return val
|
||||
}
|
||||
|
||||
// LookupErr is the same as Lookup, except it returns an error in addition to an empty Value.
|
||||
func (d Document) LookupErr(key ...string) (Value, error) {
|
||||
if len(key) < 1 {
|
||||
return Value{}, ErrEmptyKey
|
||||
}
|
||||
length, rem, ok := ReadLength(d)
|
||||
if !ok {
|
||||
return Value{}, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
|
||||
length -= 4
|
||||
|
||||
var elem Element
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return Value{}, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
if elem.Key() != key[0] {
|
||||
continue
|
||||
}
|
||||
if len(key) > 1 {
|
||||
tt := bsontype.Type(elem[0])
|
||||
switch tt {
|
||||
case bsontype.EmbeddedDocument:
|
||||
val, err := elem.Value().Document().LookupErr(key[1:]...)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
return val, nil
|
||||
case bsontype.Array:
|
||||
val, err := elem.Value().Array().LookupErr(key[1:]...)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
return val, nil
|
||||
default:
|
||||
return Value{}, InvalidDepthTraversalError{Key: elem.Key(), Type: tt}
|
||||
}
|
||||
}
|
||||
return elem.ValueErr()
|
||||
}
|
||||
return Value{}, ErrElementNotFound
|
||||
}
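A sketch of recursive lookup through an embedded document; Value.StringValue is assumed from the package's value.go, not shown here:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// {"a": {"b": "hello"}} — Lookup recurses through embedded documents
	// (and arrays, using stringified indices as keys).
	inner := bsoncore.BuildDocumentFromElements(nil,
		bsoncore.AppendStringElement(nil, "b", "hello"),
	)
	doc := bsoncore.Document(bsoncore.BuildDocumentFromElements(nil,
		bsoncore.AppendDocumentElement(nil, "a", inner),
	))

	fmt.Println(doc.Lookup("a", "b").StringValue()) // hello

	if _, err := doc.LookupErr("a", "missing"); err != nil {
		fmt.Println(err) // element not found
	}
}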
|
||||
|
||||
// Index searches for and retrieves the element at the given index. This method will panic if
|
||||
// the document is invalid or if the index is out of bounds.
|
||||
func (d Document) Index(index uint) Element {
|
||||
elem, err := d.IndexErr(index)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return elem
|
||||
}
|
||||
|
||||
// IndexErr searches for and retrieves the element at the given index.
|
||||
func (d Document) IndexErr(index uint) (Element, error) {
|
||||
length, rem, ok := ReadLength(d)
|
||||
if !ok {
|
||||
return nil, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
|
||||
length -= 4
|
||||
|
||||
var current uint
|
||||
var elem Element
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return nil, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
if current != index {
|
||||
current++
|
||||
continue
|
||||
}
|
||||
return elem, nil
|
||||
}
|
||||
return nil, ErrOutOfBounds
|
||||
}
|
||||
|
||||
// DebugString outputs a human readable version of Document. It will attempt to stringify the
|
||||
// valid components of the document even if the entire document is not valid.
|
||||
func (d Document) DebugString() string {
|
||||
if len(d) < 5 {
|
||||
return "<malformed>"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("Document")
|
||||
length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
|
||||
buf.WriteByte('(')
|
||||
buf.WriteString(strconv.Itoa(int(length)))
|
||||
length -= 4
|
||||
buf.WriteString("){")
|
||||
var elem Element
|
||||
var ok bool
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(&buf, "%s ", elem.DebugString())
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// String outputs an ExtendedJSON version of Document. If the document is not valid, this method
|
||||
// returns an empty string.
|
||||
func (d Document) String() string {
|
||||
if len(d) < 5 {
|
||||
return ""
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
buf.WriteByte('{')
|
||||
|
||||
length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
|
||||
|
||||
length -= 4
|
||||
|
||||
var elem Element
|
||||
var ok bool
|
||||
first := true
|
||||
for length > 1 {
|
||||
if !first {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
fmt.Fprintf(&buf, "%s", elem.String())
|
||||
first = false
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Elements returns this document as a slice of elements. The returned slice will contain valid
|
||||
// elements. If the document is not valid, the elements up to the invalid point will be returned
|
||||
// along with an error.
|
||||
func (d Document) Elements() ([]Element, error) {
|
||||
length, rem, ok := ReadLength(d)
|
||||
if !ok {
|
||||
return nil, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
|
||||
length -= 4
|
||||
|
||||
var elem Element
|
||||
var elems []Element
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return elems, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
if err := elem.Validate(); err != nil {
|
||||
return elems, err
|
||||
}
|
||||
elems = append(elems, elem)
|
||||
}
|
||||
return elems, nil
|
||||
}
|
||||
|
||||
// Values returns this document as a slice of values. The returned slice will contain valid values.
|
||||
// If the document is not valid, the values up to the invalid point will be returned along with an
|
||||
// error.
|
||||
func (d Document) Values() ([]Value, error) {
|
||||
length, rem, ok := ReadLength(d)
|
||||
if !ok {
|
||||
return nil, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
|
||||
length -= 4
|
||||
|
||||
var elem Element
|
||||
var vals []Value
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return vals, NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
if err := elem.Value().Validate(); err != nil {
|
||||
return vals, err
|
||||
}
|
||||
vals = append(vals, elem.Value())
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// Validate validates the document and ensures the elements contained within are valid.
|
||||
func (d Document) Validate() error {
|
||||
length, rem, ok := ReadLength(d)
|
||||
if !ok {
|
||||
return NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
if int(length) > len(d) {
|
||||
return d.lengtherror(int(length), len(d))
|
||||
}
|
||||
if d[length-1] != 0x00 {
|
||||
return ErrMissingNull
|
||||
}
|
||||
|
||||
length -= 4
|
||||
var elem Element
|
||||
|
||||
for length > 1 {
|
||||
elem, rem, ok = ReadElement(rem)
|
||||
length -= int32(len(elem))
|
||||
if !ok {
|
||||
return NewInsufficientBytesError(d, rem)
|
||||
}
|
||||
err := elem.Validate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(rem) < 1 || rem[0] != 0x00 {
|
||||
return ErrMissingNull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Document) lengtherror(length, rem int) error {
|
||||
return DocumentValidationError(fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem))
|
||||
}
|
||||
167 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go generated vendored Executable file
@@ -0,0 +1,167 @@
|
||||
package bsoncore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
// DocumentSequenceStyle is used to represent how a document sequence is laid out in a slice of
|
||||
// bytes.
|
||||
type DocumentSequenceStyle uint32
|
||||
|
||||
// These constants are the valid styles for a DocumentSequence.
|
||||
const (
|
||||
_ DocumentSequenceStyle = iota
|
||||
SequenceStyle
|
||||
ArrayStyle
|
||||
)
|
||||
|
||||
// DocumentSequence represents a sequence of documents. The Style field indicates how the documents
|
||||
// are laid out inside of the Data field.
|
||||
type DocumentSequence struct {
|
||||
Style DocumentSequenceStyle
|
||||
Data []byte
|
||||
Pos int
|
||||
}
|
||||
|
||||
// ErrCorruptedDocument is returned when a full document couldn't be read from the sequence.
|
||||
var ErrCorruptedDocument = errors.New("invalid DocumentSequence: corrupted document")
|
||||
|
||||
// ErrNonDocument is returned when a DocumentSequence contains a non-document BSON value.
|
||||
var ErrNonDocument = errors.New("invalid DocumentSequence: a non-document value was found in sequence")
|
||||
|
||||
// ErrInvalidDocumentSequenceStyle is returned when an unknown DocumentSequenceStyle is set on a
|
||||
// DocumentSequence.
|
||||
var ErrInvalidDocumentSequenceStyle = errors.New("invalid DocumentSequenceStyle")
|
||||
|
||||
// DocumentCount returns the number of documents in the sequence.
|
||||
func (ds *DocumentSequence) DocumentCount() int {
|
||||
if ds == nil {
|
||||
return 0
|
||||
}
|
||||
switch ds.Style {
|
||||
case SequenceStyle:
|
||||
var count int
|
||||
var ok bool
|
||||
rem := ds.Data
|
||||
for len(rem) > 0 {
|
||||
_, rem, ok = ReadDocument(rem)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
count++
|
||||
}
|
||||
return count
|
||||
case ArrayStyle:
|
||||
_, rem, ok := ReadLength(ds.Data)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
var count int
|
||||
for len(rem) > 1 {
|
||||
_, rem, ok = ReadElement(rem)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
count++
|
||||
}
|
||||
return count
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// ResetIterator resets the iteration point for the Next method to the beginning of the document
// sequence.
|
||||
func (ds *DocumentSequence) ResetIterator() {
|
||||
if ds == nil {
|
||||
return
|
||||
}
|
||||
ds.Pos = 0
|
||||
}
|
||||
|
||||
// Documents returns a slice of the documents. A nil slice is returned if the Data field is nil or
// the documents could not be properly read.
|
||||
func (ds *DocumentSequence) Documents() ([]Document, error) {
|
||||
if ds == nil {
|
||||
return nil, nil
|
||||
}
|
||||
switch ds.Style {
|
||||
case SequenceStyle:
|
||||
rem := ds.Data
|
||||
var docs []Document
|
||||
var doc Document
|
||||
var ok bool
|
||||
for {
|
||||
doc, rem, ok = ReadDocument(rem)
|
||||
if !ok {
|
||||
if len(rem) == 0 {
|
||||
break
|
||||
}
|
||||
return nil, ErrCorruptedDocument
|
||||
}
|
||||
docs = append(docs, doc)
|
||||
}
|
||||
return docs, nil
|
||||
case ArrayStyle:
|
||||
if len(ds.Data) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
vals, err := Document(ds.Data).Values()
|
||||
if err != nil {
|
||||
return nil, ErrCorruptedDocument
|
||||
}
|
||||
docs := make([]Document, 0, len(vals))
|
||||
for _, v := range vals {
|
||||
if v.Type != bsontype.EmbeddedDocument {
|
||||
return nil, ErrNonDocument
|
||||
}
|
||||
docs = append(docs, v.Data)
|
||||
}
|
||||
return docs, nil
|
||||
default:
|
||||
return nil, ErrInvalidDocumentSequenceStyle
|
||||
}
|
||||
}
|
||||
|
||||
// Next retrieves the next document from this sequence and returns it. This method will return
|
||||
// io.EOF when it has reached the end of the sequence.
|
||||
func (ds *DocumentSequence) Next() (Document, error) {
|
||||
if ds == nil || ds.Pos >= len(ds.Data) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
switch ds.Style {
|
||||
case SequenceStyle:
|
||||
doc, _, ok := ReadDocument(ds.Data[ds.Pos:])
|
||||
if !ok {
|
||||
return nil, ErrCorruptedDocument
|
||||
}
|
||||
ds.Pos += len(doc)
|
||||
return doc, nil
|
||||
case ArrayStyle:
|
||||
if ds.Pos < 4 {
|
||||
if len(ds.Data) < 4 {
|
||||
return nil, ErrCorruptedDocument
|
||||
}
|
||||
ds.Pos = 4 // Skip the length of the document
|
||||
}
|
||||
if len(ds.Data[ds.Pos:]) == 1 && ds.Data[ds.Pos] == 0x00 {
|
||||
return nil, io.EOF // At the end of the document
|
||||
}
|
||||
elem, _, ok := ReadElement(ds.Data[ds.Pos:])
|
||||
if !ok {
|
||||
return nil, ErrCorruptedDocument
|
||||
}
|
||||
ds.Pos += len(elem)
|
||||
val := elem.Value()
|
||||
if val.Type != bsontype.EmbeddedDocument {
|
||||
return nil, ErrNonDocument
|
||||
}
|
||||
return val.Data, nil
|
||||
default:
|
||||
return nil, ErrInvalidDocumentSequenceStyle
|
||||
}
|
||||
}
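A sketch of iterating a SequenceStyle DocumentSequence (documents laid end to end, as in OP_MSG document-sequence sections) until Next reports io.EOF:

package main

import (
	"fmt"
	"io"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	d1 := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendInt32Element(nil, "n", 1))
	d2 := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendInt32Element(nil, "n", 2))

	seq := &bsoncore.DocumentSequence{
		Style: bsoncore.SequenceStyle,
		Data:  append(append([]byte{}, d1...), d2...),
	}
	fmt.Println(seq.DocumentCount()) // 2

	for {
		doc, err := seq.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc) // each document prints via Document.String
	}
}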
|
||||
Some files were not shown because too many files have changed in this diff.