VENDOR intensifies
vendor/github.com/aws/aws-sdk-go/service/s3/api.go (24738 lines, generated, vendored, executable file)
File diff suppressed because it is too large.
vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go (249 lines, generated, vendored, executable file)
@@ -0,0 +1,249 @@
package s3

import (
    "bytes"
    "crypto/md5"
    "crypto/sha256"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "hash"
    "io"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/internal/sdkio"
)

const (
    contentMD5Header    = "Content-Md5"
    contentSha256Header = "X-Amz-Content-Sha256"
    amzTeHeader         = "X-Amz-Te"
    amzTxEncodingHeader = "X-Amz-Transfer-Encoding"

    appendMD5TxEncoding = "append-md5"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
    h := md5.New()

    if !aws.IsReaderSeekable(r.Body) {
        if r.Config.Logger != nil {
            r.Config.Logger.Log(fmt.Sprintf(
                "Unable to compute Content-MD5 for unseekable body, S3.%s",
                r.Operation.Name))
        }
        return
    }

    if _, err := copySeekableBody(h, r.Body); err != nil {
        r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
        return
    }

    // encode the md5 checksum in base64 and set the request header.
    v := base64.StdEncoding.EncodeToString(h.Sum(nil))
    r.HTTPRequest.Header.Set(contentMD5Header, v)
}

// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable, or S3DisableContentMD5Validation is
// set, this handler will be ignored.
func computeBodyHashes(r *request.Request) {
    if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
        return
    }
    if r.IsPresigned() {
        return
    }
    if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
        return
    }

    var md5Hash, sha256Hash hash.Hash
    hashers := make([]io.Writer, 0, 2)

    // Determine upfront which hashes can be set without overriding user
    // provided header data.
    if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
        md5Hash = md5.New()
        hashers = append(hashers, md5Hash)
    }

    if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
        sha256Hash = sha256.New()
        hashers = append(hashers, sha256Hash)
    }

    // Create the destination writer based on the hashes that are not already
    // provided by the user.
    var dst io.Writer
    switch len(hashers) {
    case 0:
        return
    case 1:
        dst = hashers[0]
    default:
        dst = io.MultiWriter(hashers...)
    }

    if _, err := copySeekableBody(dst, r.Body); err != nil {
        r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
        return
    }

    // For the hashes created, set the associated headers that the user did not
    // already provide.
    if md5Hash != nil {
        sum := make([]byte, md5.Size)
        encoded := make([]byte, md5Base64EncLen)

        base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
        r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
    }

    if sha256Hash != nil {
        encoded := make([]byte, sha256HexEncLen)
        sum := make([]byte, sha256.Size)

        hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
        r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
    }
}

const (
    md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
    sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
)

func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
    curPos, err := src.Seek(0, sdkio.SeekCurrent)
    if err != nil {
        return 0, err
    }

    // hash the body. seek back to the first position after reading to reset
    // the body for transmission. copy errors may be assumed to be from the
    // body.
    n, err := io.Copy(dst, src)
    if err != nil {
        return n, err
    }

    _, err = src.Seek(curPos, sdkio.SeekStart)
    if err != nil {
        return n, err
    }

    return n, nil
}

// askForTxEncodingAppendMD5 adds the X-Amz-Te: append-md5 header to the
// request. This requests that the service respond with a trailing MD5
// checksum.
//
// Will not ask for append MD5 if disabled, if the request is presigned, or
// if the API operation does not support content MD5 validation.
func askForTxEncodingAppendMD5(r *request.Request) {
    if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
        return
    }
    if r.IsPresigned() {
        return
    }
    r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
}

func useMD5ValidationReader(r *request.Request) {
    if r.Error != nil {
        return
    }

    if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
        return
    }

    var bodyReader *io.ReadCloser
    var contentLen int64
    switch tv := r.Data.(type) {
    case *GetObjectOutput:
        bodyReader = &tv.Body
        contentLen = aws.Int64Value(tv.ContentLength)
        // Update ContentLength to hide the trailing MD5 checksum.
        tv.ContentLength = aws.Int64(contentLen - md5.Size)
        tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
    default:
        r.Error = awserr.New("ChecksumValidationError",
            fmt.Sprintf("%s: %s header received on unsupported API, %s",
                amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
            ), nil)
        return
    }

    if contentLen < md5.Size {
        r.Error = awserr.New("ChecksumValidationError",
            fmt.Sprintf("invalid Content-Length %d for %s %s",
                contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
            ), nil)
        return
    }

    // Wrap and swap the response body reader with the validation reader.
    *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
}

type md5ValidationReader struct {
    rawReader io.ReadCloser
    payload   io.Reader
    hash      hash.Hash

    payloadLen int64
    read       int64
}

func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
    h := md5.New()
    return &md5ValidationReader{
        rawReader:  reader,
        payload:    io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
        hash:       h,
        payloadLen: payloadLen,
    }
}

func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
    n, err = v.payload.Read(p)
    if err != nil && err != io.EOF {
        return n, err
    }

    v.read += int64(n)

    if err == io.EOF {
        if v.read != v.payloadLen {
            return n, io.ErrUnexpectedEOF
        }
        expectSum := make([]byte, md5.Size)
        actualSum := make([]byte, md5.Size)
        if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
            return n, sumReadErr
        }
        actualSum = v.hash.Sum(actualSum[0:0])
        if !bytes.Equal(expectSum, actualSum) {
            return n, awserr.New("InvalidChecksum",
                fmt.Sprintf("expected MD5 checksum %s, got %s",
                    hex.EncodeToString(expectSum),
                    hex.EncodeToString(actualSum),
                ),
                nil)
        }
    }

    return n, err
}

func (v *md5ValidationReader) Close() error {
    return v.rawReader.Close()
}
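The key invariant in copySeekableBody above is that hashing consumes the reader and then seeks back to the original position, so the request body can still be transmitted afterwards. A minimal standalone sketch of that pattern (illustrative only, not part of the vendored file):

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        body := strings.NewReader("hello world") // any io.ReadSeeker

        // Hash the body, then seek back to the start, mirroring
        // what copySeekableBody does with the request body.
        h := md5.New()
        if _, err := io.Copy(h, body); err != nil {
            panic(err)
        }
        if _, err := body.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }

        // contentMD5 sets the base64 of the raw MD5 sum as the header value.
        fmt.Println("Content-Md5:", base64.StdEncoding.EncodeToString(h.Sum(nil)))

        // The body is still readable from the start for transmission.
        rest, _ := ioutil.ReadAll(body)
        fmt.Println("body:", string(rest))
    }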
vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go (106 lines, generated, vendored, executable file)
@@ -0,0 +1,106 @@
package s3

import (
    "io/ioutil"
    "regexp"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/request"
)

var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)

// NormalizeBucketLocation is a utility function which will update the
// passed in value to always be a region ID. Generally this would be used
// with the GetBucketLocation API operation.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
func NormalizeBucketLocation(loc string) string {
    switch loc {
    case "":
        loc = "us-east-1"
    case "EU":
        loc = "eu-west-1"
    }

    return loc
}

// NormalizeBucketLocationHandler is a request handler which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
//    req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
//        Bucket: aws.String(bucket),
//    })
//    req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
//    err := req.Send()
var NormalizeBucketLocationHandler = request.NamedHandler{
    Name: "awssdk.s3.NormalizeBucketLocation",
    Fn: func(req *request.Request) {
        if req.Error != nil {
            return
        }

        out := req.Data.(*GetBucketLocationOutput)
        loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
        out.LocationConstraint = aws.String(loc)
    },
}

// WithNormalizeBucketLocation is a request option which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
//    result, err := svc.GetBucketLocationWithContext(ctx,
//        &s3.GetBucketLocationInput{
//            Bucket: aws.String(bucket),
//        },
//        s3.WithNormalizeBucketLocation,
//    )
func WithNormalizeBucketLocation(r *request.Request) {
    r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
}

func buildGetBucketLocation(r *request.Request) {
    if r.DataFilled() {
        out := r.Data.(*GetBucketLocationOutput)
        b, err := ioutil.ReadAll(r.HTTPResponse.Body)
        if err != nil {
            r.Error = awserr.New("SerializationError", "failed reading response body", err)
            return
        }

        match := reBucketLocation.FindSubmatch(b)
        if len(match) > 1 {
            loc := string(match[1])
            out.LocationConstraint = aws.String(loc)
        }
    }
}

func populateLocationConstraint(r *request.Request) {
    if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
        in := r.Params.(*CreateBucketInput)
        if in.CreateBucketConfiguration == nil {
            r.Params = awsutil.CopyOf(r.Params)
            in = r.Params.(*CreateBucketInput)
            in.CreateBucketConfiguration = &CreateBucketConfiguration{
                LocationConstraint: r.Config.Region,
            }
        }
    }
}
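NormalizeBucketLocation is exported, so its mapping can be shown directly. A short usage sketch (illustrative only, assuming the usual import of this s3 package):

    fmt.Println(s3.NormalizeBucketLocation(""))          // "us-east-1"
    fmt.Println(s3.NormalizeBucketLocation("EU"))        // "eu-west-1"
    fmt.Println(s3.NormalizeBucketLocation("us-west-2")) // "us-west-2" (unchanged)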
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go (74 lines, generated, vendored, executable file)
@@ -0,0 +1,74 @@
package s3

import (
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/internal/s3err"
)

func init() {
    initClient = defaultInitClientFn
    initRequest = defaultInitRequestFn
}

func defaultInitClientFn(c *client.Client) {
    // Support building custom endpoints based on config
    c.Handlers.Build.PushFront(updateEndpointForS3Config)

    // Require SSL when using SSE keys
    c.Handlers.Validate.PushBack(validateSSERequiresSSL)
    c.Handlers.Build.PushBack(computeSSEKeys)

    // S3 uses custom error unmarshaling logic
    c.Handlers.UnmarshalError.Clear()
    c.Handlers.UnmarshalError.PushBack(unmarshalError)
    c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}

func defaultInitRequestFn(r *request.Request) {
    // Add request handlers for specific platforms.
    // e.g. 100-continue support for PUT requests using Go 1.6
    platformRequestHandlers(r)

    switch r.Operation.Name {
    case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
        opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
        opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
        opPutBucketReplication:
        // These S3 operations require Content-MD5 to be set
        r.Handlers.Build.PushBack(contentMD5)
    case opGetBucketLocation:
        // GetBucketLocation has custom parsing logic
        r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
    case opCreateBucket:
        // Auto-populate LocationConstraint with current region
        r.Handlers.Validate.PushFront(populateLocationConstraint)
    case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
        r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
        r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
    case opPutObject, opUploadPart:
        r.Handlers.Build.PushBack(computeBodyHashes)
        // Disabled until #1837 root issue is resolved.
        // case opGetObject:
        //     r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
        //     r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
    }
}

// bucketGetter is an accessor interface to grab the "Bucket" field from
// an S3 type.
type bucketGetter interface {
    getBucket() string
}

// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
// field from an S3 type.
type sseCustomerKeyGetter interface {
    getSSECustomerKey() string
}

// copySourceSSECustomerKeyGetter is an accessor interface to grab the
// "CopySourceSSECustomerKey" field from an S3 type.
type copySourceSSECustomerKeyGetter interface {
    getCopySourceSSECustomerKey() string
}
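The init wiring above uses the standard aws-sdk-go handler-list pattern, and user code can hook the same pipeline on a constructed client. A minimal sketch of attaching a custom build handler (illustrative only; the header name is a hypothetical placeholder):

    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    // Runs after the SDK's own build handlers for every request made with svc.
    svc.Handlers.Build.PushBack(func(r *request.Request) {
        r.HTTPRequest.Header.Set("X-Example-Trace", "1") // hypothetical header
    })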
vendor/github.com/aws/aws-sdk-go/service/s3/doc.go (26 lines, generated, vendored, executable file)
@@ -0,0 +1,26 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package s3 provides the client and types for making API
// requests to Amazon Simple Storage Service.
//
// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
//
// See s3 package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
//
// Using the Client
//
// To contact Amazon Simple Storage Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon Simple Storage Service client S3 for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
package s3
vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go (109 lines, generated, vendored, executable file)
@@ -0,0 +1,109 @@
// Upload Managers
//
// The s3manager package's Uploader provides concurrent upload of content to S3
// by taking advantage of S3's Multipart APIs. The Uploader supports both
// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker
// for optimizations if the Body satisfies that type. Once the Uploader instance
// is created you can call Upload concurrently from multiple goroutines safely.
//
//    // The session the S3 Uploader will use
//    sess := session.Must(session.NewSession())
//
//    // Create an uploader with the session and default options
//    uploader := s3manager.NewUploader(sess)
//
//    f, err := os.Open(filename)
//    if err != nil {
//        return fmt.Errorf("failed to open file %q, %v", filename, err)
//    }
//
//    // Upload the file to S3.
//    result, err := uploader.Upload(&s3manager.UploadInput{
//        Bucket: aws.String(myBucket),
//        Key:    aws.String(myString),
//        Body:   f,
//    })
//    if err != nil {
//        return fmt.Errorf("failed to upload file, %v", err)
//    }
//    fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
//
// See the s3manager package's Uploader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
//
// Download Manager
//
// The s3manager package's Downloader provides concurrent downloading of Objects
// from S3. The Downloader will write S3 Object content with an io.WriterAt.
// Once the Downloader instance is created you can call Download concurrently from
// multiple goroutines safely.
//
//    // The session the S3 Downloader will use
//    sess := session.Must(session.NewSession())
//
//    // Create a downloader with the session and default options
//    downloader := s3manager.NewDownloader(sess)
//
//    // Create a file to write the S3 Object contents to.
//    f, err := os.Create(filename)
//    if err != nil {
//        return fmt.Errorf("failed to create file %q, %v", filename, err)
//    }
//
//    // Write the contents of S3 Object to the file
//    n, err := downloader.Download(f, &s3.GetObjectInput{
//        Bucket: aws.String(myBucket),
//        Key:    aws.String(myString),
//    })
//    if err != nil {
//        return fmt.Errorf("failed to download file, %v", err)
//    }
//    fmt.Printf("file downloaded, %d bytes\n", n)
//
// See the s3manager package's Downloader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
//
// Get Bucket Region
//
// GetBucketRegion will attempt to get the region for a bucket using a region
// hint to determine which AWS partition to perform the query on. Use this utility
// to determine the region a bucket is in.
//
//    sess := session.Must(session.NewSession())
//
//    bucket := "my-bucket"
//    region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
//        }
//        return err
//    }
//    fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// See the s3manager package's GetBucketRegion function documentation for more information
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
//
// S3 Crypto Client
//
// The s3crypto package provides the tools to upload and download encrypted
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
//    sess := session.Must(session.NewSession())
//
//    // Create the decryption client.
//    svc := s3crypto.NewDecryptionClient(sess)
//
//    // The object will be downloaded from S3 and decrypted locally. Metadata
//    // about the object's encryption will instruct the decryption client how to
//    // decrypt the content of the object. By default KMS is used for keys.
//    result, err := svc.GetObject(&s3.GetObjectInput {
//        Bucket: aws.String(myBucket),
//        Key:    aws.String(myKey),
//    })
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//
package s3
vendor/github.com/aws/aws-sdk-go/service/s3/errors.go (48 lines, generated, vendored, executable file)
@@ -0,0 +1,48 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package s3

const (

    // ErrCodeBucketAlreadyExists for service response error code
    // "BucketAlreadyExists".
    //
    // The requested bucket name is not available. The bucket namespace is shared
    // by all users of the system. Please select a different name and try again.
    ErrCodeBucketAlreadyExists = "BucketAlreadyExists"

    // ErrCodeBucketAlreadyOwnedByYou for service response error code
    // "BucketAlreadyOwnedByYou".
    ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"

    // ErrCodeNoSuchBucket for service response error code
    // "NoSuchBucket".
    //
    // The specified bucket does not exist.
    ErrCodeNoSuchBucket = "NoSuchBucket"

    // ErrCodeNoSuchKey for service response error code
    // "NoSuchKey".
    //
    // The specified key does not exist.
    ErrCodeNoSuchKey = "NoSuchKey"

    // ErrCodeNoSuchUpload for service response error code
    // "NoSuchUpload".
    //
    // The specified multipart upload does not exist.
    ErrCodeNoSuchUpload = "NoSuchUpload"

    // ErrCodeObjectAlreadyInActiveTierError for service response error code
    // "ObjectAlreadyInActiveTierError".
    //
    // This operation is not allowed against this storage tier.
    ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"

    // ErrCodeObjectNotInActiveTierError for service response error code
    // "ObjectNotInActiveTierError".
    //
    // The source object of the COPY operation is not in the active tier and is
    // only stored in Amazon Glacier.
    ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)
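These constants are matched against awserr.Error codes returned by the service. A typical check (illustrative fragment only, assuming a configured svc client and the usual aws, awserr, and s3 imports):

    _, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("my-bucket"), // placeholder
        Key:    aws.String("my-key"),    // placeholder
    })
    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
        // The object does not exist; handle the miss rather than failing.
    }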
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go (155 lines, generated, vendored, executable file)
@@ -0,0 +1,155 @@
package s3

import (
    "fmt"
    "net/url"
    "regexp"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
)

// an operationBlacklist is a list of operation names that a
// request handler should not be executed with.
type operationBlacklist []string

// Continue will return true if the Request's operation name is not
// in the blacklist. False otherwise.
func (b operationBlacklist) Continue(r *request.Request) bool {
    for i := 0; i < len(b); i++ {
        if b[i] == r.Operation.Name {
            return false
        }
    }
    return true
}

var accelerateOpBlacklist = operationBlacklist{
    opListBuckets, opCreateBucket, opDeleteBucket,
}

// Request handler to automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain ".".
func updateEndpointForS3Config(r *request.Request) {
    forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
    accelerate := aws.BoolValue(r.Config.S3UseAccelerate)

    if accelerate && accelerateOpBlacklist.Continue(r) {
        if forceHostStyle {
            if r.Config.Logger != nil {
                r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
            }
        }
        updateEndpointForAccelerate(r)
    } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
        updateEndpointForHostStyle(r)
    }
}

func updateEndpointForHostStyle(r *request.Request) {
    bucket, ok := bucketNameFromReqParams(r.Params)
    if !ok {
        // Ignore operation requests if the bucket name was not provided;
        // if this is an input validation error the validation handler
        // will report it.
        return
    }

    if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
        // bucket name must be valid to put into the host
        return
    }

    moveBucketToHost(r.HTTPRequest.URL, bucket)
}

var (
    accelElem = []byte("s3-accelerate.dualstack.")
)

func updateEndpointForAccelerate(r *request.Request) {
    bucket, ok := bucketNameFromReqParams(r.Params)
    if !ok {
        // Ignore operation requests if the bucket name was not provided;
        // if this is an input validation error the validation handler
        // will report it.
        return
    }

    if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
        r.Error = awserr.New("InvalidParameterException",
            fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
            nil)
        return
    }

    parts := strings.Split(r.HTTPRequest.URL.Host, ".")
    if len(parts) < 3 {
        r.Error = awserr.New("InvalidParameterExecption",
            fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
                r.HTTPRequest.URL.Host), nil)
        return
    }

    if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
        parts[0] = "s3-accelerate"
    }
    for i := 1; i+1 < len(parts); i++ {
        if parts[i] == aws.StringValue(r.Config.Region) {
            parts = append(parts[:i], parts[i+1:]...)
            break
        }
    }

    r.HTTPRequest.URL.Host = strings.Join(parts, ".")

    moveBucketToHost(r.HTTPRequest.URL, bucket)
}

// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
    if iface, ok := params.(bucketGetter); ok {
        b := iface.getBucket()
        return b, len(b) > 0
    }

    return "", false
}

// hostCompatibleBucketName returns true if the request should
// put the bucket in the host. This is false if S3ForcePathStyle is
// explicitly set or if the bucket is not DNS compatible.
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
    // Bucket might be DNS compatible but dots in the hostname will fail
    // certificate validation, so do not use host-style.
    if u.Scheme == "https" && strings.Contains(bucket, ".") {
        return false
    }

    // if the bucket is DNS compatible
    return dnsCompatibleBucketName(bucket)
}

var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)

// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
// Buckets created outside of the classic region MUST be DNS compatible.
func dnsCompatibleBucketName(bucket string) bool {
    return reDomain.MatchString(bucket) &&
        !reIPAddress.MatchString(bucket) &&
        !strings.Contains(bucket, "..")
}

// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
    u.Host = bucket + "." + u.Host
    u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
    if u.Path == "" {
        u.Path = "/"
    }
}
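moveBucketToHost is unexported, but its effect is easy to demonstrate by replicating the logic. A standalone sketch of the host-style rewrite (illustrative only; the endpoint and bucket name are placeholders):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // Replicates the unexported moveBucketToHost logic for illustration only.
    func moveBucketToHost(u *url.URL, bucket string) {
        u.Host = bucket + "." + u.Host
        u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
        if u.Path == "" {
            u.Path = "/"
        }
    }

    func main() {
        u, _ := url.Parse("https://s3.us-west-2.amazonaws.com/{Bucket}/my-key")
        moveBucketToHost(u, "my-bucket")
        // Prints: https://my-bucket.s3.us-west-2.amazonaws.com/my-key
        fmt.Println(u)
    }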
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go (8 lines, generated, vendored, executable file)
@@ -0,0 +1,8 @@
// +build !go1.6

package s3

import "github.com/aws/aws-sdk-go/aws/request"

func platformRequestHandlers(r *request.Request) {
}
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go (28 lines, generated, vendored, executable file)
@@ -0,0 +1,28 @@
// +build go1.6

package s3

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
)

func platformRequestHandlers(r *request.Request) {
    if r.Operation.HTTPMethod == "PUT" {
        // 100-Continue should only be used on put requests.
        r.Handlers.Sign.PushBack(add100Continue)
    }
}

func add100Continue(r *request.Request) {
    if aws.BoolValue(r.Config.S3Disable100Continue) {
        return
    }
    if r.HTTPRequest.ContentLength < 1024*1024*2 {
        // Ignore requests smaller than 2MB. This helps prevent delaying
        // requests unnecessarily.
        return
    }

    r.HTTPRequest.Header.Set("Expect", "100-Continue")
}
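Outside the SDK, the same Expect: 100-continue guard looks like this with plain net/http (a sketch under the same 2MB threshold; the URL, body, and size variables are placeholders):

    req, err := http.NewRequest("PUT", "https://example.com/upload", body)
    if err != nil {
        return err
    }
    req.ContentLength = size
    if size >= 2*1024*1024 { // mirror the SDK's 2MB cutoff
        // Ask the server to vet the headers before the body is sent.
        req.Header.Set("Expect", "100-Continue")
    }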
vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go (443 lines, generated, vendored, executable file)
@@ -0,0 +1,443 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package s3iface

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
)

// S3API provides an interface to enable mocking the
// s3.S3 service client's API operations,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
//    // myFunc uses an SDK service client to make a request to
//    // Amazon Simple Storage Service.
//    func myFunc(svc s3iface.S3API) bool {
//        // Make svc.AbortMultipartUpload request
//    }
//
//    func main() {
//        sess := session.New()
//        svc := s3.New(sess)
//
//        myFunc(svc)
//    }
//
// In your _test.go file:
//
//    // Define a mock struct to be used in your unit tests of myFunc.
//    type mockS3Client struct {
//        s3iface.S3API
//    }
//    func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
//        // mock response/functionality
//    }
//
//    func TestMyFunc(t *testing.T) {
//        // Setup Test
//        mockSvc := &mockS3Client{}
//
//        myFunc(mockSvc)
//
//        // Verify myFunc's functionality
//    }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or using
// tooling to generate mocks to satisfy the interfaces.
type S3API interface {
    AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
    AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
    AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)

    CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
    CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
    CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)

    CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
    CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
    CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)

    CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
    CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
    CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)

    CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
    CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
    CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)

    DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
    DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
    DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)

    DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
    DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
    DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)

    DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
    DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
    DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)

    DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
    DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error)
    DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput)

    DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
    DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error)
    DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)

    DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
    DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
    DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)

    DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
    DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
    DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)

    DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
    DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
    DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)

    DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
    DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
    DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)

    DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
    DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error)
    DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)

    DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
    DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
    DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)

    DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
    DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
    DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)

    DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
    DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
    DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)

    DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
    DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
    DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)

    DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
    DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
    DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)

    GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
    GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error)
    GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)

    GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
    GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
    GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)

    GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
    GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error)
    GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)

    GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
    GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
    GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)

    GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error)
    GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error)
    GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput)

    GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
    GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error)
    GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)

    GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
    GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error)
    GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)

    GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
    GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
    GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)

    GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
    GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
    GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)

    GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
    GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
    GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)

    GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
    GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error)
    GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)

    GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
    GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error)
    GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)

    GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
    GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
    GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)

    GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
    GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
    GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)

    GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error)
    GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error)
    GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput)

    GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
    GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
    GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)

    GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
    GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error)
    GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)

    GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
    GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error)
    GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)

    GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
    GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
    GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)

    GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
    GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
    GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)

    GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
    GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
    GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)

    GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
    GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
    GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)

    GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
    GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
    GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)

    GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
    GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
    GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)

    GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
    GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
    GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)

    GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
    GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
    GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)

    GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
    GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error)
    GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)

    GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
    GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
    GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)

    HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
    HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
    HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)

    HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
    HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
    HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)

    ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
    ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
    ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)

    ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
    ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error)
    ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)

    ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
    ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error)
    ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)

    ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
    ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
    ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)

    ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
    ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
    ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)

    ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
    ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error

    ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
    ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
    ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)

    ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
    ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error

    ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
    ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
    ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)

    ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
    ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error

    ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
    ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
    ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)

    ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
    ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error

    ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
    ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
    ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)

    ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
    ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error

    PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
    PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error)
    PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)

    PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
    PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
    PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)

    PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
    PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error)
    PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)

    PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
    PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
    PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)

    PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error)
    PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error)
    PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput)

    PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
    PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error)
    PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)

    PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
    PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error)
    PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)

    PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
    PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
    PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)

    PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
    PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
    PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)

    PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
|
||||
PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error)
|
||||
PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
|
||||
|
||||
PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
|
||||
PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error)
|
||||
PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
|
||||
|
||||
PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
|
||||
PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
|
||||
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
|
||||
|
||||
PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
|
||||
PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
|
||||
PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
|
||||
|
||||
PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
|
||||
PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
|
||||
PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
|
||||
|
||||
PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
|
||||
PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error)
|
||||
PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
|
||||
|
||||
PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
|
||||
PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error)
|
||||
PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
|
||||
|
||||
PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
|
||||
PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
|
||||
PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
|
||||
|
||||
PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
|
||||
PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
|
||||
PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
|
||||
|
||||
PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
|
||||
PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
|
||||
PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
|
||||
|
||||
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
|
||||
PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
|
||||
PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
|
||||
|
||||
PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
|
||||
PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
|
||||
PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)
|
||||
|
||||
PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
|
||||
PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
|
||||
PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)
|
||||
|
||||
PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
|
||||
PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
|
||||
PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)
|
||||
|
||||
PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
|
||||
PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
|
||||
PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
|
||||
|
||||
PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
|
||||
PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
|
||||
PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
|
||||
|
||||
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
|
||||
RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
|
||||
RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
|
||||
|
||||
SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error)
|
||||
SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error)
|
||||
SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput)
|
||||
|
||||
UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
|
||||
UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
|
||||
UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
|
||||
|
||||
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
|
||||
UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
|
||||
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
|
||||
|
||||
WaitUntilBucketExists(*s3.HeadBucketInput) error
|
||||
WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
|
||||
|
||||
WaitUntilBucketNotExists(*s3.HeadBucketInput) error
|
||||
WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
|
||||
|
||||
WaitUntilObjectExists(*s3.HeadObjectInput) error
|
||||
WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
|
||||
|
||||
WaitUntilObjectNotExists(*s3.HeadObjectInput) error
|
||||
WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
|
||||
}
|
||||
|
||||
var _ S3API = (*s3.S3)(nil)
|
||||
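// Usage sketch (hypothetical names mockS3 and countKeys, not part of the
// vendored file): the interface exists so tests can substitute a stub for the
// real *s3.S3 client, which the compile-time assertion above guarantees
// implements S3API.
//
//    type mockS3 struct {
//        s3iface.S3API // embed to satisfy the full interface
//    }
//
//    func (m mockS3) ListObjects(in *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
//        return &s3.ListObjectsOutput{
//            Contents: []*s3.Object{{Key: aws.String("stub-key")}},
//        }, nil
//    }
//
//    // countKeys accepts the interface, so production code passes *s3.S3
//    // and tests pass mockS3.
//    func countKeys(svc s3iface.S3API, bucket string) (int, error) {
//        out, err := svc.ListObjects(&s3.ListObjectsInput{Bucket: aws.String(bucket)})
//        if err != nil {
//            return 0, err
//        }
//        return len(out.Contents), nil
//    }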
529
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
generated
vendored
Executable file
@@ -0,0 +1,529 @@
package s3manager

import (
    "bytes"
    "fmt"
    "io"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
)

const (
    // DefaultBatchSize is the batch size we initialize when constructing a batch delete client.
    // This value is used when calling DeleteObjects. This represents how many objects to delete
    // per DeleteObjects call.
    DefaultBatchSize = 100
)

// BatchError will contain the key and bucket of the object that failed to
// either upload or download.
type BatchError struct {
    Errors  Errors
    code    string
    message string
}

// Errors is a typed alias for a slice of errors to satisfy the error
// interface.
type Errors []Error

func (errs Errors) Error() string {
    buf := bytes.NewBuffer(nil)
    for i, err := range errs {
        buf.WriteString(err.Error())
        if i+1 < len(errs) {
            buf.WriteString("\n")
        }
    }
    return buf.String()
}

// Error will contain the original error, bucket, and key of the operation that failed
// during batch operations.
type Error struct {
    OrigErr error
    Bucket  *string
    Key     *string
}

func newError(err error, bucket, key *string) Error {
    return Error{
        err,
        bucket,
        key,
    }
}

func (err *Error) Error() string {
    origErr := ""
    if err.OrigErr != nil {
        origErr = ":\n" + err.OrigErr.Error()
    }
    return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
        aws.StringValue(err.Key),
        aws.StringValue(err.Bucket),
        origErr,
    )
}

// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
func NewBatchError(code, message string, err []Error) awserr.Error {
    return &BatchError{
        Errors:  err,
        code:    code,
        message: message,
    }
}

// Code will return the code associated with the batch error.
func (err *BatchError) Code() string {
    return err.code
}

// Message will return the message associated with the batch error.
func (err *BatchError) Message() string {
    return err.message
}

func (err *BatchError) Error() string {
    return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
}

// OrigErr returns the underlying Errors slice so that callers can inspect the
// individual failures of the batched operation.
func (err *BatchError) OrigErr() error {
    return err.Errors
}

// BatchDeleteIterator is an interface that uses the scanner pattern to
// iterate through what needs to be deleted.
type BatchDeleteIterator interface {
    Next() bool
    Err() error
    DeleteObject() BatchDeleteObject
}
// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
// iterate through a list of objects and delete the objects.
//
// Example:
//    iter := &s3manager.DeleteListIterator{
//        Bucket: aws.String("bucket"),
//        Paginator: request.Pagination{
//            NewRequest: func() (*request.Request, error) {
//                var inCpy *s3.ListObjectsInput
//                if input != nil {
//                    tmp := *input
//                    inCpy = &tmp
//                }
//                req, _ := svc.ListObjectsRequest(inCpy)
//                return req, nil
//            },
//        },
//    }
//
//    batcher := s3manager.NewBatchDeleteWithClient(svc)
//    if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
//        return err
//    }
type DeleteListIterator struct {
    Bucket    *string
    Paginator request.Pagination
    objects   []*s3.Object
}

// NewDeleteListIterator will return a new DeleteListIterator.
func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
    iter := &DeleteListIterator{
        Bucket: input.Bucket,
        Paginator: request.Pagination{
            NewRequest: func() (*request.Request, error) {
                var inCpy *s3.ListObjectsInput
                if input != nil {
                    tmp := *input
                    inCpy = &tmp
                }
                req, _ := svc.ListObjectsRequest(inCpy)
                return req, nil
            },
        },
    }

    for _, opt := range opts {
        opt(iter)
    }
    return iter
}

// Next will use the S3API client to iterate through a list of objects.
func (iter *DeleteListIterator) Next() bool {
    if len(iter.objects) > 0 {
        iter.objects = iter.objects[1:]
    }

    if len(iter.objects) == 0 && iter.Paginator.Next() {
        iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
    }

    return len(iter.objects) > 0
}

// Err will return the last known error from Next.
func (iter *DeleteListIterator) Err() error {
    return iter.Paginator.Err()
}

// DeleteObject will return the current object to be deleted.
func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
    return BatchDeleteObject{
        Object: &s3.DeleteObjectInput{
            Bucket: iter.Bucket,
            Key:    iter.objects[0].Key,
        },
    }
}

// BatchDelete will use the s3 package's service client to perform a batch
// delete.
type BatchDelete struct {
    Client    s3iface.S3API
    BatchSize int
}

// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of
// objects.
//
// Example:
//    batcher := s3manager.NewBatchDeleteWithClient(client)
//
//    objects := []BatchDeleteObject{
//        {
//            Object: &s3.DeleteObjectInput{
//                Key:    aws.String("key"),
//                Bucket: aws.String("bucket"),
//            },
//        },
//    }
//
//    if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
//        Objects: objects,
//    }); err != nil {
//        return err
//    }
func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
    svc := &BatchDelete{
        Client:    client,
        BatchSize: DefaultBatchSize,
    }

    for _, opt := range options {
        opt(svc)
    }

    return svc
}

// NewBatchDelete will return a new delete client that can delete a batched amount of
// objects.
//
// Example:
//    batcher := s3manager.NewBatchDelete(sess)
//
//    objects := []BatchDeleteObject{
//        {
//            Object: &s3.DeleteObjectInput{
//                Key:    aws.String("key"),
//                Bucket: aws.String("bucket"),
//            },
//        },
//    }
//
//    if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
//        Objects: objects,
//    }); err != nil {
//        return err
//    }
func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
    client := s3.New(c)
    return NewBatchDeleteWithClient(client, options...)
}

// BatchDeleteObject is a wrapper object for calling the batch delete operation.
type BatchDeleteObject struct {
    Object *s3.DeleteObjectInput
    // After will run after each iteration during the batch process. This function will
    // be executed whether or not the request was successful.
    After func() error
}

// DeleteObjectsIterator implements the BatchDeleteIterator interface, using
// the scanner pattern to iterate through a series of objects to be deleted.
type DeleteObjectsIterator struct {
    Objects []BatchDeleteObject
    index   int
    inc     bool
}

// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (iter *DeleteObjectsIterator) Next() bool {
    if iter.inc {
        iter.index++
    } else {
        iter.inc = true
    }
    return iter.index < len(iter.Objects)
}

// Err returns nil. It exists only to satisfy the BatchDeleteIterator
// interface.
func (iter *DeleteObjectsIterator) Err() error {
    return nil
}

// DeleteObject will return the BatchDeleteObject at the current batched index.
func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
    object := iter.Objects[iter.index]
    return object
}
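// Illustrative sketch (not part of the upstream file): any type with
// Next/Err/DeleteObject can drive BatchDelete. A hypothetical iterator over a
// plain slice of keys in one bucket could look like:
//
//    type keyIterator struct {
//        bucket string
//        keys   []string
//        pos    int
//    }
//
//    func (it *keyIterator) Next() bool { it.pos++; return it.pos <= len(it.keys) }
//    func (it *keyIterator) Err() error { return nil }
//    func (it *keyIterator) DeleteObject() BatchDeleteObject {
//        return BatchDeleteObject{Object: &s3.DeleteObjectInput{
//            Bucket: aws.String(it.bucket),
//            Key:    aws.String(it.keys[it.pos-1]),
//        }}
//    }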
// Delete will use the iterator to queue up objects that need to be deleted.
// Once the batch size is met, this will call the deleteBatch function.
func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
    var errs []Error
    objects := []BatchDeleteObject{}
    var input *s3.DeleteObjectsInput

    for iter.Next() {
        o := iter.DeleteObject()

        if input == nil {
            input = initDeleteObjectsInput(o.Object)
        }

        parity := hasParity(input, o)
        if parity {
            input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
                Key:       o.Object.Key,
                VersionId: o.Object.VersionId,
            })
            objects = append(objects, o)
        }

        if len(input.Delete.Objects) == d.BatchSize || !parity {
            if err := deleteBatch(ctx, d, input, objects); err != nil {
                errs = append(errs, err...)
            }

            objects = objects[:0]
            input = nil

            if !parity {
                objects = append(objects, o)
                input = initDeleteObjectsInput(o.Object)
                input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
                    Key:       o.Object.Key,
                    VersionId: o.Object.VersionId,
                })
            }
        }
    }

    // iter.Next() may have returned false because of an error; surface it
    // via iter.Err().
    if iter.Err() != nil {
        errs = append(errs, newError(iter.Err(), nil, nil))
    }

    if input != nil && len(input.Delete.Objects) > 0 {
        if err := deleteBatch(ctx, d, input, objects); err != nil {
            errs = append(errs, err...)
        }
    }

    if len(errs) > 0 {
        return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
    }
    return nil
}
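// Usage sketch for Delete (assumes svc is an existing *s3.S3 client; the
// bucket and prefix are illustrative): pair it with NewDeleteListIterator to
// remove every object under a prefix in DeleteObjects-sized batches.
//
//    iter := s3manager.NewDeleteListIterator(svc, &s3.ListObjectsInput{
//        Bucket: aws.String("bucket"),
//        Prefix: aws.String("logs/"),
//    })
//    batcher := s3manager.NewBatchDeleteWithClient(svc)
//    if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
//        return err // a BatchError; its Errors field lists per-object failures
//    }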
func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
    return &s3.DeleteObjectsInput{
        Bucket:       o.Bucket,
        MFA:          o.MFA,
        RequestPayer: o.RequestPayer,
        Delete:       &s3.Delete{},
    }
}

const (
    // ErrDeleteBatchFailCode represents an error code which will be returned
    // only when DeleteObjects.Errors has an error that does not contain a code.
    ErrDeleteBatchFailCode       = "DeleteBatchError"
    errDefaultDeleteBatchMessage = "failed to delete"
)

// deleteBatch will delete a batch of items in the objects parameter.
func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
    errs := []Error{}

    if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
        for i := 0; i < len(input.Delete.Objects); i++ {
            errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
        }
    } else if len(result.Errors) > 0 {
        for i := 0; i < len(result.Errors); i++ {
            code := ErrDeleteBatchFailCode
            msg := errDefaultDeleteBatchMessage
            if result.Errors[i].Message != nil {
                msg = *result.Errors[i].Message
            }
            if result.Errors[i].Code != nil {
                code = *result.Errors[i].Code
            }

            errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
        }
    }
    for _, object := range objects {
        if object.After == nil {
            continue
        }
        if err := object.After(); err != nil {
            errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
        }
    }

    return errs
}

// hasParity reports whether o2 can join the batch described by o1. The
// Bucket, MFA, and RequestPayer values must all match for a single
// DeleteObjects request to cover both objects.
func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
    if o1.Bucket != nil && o2.Object.Bucket != nil {
        if *o1.Bucket != *o2.Object.Bucket {
            return false
        }
    } else if o1.Bucket != o2.Object.Bucket {
        return false
    }

    if o1.MFA != nil && o2.Object.MFA != nil {
        if *o1.MFA != *o2.Object.MFA {
            return false
        }
    } else if o1.MFA != o2.Object.MFA {
        return false
    }

    if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
        if *o1.RequestPayer != *o2.Object.RequestPayer {
            return false
        }
    } else if o1.RequestPayer != o2.Object.RequestPayer {
        return false
    }

    return true
}
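// Worked example of the parity rule (illustrative values): objects that agree
// on Bucket, MFA, and RequestPayer share one DeleteObjects request; any
// mismatch flushes the current batch before a new input is started.
//
//    in := initDeleteObjectsInput(&s3.DeleteObjectInput{Bucket: aws.String("a"), Key: aws.String("k1")})
//    same := BatchDeleteObject{Object: &s3.DeleteObjectInput{Bucket: aws.String("a"), Key: aws.String("k2")}}
//    other := BatchDeleteObject{Object: &s3.DeleteObjectInput{Bucket: aws.String("b"), Key: aws.String("k3")}}
//    hasParity(in, same)  // true: appended to the current batch
//    hasParity(in, other) // false: Delete flushes, then starts a fresh input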
// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
// through a series of objects to be downloaded.
type BatchDownloadIterator interface {
    Next() bool
    Err() error
    DownloadObject() BatchDownloadObject
}

// BatchDownloadObject contains all necessary information to run a batch operation once.
type BatchDownloadObject struct {
    Object *s3.GetObjectInput
    Writer io.WriterAt
    // After will run after each iteration during the batch process. This function will
    // be executed whether or not the request was successful.
    After func() error
}

// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
// download of objects.
type DownloadObjectsIterator struct {
    Objects []BatchDownloadObject
    index   int
    inc     bool
}

// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *DownloadObjectsIterator) Next() bool {
    if batcher.inc {
        batcher.index++
    } else {
        batcher.inc = true
    }
    return batcher.index < len(batcher.Objects)
}

// DownloadObject will return the BatchDownloadObject at the current batched index.
func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
    object := batcher.Objects[batcher.index]
    return object
}

// Err returns nil. It exists only to satisfy the BatchDownloadIterator
// interface.
func (batcher *DownloadObjectsIterator) Err() error {
    return nil
}

// BatchUploadIterator is an interface that uses the scanner pattern to
// iterate through what needs to be uploaded.
type BatchUploadIterator interface {
    Next() bool
    Err() error
    UploadObject() BatchUploadObject
}

// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
// upload of objects.
type UploadObjectsIterator struct {
    Objects []BatchUploadObject
    index   int
    inc     bool
}

// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *UploadObjectsIterator) Next() bool {
    if batcher.inc {
        batcher.index++
    } else {
        batcher.inc = true
    }
    return batcher.index < len(batcher.Objects)
}

// Err returns nil. It exists only to satisfy the BatchUploadIterator
// interface.
func (batcher *UploadObjectsIterator) Err() error {
    return nil
}

// UploadObject will return the BatchUploadObject at the current batched index.
func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
    object := batcher.Objects[batcher.index]
    return object
}

// BatchUploadObject contains all necessary information to run a batch operation once.
type BatchUploadObject struct {
    Object *UploadInput
    // After will run after each iteration during the batch process. This function will
    // be executed whether or not the request was successful.
    After func() error
}
88
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
generated
vendored
Executable file
@@ -0,0 +1,88 @@
package s3manager

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// GetBucketRegion will attempt to get the region for a bucket using the
// regionHint to determine which AWS partition to perform the query on.
//
// The request will not be signed, and will not use your AWS credentials.
//
// A "NotFound" error code will be returned if the bucket does not exist in the
// AWS partition the regionHint belongs to. If the regionHint parameter is an
// empty string GetBucketRegion will fall back to the ConfigProvider's region
// config. If the regionHint is empty, and the ConfigProvider does not have a
// region value, an error will be returned.
//
// For example, to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
//    sess := session.Must(session.NewSession())
//
//    bucket := "my-bucket"
//    region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
//        }
//        return err
//    }
//    fmt.Printf("Bucket %s is in %s region\n", bucket, region)
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
    var cfg aws.Config
    if len(regionHint) != 0 {
        cfg.Region = aws.String(regionHint)
    }
    svc := s3.New(c, &cfg)
    return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
}

const bucketRegionHeader = "X-Amz-Bucket-Region"

// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
// that it takes a S3 service client instead of a Session. The regionHint is
// derived from the region the S3 service client was created in.
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
    req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
        Bucket: aws.String(bucket),
    })
    req.Config.S3ForcePathStyle = aws.Bool(true)
    req.Config.Credentials = credentials.AnonymousCredentials
    req.SetContext(ctx)

    // Disable HTTP redirects to prevent an invalid 301 from eating the response
    // because Go's HTTP client will fail, and drop the response if a 301 is
    // received without a location header. S3 will return a 301 without the
    // location header for HeadObject API calls.
    req.DisableFollowRedirects = true

    var bucketRegion string
    req.Handlers.Send.PushBack(func(r *request.Request) {
        bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
        if len(bucketRegion) == 0 {
            return
        }
        r.HTTPResponse.StatusCode = 200
        r.HTTPResponse.Status = "OK"
        r.Error = nil
    })

    req.ApplyOptions(opts...)

    if err := req.Send(); err != nil {
        return "", err
    }

    bucketRegion = s3.NormalizeBucketLocation(bucketRegion)

    return bucketRegion, nil
}
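// Usage sketch for the client variant (assumes svc is an existing *s3.S3
// client whose configured region serves as the region hint):
//
//    region, err := s3manager.GetBucketRegionWithClient(ctx, svc, "my-bucket")
//    if err != nil {
//        return err
//    }
//    fmt.Printf("Bucket is in the %s region\n", region)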
3
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
generated
vendored
Executable file
@@ -0,0 +1,3 @@
// Package s3manager provides utilities to upload and download objects from
// S3 concurrently. Helpful for when working with large objects.
package s3manager
555
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
generated
vendored
Executable file
@@ -0,0 +1,555 @@
package s3manager

import (
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "sync"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 5

// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
const DefaultDownloadConcurrency = 5

// The Downloader structure that calls Download(). It is safe to call Download()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Downloader's properties is not safe to be done concurrently.
type Downloader struct {
    // The buffer size (in bytes) to use when buffering data into chunks and
    // sending them as parts to S3. The minimum allowed part size is 5MB, and
    // if this value is set to zero, the DefaultDownloadPartSize value will be used.
    //
    // PartSize is ignored if the Range input parameter is provided.
    PartSize int64

    // The number of goroutines to spin up in parallel when sending parts.
    // If this is set to zero, the DefaultDownloadConcurrency value will be used.
    //
    // Concurrency of 1 will download the parts sequentially.
    //
    // Concurrency is ignored if the Range input parameter is provided.
    Concurrency int

    // An S3 client to use when performing downloads.
    S3 s3iface.S3API

    // List of request options that will be passed down to individual API
    // operation requests made by the downloader.
    RequestOptions []request.Option
}

// WithDownloaderRequestOptions appends to the Downloader's API request options.
func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
    return func(d *Downloader) {
        d.RequestOptions = append(d.RequestOptions, opts...)
    }
}

// NewDownloader creates a new Downloader instance to download objects from
// S3 in concurrent chunks. Pass in additional functional options to customize
// the downloader behavior. Requires a client.ConfigProvider in order to create
// a S3 service client. The session.Session satisfies the client.ConfigProvider
// interface.
//
// Example:
//    // The session the S3 Downloader will use
//    sess := session.Must(session.NewSession())
//
//    // Create a downloader with the session and default options
//    downloader := s3manager.NewDownloader(sess)
//
//    // Create a downloader with the session and custom options
//    downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
//        d.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
    d := &Downloader{
        S3:          s3.New(c),
        PartSize:    DefaultDownloadPartSize,
        Concurrency: DefaultDownloadConcurrency,
    }
    for _, option := range options {
        option(d)
    }

    return d
}

// NewDownloaderWithClient creates a new Downloader instance to download
// objects from S3 in concurrent chunks. Pass in additional functional
// options to customize the downloader behavior. Requires a S3 service client
// to make S3 API calls.
//
// Example:
//    // The session the S3 Downloader will use
//    sess := session.Must(session.NewSession())
//
//    // The S3 client the S3 Downloader will use
//    s3Svc := s3.New(sess)
//
//    // Create a downloader with the s3 client and default options
//    downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
//    // Create a downloader with the s3 client and custom options
//    downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
//        d.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
    d := &Downloader{
        S3:          svc,
        PartSize:    DefaultDownloadPartSize,
        Concurrency: DefaultDownloadConcurrency,
    }
    for _, option := range options {
        option(d)
    }

    return d
}

type maxRetrier interface {
    MaxRetries() int
}

// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is called from.
// Modifying the options will not impact the original Downloader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This causes the
// part size and concurrency configurations to be ignored.
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
    return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
}

// DownloadWithContext downloads an object in S3 and writes the payload into w
// using concurrent GET requests.
//
// DownloadWithContext is the same as Download with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the Context to add deadlining, timeouts, etc. The
// DownloadWithContext may create sub-contexts for individual underlying
// requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is
// called from. Modifying the options will not impact the original Downloader
// instance. Use the WithDownloaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this downloader.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// It is safe to call this method concurrently across goroutines.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This causes the
// part size and concurrency configurations to be ignored.
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
    impl := downloader{w: w, in: input, cfg: d, ctx: ctx}

    for _, option := range options {
        option(&impl.cfg)
    }
    impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))

    if s, ok := d.S3.(maxRetrier); ok {
        impl.partBodyMaxRetries = s.MaxRetries()
    }

    impl.totalBytes = -1
    if impl.cfg.Concurrency == 0 {
        impl.cfg.Concurrency = DefaultDownloadConcurrency
    }

    if impl.cfg.PartSize == 0 {
        impl.cfg.PartSize = DefaultDownloadPartSize
    }

    return impl.download()
}

// DownloadWithIterator will download a batched amount of objects in S3 and writes them
// to the io.WriterAt specified in the iterator.
//
// Example:
//    svc := s3manager.NewDownloader(sess)
//
//    fooFile, err := os.Create("/tmp/foo.file")
//    if err != nil {
//        return err
//    }
//
//    barFile, err := os.Create("/tmp/bar.file")
//    if err != nil {
//        return err
//    }
//
//    objects := []s3manager.BatchDownloadObject{
//        {
//            Object: &s3.GetObjectInput{
//                Bucket: aws.String("bucket"),
//                Key:    aws.String("foo"),
//            },
//            Writer: fooFile,
//        },
//        {
//            Object: &s3.GetObjectInput{
//                Bucket: aws.String("bucket"),
//                Key:    aws.String("bar"),
//            },
//            Writer: barFile,
//        },
//    }
//
//    iter := &s3manager.DownloadObjectsIterator{Objects: objects}
//    if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
//        return err
//    }
func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
    var errs []Error
    for iter.Next() {
        object := iter.DownloadObject()
        if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
            errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
        }

        if object.After == nil {
            continue
        }

        if err := object.After(); err != nil {
            errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
        }
    }

    if len(errs) > 0 {
        return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
    }
    return nil
}

// downloader is the implementation structure used internally by Downloader.
type downloader struct {
    ctx aws.Context
    cfg Downloader

    in *s3.GetObjectInput
    w  io.WriterAt

    wg sync.WaitGroup
    m  sync.Mutex

    pos        int64
    totalBytes int64
    written    int64
    err        error

    partBodyMaxRetries int
}

// download performs the implementation of the object download across ranged
// GETs.
func (d *downloader) download() (n int64, err error) {
    // If a range is specified, fall back to a single download of that range.
    // This enables the functionality of ranged gets with the downloader but
    // at the cost of no multipart downloads.
    if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
        d.downloadRange(rng)
        return d.written, d.err
    }

    // Spin off first worker to check additional header information
    d.getChunk()

    if total := d.getTotalBytes(); total >= 0 {
        // Spin up workers
        ch := make(chan dlchunk, d.cfg.Concurrency)

        for i := 0; i < d.cfg.Concurrency; i++ {
            d.wg.Add(1)
            go d.downloadPart(ch)
        }

        // Assign work
        for d.getErr() == nil {
            if d.pos >= total {
                break // We're finished queuing chunks
            }

            // Queue the next range of bytes to read.
            ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
            d.pos += d.cfg.PartSize
        }

        // Wait for completion
        close(ch)
        d.wg.Wait()
    } else {
        // Checking if we read anything new
        for d.err == nil {
            d.getChunk()
        }

        // We expect a 416 error letting us know we are done downloading the
        // total bytes. Since we do not know the content's length, this will
        // keep grabbing chunks of data until the range of bytes specified in
        // the request is out of range of the content. Once this happens, a
        // 416 should occur.
        e, ok := d.err.(awserr.RequestFailure)
        if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
            d.err = nil
        }
    }

    // Return error
    return d.written, d.err
}
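// Ranged usage sketch (assumes d is a configured Downloader and f an open
// *os.File): supplying Range takes the single-request path above, so exactly
// one GetObject call is made and PartSize/Concurrency are ignored.
//
//    n, err := d.Download(f, &s3.GetObjectInput{
//        Bucket: aws.String("bucket"),
//        Key:    aws.String("key"),
//        Range:  aws.String("bytes=0-1048575"), // first 1 MiB only
//    })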
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
    defer d.wg.Done()
    for {
        chunk, ok := <-ch
        if !ok {
            break
        }
        if d.getErr() != nil {
            // Drain the channel if there is an error, to prevent deadlocking
            // of download producer.
            continue
        }

        if err := d.downloadChunk(chunk); err != nil {
            d.setErr(err)
        }
    }
}

// getChunk grabs a chunk of data from the body.
// Not thread safe. Should only be used when grabbing data on a single thread.
func (d *downloader) getChunk() {
    if d.getErr() != nil {
        return
    }

    chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
    d.pos += d.cfg.PartSize

    if err := d.downloadChunk(chunk); err != nil {
        d.setErr(err)
    }
}

// downloadRange downloads an Object given the passed in Byte-Range value.
// The chunk used to download the range will be configured for that range.
func (d *downloader) downloadRange(rng string) {
    if d.getErr() != nil {
        return
    }

    chunk := dlchunk{w: d.w, start: d.pos}
    // Ranges specified will short circuit the multipart download
    chunk.withRange = rng

    if err := d.downloadChunk(chunk); err != nil {
        d.setErr(err)
    }

    // Update the position based on the amount of data received.
    d.pos = d.written
}

// downloadChunk downloads the chunk from S3.
func (d *downloader) downloadChunk(chunk dlchunk) error {
    in := &s3.GetObjectInput{}
    awsutil.Copy(in, d.in)

    // Get the next byte range of data
    in.Range = aws.String(chunk.ByteRange())

    var n int64
    var err error
    for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
        var resp *s3.GetObjectOutput
        resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
        if err != nil {
            return err
        }
        d.setTotalBytes(resp) // Set total if not yet set.

        n, err = io.Copy(&chunk, resp.Body)
        resp.Body.Close()
        if err == nil {
            break
        }

        chunk.cur = 0
        logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
            fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
                aws.StringValue(in.Key), err, retry))
    }

    d.incrWritten(n)

    return err
}

func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
    s, ok := svc.(*s3.S3)
    if !ok {
        return
    }

    if s.Config.Logger == nil {
        return
    }

    if s.Config.LogLevel.Matches(level) {
        s.Config.Logger.Log(msg)
    }
}

// getTotalBytes is a thread-safe getter for retrieving the total byte status.
func (d *downloader) getTotalBytes() int64 {
    d.m.Lock()
    defer d.m.Unlock()

    return d.totalBytes
}

// setTotalBytes is a thread-safe setter for setting the total byte status.
// Will extract the object's total bytes from the Content-Range if the file
// will be chunked, or Content-Length. Content-Length is used when the response
// does not include a Content-Range, meaning the object was not chunked. This
// occurs when the full file fits within the PartSize directive.
func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
    d.m.Lock()
    defer d.m.Unlock()

    if d.totalBytes >= 0 {
        return
    }

    if resp.ContentRange == nil {
        // ContentRange is nil when the full file contents is provided, and
        // is not chunked. Use ContentLength instead.
        if resp.ContentLength != nil {
            d.totalBytes = *resp.ContentLength
            return
        }
    } else {
        parts := strings.Split(*resp.ContentRange, "/")

        total := int64(-1)
        var err error
        // Checking for whether or not a numbered total exists
        // If one does not exist, we will assume the total to be -1, undefined,
        // and sequentially download each chunk until hitting a 416 error
        totalStr := parts[len(parts)-1]
        if totalStr != "*" {
            total, err = strconv.ParseInt(totalStr, 10, 64)
            if err != nil {
                d.err = err
                return
            }
        }

        d.totalBytes = total
    }
}
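// Worked example of the Content-Range handling above (illustrative values):
//
//    "bytes 0-5242879/12582912" -> the segment after "/" parses to 12582912,
//                                  which becomes totalBytes.
//    "bytes 0-5242879/*"        -> the total is unknown; totalBytes stays -1
//                                  and chunks are fetched until a 416 arrives.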
func (d *downloader) incrWritten(n int64) {
    d.m.Lock()
    defer d.m.Unlock()

    d.written += n
}

// getErr is a thread-safe getter for the error object
func (d *downloader) getErr() error {
    d.m.Lock()
    defer d.m.Unlock()

    return d.err
}

// setErr is a thread-safe setter for the error object
func (d *downloader) setErr(e error) {
    d.m.Lock()
    defer d.m.Unlock()

    d.err = e
}

// dlchunk represents a single chunk of data to write by the worker routine.
// This structure also implements an io.SectionReader style interface for
// io.WriterAt, effectively making it an io.SectionWriter (which does not
// exist).
type dlchunk struct {
    w     io.WriterAt
    start int64
    size  int64
    cur   int64

    // specifies the byte range the chunk should be downloaded with.
    withRange string
}

// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
// position to its end (or EOF).
//
// If a range is specified on the dlchunk the size will be ignored when
// writing, as the total size may not be known ahead of time.
func (c *dlchunk) Write(p []byte) (n int, err error) {
    if c.cur >= c.size && len(c.withRange) == 0 {
        return 0, io.EOF
    }

    n, err = c.w.WriteAt(p, c.start+c.cur)
    c.cur += int64(n)

    return
}

// ByteRange returns a HTTP Byte-Range header value that should be used by the
// client to request the chunk's range.
func (c *dlchunk) ByteRange() string {
    if len(c.withRange) != 0 {
        return c.withRange
    }

    return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
}
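// In-memory usage sketch (assumes sess is a *session.Session and the object
// exists): aws.WriteAtBuffer satisfies io.WriterAt, so the concurrent chunk
// writes land in a []byte instead of a file.
//
//    buf := aws.NewWriteAtBuffer([]byte{})
//    downloader := s3manager.NewDownloader(sess)
//    n, err := downloader.Download(buf, &s3.GetObjectInput{
//        Bucket: aws.String("bucket"),
//        Key:    aws.String("key"),
//    })
//    // on success buf.Bytes() holds the n downloaded bytes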
718
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
Executable file
@@ -0,0 +1,718 @@
package s3manager

import (
    "bytes"
    "fmt"
    "io"
    "sort"
    "sync"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000

// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
const MinUploadPartSize int64 = 1024 * 1024 * 5

// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
const DefaultUploadPartSize = MinUploadPartSize

// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
const DefaultUploadConcurrency = 5

// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multi part upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
// Example:
//
//    u := s3manager.NewUploader(opts)
//    output, err := u.upload(input)
//    if err != nil {
//        if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
//            // Process error and its associated uploadID
//            fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
//        } else {
//            // Process error generically
//            fmt.Println("Error:", err.Error())
//        }
//    }
type MultiUploadFailure interface {
    awserr.Error

    // Returns the upload id for the S3 multipart upload that failed.
    UploadID() string
}

// So that the Error interface type can be included as an anonymous field
// in the multiUploadError struct and not conflict with the error.Error() method.
type awsError awserr.Error

// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
// Composed of BaseError for code, message, and original error
//
// Should be used for an error that occurred failing a S3 multipart upload,
// and an upload ID is available. If an uploadID is not available a more
// relevant error should be used instead.
type multiUploadError struct {
    awsError

    // ID for multipart upload which failed.
    uploadID string
}

// Error returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
    extra := fmt.Sprintf("upload id: %s", m.uploadID)
    return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
    return m.Error()
}

// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
    return m.uploadID
}
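// Recovery sketch (assumes LeavePartsOnError was set on the uploader and svc
// is an *s3.S3 client): the UploadID carried by a MultiUploadFailure can be
// used to abort the multipart upload and stop paying for the stored parts.
//
//    if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
//        svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//            Bucket:   aws.String("bucket"),
//            Key:      aws.String("key"),
//            UploadId: aws.String(multierr.UploadID()),
//        })
//    }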
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
    // The URL where the object was uploaded to.
    Location string

    // The version of the object that was uploaded. Will only be populated if
    // the S3 Bucket is versioned. If the bucket is not versioned this field
    // will not be set.
    VersionID *string

    // The ID for a multipart upload to S3. In the case of an error the error
    // can be cast to the MultiUploadFailure interface to extract the upload ID.
    UploadID string
}

// WithUploaderRequestOptions appends to the Uploader's API request options.
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
    return func(u *Uploader) {
        u.RequestOptions = append(u.RequestOptions, opts...)
    }
}

// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
type Uploader struct {
    // The buffer size (in bytes) to use when buffering data into chunks and
    // sending them as parts to S3. The minimum allowed part size is 5MB, and
    // if this value is set to zero, the DefaultUploadPartSize value will be used.
    PartSize int64

    // The number of goroutines to spin up in parallel per call to Upload when
    // sending parts. If this is set to zero, the DefaultUploadConcurrency value
    // will be used.
    //
    // The concurrency pool is not shared between calls to Upload.
    Concurrency int

    // Setting this value to true will cause the SDK to avoid calling
    // AbortMultipartUpload on a failure, leaving all successfully uploaded
    // parts on S3 for manual recovery.
    //
    // Note that storing parts of an incomplete multipart upload counts towards
    // space usage on S3 and will add additional costs if not cleaned up.
    LeavePartsOnError bool

    // MaxUploadParts is the max number of parts which will be uploaded to S3.
    // Will be used to calculate the partsize of the object to be uploaded.
    // E.g: a 5GB file, with MaxUploadParts set to 100, will upload the file
    // as 100 parts of 50MB each, subject to the hard limit of MaxUploadParts
    // (10,000 parts).
    //
    // Defaults to package const's MaxUploadParts value.
    MaxUploadParts int

    // The client to use when uploading to S3.
    S3 s3iface.S3API

    // List of request options that will be passed down to individual API
    // operation requests made by the uploader.
    RequestOptions []request.Option
}

// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires a
// client.ConfigProvider in order to create a S3 service client. The session.Session
// satisfies the client.ConfigProvider interface.
//
// Example:
//    // The session the S3 Uploader will use
//    sess := session.Must(session.NewSession())
//
//    // Create an uploader with the session and default options
//    uploader := s3manager.NewUploader(sess)
//
//    // Create an uploader with the session and custom options
//    uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
//        u.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
|
||||
u := &Uploader{
|
||||
S3: s3.New(c),
|
||||
PartSize: DefaultUploadPartSize,
|
||||
Concurrency: DefaultUploadConcurrency,
|
||||
LeavePartsOnError: false,
|
||||
MaxUploadParts: MaxUploadParts,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(u)
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
|
||||
// additional functional options to customize the uploader's behavior. Requires
|
||||
// a S3 service client to make S3 API calls.
|
||||
//
|
||||
// Example:
|
||||
// // The session the S3 Uploader will use
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// // S3 service client the Upload manager will use.
|
||||
// s3Svc := s3.New(sess)
|
||||
//
|
||||
// // Create an uploader with S3 client and default options
|
||||
// uploader := s3manager.NewUploaderWithClient(s3Svc)
|
||||
//
|
||||
// // Create an uploader with S3 client and custom options
|
||||
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
|
||||
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
|
||||
// })
|
||||
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
|
||||
u := &Uploader{
|
||||
S3: svc,
|
||||
PartSize: DefaultUploadPartSize,
|
||||
Concurrency: DefaultUploadConcurrency,
|
||||
LeavePartsOnError: false,
|
||||
MaxUploadParts: MaxUploadParts,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(u)
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// Upload uploads an object to S3, intelligently buffering large files into
|
||||
// smaller chunks and sending them in parallel across multiple goroutines. You
|
||||
// can configure the buffer size and concurrency through the Uploader's parameters.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
// upload. These options are copies of the Uploader instance Upload is called from.
|
||||
// Modifying the options will not impact the original Uploader instance.
|
||||
//
|
||||
// Use the WithUploaderRequestOptions helper function to pass in request
|
||||
// options that will be applied to all API operations made with this uploader.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
//
|
||||
// Example:
|
||||
// // Upload input parameters
|
||||
// upParams := &s3manager.UploadInput{
|
||||
// Bucket: &bucketName,
|
||||
// Key: &keyName,
|
||||
// Body: file,
|
||||
// }
|
||||
//
|
||||
// // Perform an upload.
|
||||
// result, err := uploader.Upload(upParams)
|
||||
//
|
||||
// // Perform upload with options different than the those in the Uploader.
|
||||
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
|
||||
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
|
||||
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
|
||||
// })
|
||||
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
|
||||
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
|
||||
}
|
||||
|
||||
// UploadWithContext uploads an object to S3, intelligently buffering large
|
||||
// files into smaller chunks and sending them in parallel across multiple
|
||||
// goroutines. You can configure the buffer size and concurrency through the
|
||||
// Uploader's parameters.
|
||||
//
|
||||
// UploadWithContext is the same as Upload with the additional support for
|
||||
// Context input parameters. The Context must not be nil. A nil Context will
|
||||
// cause a panic. Use the context to add deadlining, timeouts, etc. The
|
||||
// UploadWithContext may create sub-contexts for individual underlying requests.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
// upload. These options are copies of the Uploader instance Upload is called from.
|
||||
// Modifying the options will not impact the original Uploader instance.
|
||||
//
|
||||
// Use the WithUploaderRequestOptions helper function to pass in request
|
||||
// options that will be applied to all API operations made with this uploader.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
|
||||
i := uploader{in: input, cfg: u, ctx: ctx}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(&i.cfg)
|
||||
}
|
||||
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
|
||||
|
||||
return i.upload()
|
||||
}
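
// A minimal sketch of calling UploadWithContext with a deadline; the bucket
// and key names below are hypothetical and error handling is elided:
//
//     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
//     defer cancel()
//
//     result, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
//         Bucket: aws.String("myBucket"),
//         Key:    aws.String("myKey"),
//         Body:   file,
//     })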

// UploadWithIterator will upload a batched amount of objects to S3. This operation uses
// the iterator pattern to know which object to upload next. Since this is an interface this
// allows for custom defined functionality.
//
// Example:
//     svc := s3manager.NewUploader(sess)
//
//     objects := []BatchUploadObject{
//         {
//             Object: &s3manager.UploadInput{
//                 Key:    aws.String("key"),
//                 Bucket: aws.String("bucket"),
//             },
//         },
//     }
//
//     iter := &s3manager.UploadObjectsIterator{Objects: objects}
//     if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
//         return err
//     }
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
	var errs []Error
	for iter.Next() {
		object := iter.UploadObject()
		if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
			s3Err := Error{
				OrigErr: err,
				Bucket:  object.Object.Bucket,
				Key:     object.Object.Key,
			}

			errs = append(errs, s3Err)
		}

		if object.After == nil {
			continue
		}

		if err := object.After(); err != nil {
			s3Err := Error{
				OrigErr: err,
				Bucket:  object.Object.Bucket,
				Key:     object.Object.Key,
			}

			errs = append(errs, s3Err)
		}
	}

	if len(errs) > 0 {
		return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
	}
	return nil
}

// internal structure to manage an upload to S3.
type uploader struct {
	ctx aws.Context
	cfg Uploader

	in *UploadInput

	readerPos int64 // current reader position
	totalSize int64 // set to -1 if the size is not known

	bufferPool sync.Pool
}

// internal logic for deciding whether to upload a single part or use a
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
	u.init()

	if u.cfg.PartSize < MinUploadPartSize {
		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
		return nil, awserr.New("ConfigError", msg, nil)
	}

	// Do one read to determine if we have more than one part
	reader, _, part, err := u.nextReader()
	if err == io.EOF { // single part
		return u.singlePart(reader)
	} else if err != nil {
		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
	}

	mu := multiuploader{uploader: u}
	return mu.upload(reader, part)
}
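
// Note that upload() reads the first chunk eagerly: if that first nextReader
// call reports io.EOF, the whole body fit within a single part and a plain
// PutObject is issued; otherwise the multipart path takes over, reusing the
// part that was already read.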

// init will initialize all default options.
func (u *uploader) init() {
	if u.cfg.Concurrency == 0 {
		u.cfg.Concurrency = DefaultUploadConcurrency
	}
	if u.cfg.PartSize == 0 {
		u.cfg.PartSize = DefaultUploadPartSize
	}
	if u.cfg.MaxUploadParts == 0 {
		u.cfg.MaxUploadParts = MaxUploadParts
	}

	u.bufferPool = sync.Pool{
		New: func() interface{} { return make([]byte, u.cfg.PartSize) },
	}

	// Try to get the total size for some optimizations
	u.initSize()
}

// initSize tries to detect the total stream size, setting u.totalSize. If
// the size is not known, totalSize is set to -1.
func (u *uploader) initSize() {
	u.totalSize = -1

	switch r := u.in.Body.(type) {
	case io.Seeker:
		n, err := aws.SeekerLen(r)
		if err != nil {
			return
		}
		u.totalSize = n

		// Try to adjust partSize if it is too small and account for
		// integer division truncation, e.g. an odd number of bytes.
		if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
			// Add one to the part size to account for remainders
			// during the size calculation.
			u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
		}
	}
}
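
// As a worked example of the adjustment above, assuming default limits: a
// 100 GiB seekable body (107,374,182,400 bytes) at the default 5 MiB part
// size would need well over 10,000 parts, so PartSize is raised to
// 107374182400/10000 + 1 = 10,737,419 bytes (about 10.2 MiB), keeping the
// part count at or under MaxUploadParts.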

// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
	}
	switch r := u.in.Body.(type) {
	case readerAtSeeker:
		var err error

		n := u.cfg.PartSize
		if u.totalSize >= 0 {
			bytesLeft := u.totalSize - u.readerPos

			if bytesLeft <= u.cfg.PartSize {
				err = io.EOF
				n = bytesLeft
			}
		}

		reader := io.NewSectionReader(r, u.readerPos, n)
		u.readerPos += n

		return reader, int(n), nil, err

	default:
		part := u.bufferPool.Get().([]byte)
		n, err := readFillBuf(r, part)
		u.readerPos += int64(n)

		return bytes.NewReader(part[0:n]), n, part, err
	}
}

func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
	for offset < len(b) && err == nil {
		var n int
		n, err = r.Read(b[offset:])
		offset += n
	}

	return offset, err
}
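
// readFillBuf behaves much like io.ReadFull, except that a short read at the
// end of the stream is reported as the underlying io.EOF alongside the
// partial count, rather than io.ErrUnexpectedEOF; nextReader relies on that
// io.EOF to detect the final chunk of an unseekable body.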

// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	// Need to use request form because URL generated in request is
	// used in return.
	req, out := u.cfg.S3.PutObjectRequest(params)
	req.SetContext(u.ctx)
	req.ApplyOptions(u.cfg.RequestOptions...)
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{
		Location:  url,
		VersionID: out.VersionId,
	}, nil
}

// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
	*uploader
	wg       sync.WaitGroup
	m        sync.Mutex
	err      error
	uploadID string
	parts    completedParts
}

// keeps track of a single chunk of data being sent to S3.
type chunk struct {
	buf  io.ReadSeeker
	part []byte
	num  int64
}

// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
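
// CompleteMultipartUpload requires the part list in ascending PartNumber
// order, but parts finish in whatever order the worker goroutines happen to
// complete them, hence the sort.Interface implementation above.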

// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
	if err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.cfg.Concurrency)
	for i := 0; i < u.cfg.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, part: firstPart, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil && err == nil {
		num++
		// This upload exceeded maximum number of supported parts, error now.
		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.cfg.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.cfg.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}

		var reader io.ReadSeeker
		var nextChunkLen int
		var part []byte
		reader, nextChunkLen, part, err = u.nextReader()

		if err != nil && err != io.EOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}

		if nextChunkLen == 0 {
			// No need to upload an empty part. If the file was empty to
			// start with, an empty single part would have been created and
			// the multipart upload never started.
			break
		}

		ch <- chunk{buf: reader, part: part, num: num}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}

	// Create a presigned URL of the S3 Get Object in order to have parity with
	// single part upload.
	getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
		Bucket: u.in.Bucket,
		Key:    u.in.Key,
	})
	getReq.Config.Credentials = credentials.AnonymousCredentials
	uploadLocation, _, _ := getReq.PresignRequest(1)

	return &UploadOutput{
		Location:  uploadLocation,
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}

// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
	defer u.wg.Done()
	for {
		data, ok := <-ch

		if !ok {
			break
		}

		if u.geterr() == nil {
			if err := u.send(data); err != nil {
				u.seterr(err)
			}
		}
	}
}

// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
	params := &s3.UploadPartInput{
		Bucket:               u.in.Bucket,
		Key:                  u.in.Key,
		Body:                 c.buf,
		UploadId:             &u.uploadID,
		SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
		SSECustomerKey:       u.in.SSECustomerKey,
		PartNumber:           &c.num,
	}
	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
	// put the byte array back into the pool to conserve memory
	u.bufferPool.Put(c.part)
	if err != nil {
		return err
	}

	n := c.num
	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}

	u.m.Lock()
	u.parts = append(u.parts, completed)
	u.m.Unlock()

	return nil
}

// geterr is a thread-safe getter for the error object
func (u *multiuploader) geterr() error {
	u.m.Lock()
	defer u.m.Unlock()

	return u.err
}

// seterr is a thread-safe setter for the error object
func (u *multiuploader) seterr(e error) {
	u.m.Lock()
	defer u.m.Unlock()

	u.err = e
}

// fail will abort the multipart upload unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
	if u.cfg.LeavePartsOnError {
		return
	}

	params := &s3.AbortMultipartUploadInput{
		Bucket:   u.in.Bucket,
		Key:      u.in.Key,
		UploadId: &u.uploadID,
	}
	_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
	if err != nil {
		logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
	}
}
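
// When LeavePartsOnError is set, the parts left behind can be listed with
// ListParts and removed with AbortMultipartUpload, or cleaned up
// automatically by a bucket lifecycle rule using the
// AbortIncompleteMultipartUpload action.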

// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
	if u.geterr() != nil {
		u.fail()
		return nil
	}

	// Parts must be sorted in PartNumber order.
	sort.Sort(u.parts)

	params := &s3.CompleteMultipartUploadInput{
		Bucket:          u.in.Bucket,
		Key:             u.in.Key,
		UploadId:        &u.uploadID,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
	}
	resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
	if err != nil {
		u.seterr(err)
		u.fail()
	}

	return resp
}
123
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
generated
vendored
Executable file
@@ -0,0 +1,123 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package s3manager

import (
	"io"
	"time"
)

// UploadInput provides the input parameters for uploading a stream or buffer
// to an object in an Amazon S3 bucket. This type is similar to the s3
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
type UploadInput struct {
	_ struct{} `type:"structure" payload:"Body"`

	// The canned ACL to apply to the object.
	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

	// The readable body payload to send to S3.
	Body io.Reader

	// Name of the bucket to which the PUT operation was initiated.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
	// auto-populated when using the command from the CLI.
	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`

	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

	// Allows grantee to read the object data and its metadata.
	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

	// Allows grantee to read the object ACL.
	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

	// Allows grantee to write the ACL for the applicable object.
	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

	// Object key for which the PUT operation was initiated.
	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// A map of metadata to store with the object in S3.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// The Legal Hold status that you want to apply to the specified object.
	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

	// The Object Lock mode that you want to apply to this object.
	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

	// The date and time when you want this object's Object Lock to expire.
	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

	// Confirms that the requester knows that she or he will be charged for the
	// request. Bucket owners need not specify this parameter in their requests.
	// Documentation on downloading objects from requester pays buckets can be found
	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
	// does not store the encryption key. The key must be appropriate for use with
	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure the encryption
	// key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
	// requests for an object protected by AWS KMS will fail if not made via SSL
	// or using SigV4. Documentation on configuring any of the officially supported
	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in S3
	// (e.g., AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// The type of storage to use for the object. Defaults to 'STANDARD'.
	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
	// (For example, "Key1=Value1")
	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
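
// A minimal sketch of populating UploadInput directly; the bucket, key, and
// metadata values below are hypothetical:
//
//     input := &s3manager.UploadInput{
//         Bucket:      aws.String("myBucket"),
//         Key:         aws.String("reports/2019.csv"),
//         Body:        strings.NewReader("col1,col2\n1,2\n"),
//         ContentType: aws.String("text/csv"),
//         Metadata:    map[string]*string{"origin": aws.String("batch-job")},
//     }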
99
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
Executable file
@@ -0,0 +1,99 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package s3

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/private/protocol/restxml"
)

// S3 provides the API operation methods for making requests to
// Amazon Simple Storage Service. See this package's package overview docs
// for details on the service.
//
// S3 methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type S3 struct {
	*client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
	ServiceName = "s3"        // Name of service.
	EndpointsID = ServiceName // ID to lookup a service endpoint with.
	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
)

// New creates a new instance of the S3 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create a S3 client from just a session.
//     svc := s3.New(mySession)
//
//     // Create a S3 client with additional configuration
//     svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
	c := p.ClientConfig(EndpointsID, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
	svc := &S3{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   ServiceName,
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				Endpoint:      endpoint,
				APIVersion:    "2006-03-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
		s.DisableURIPathEscaping = true
	}))
	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)

	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}

// newRequest creates a new request for a S3 operation and runs any
// custom request initialization.
func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
54
vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
generated
vendored
Executable file
@@ -0,0 +1,54 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)

func validateSSERequiresSSL(r *request.Request) {
	if r.HTTPRequest.URL.Scheme == "https" {
		return
	}

	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
		if len(iface.getSSECustomerKey()) > 0 {
			r.Error = errSSERequiresSSL
			return
		}
	}

	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
		if len(iface.getCopySourceSSECustomerKey()) > 0 {
			r.Error = errSSERequiresSSL
			return
		}
	}
}

func computeSSEKeys(r *request.Request) {
	headers := []string{
		"x-amz-server-side-encryption-customer-key",
		"x-amz-copy-source-server-side-encryption-customer-key",
	}

	for _, h := range headers {
		md5h := h + "-md5"
		if key := r.HTTPRequest.Header.Get(h); key != "" {
			// Base64-encode the value
			b64v := base64.StdEncoding.EncodeToString([]byte(key))
			r.HTTPRequest.Header.Set(h, b64v)

			// Add MD5 if it wasn't computed
			if r.HTTPRequest.Header.Get(md5h) == "" {
				sum := md5.Sum([]byte(key))
				b64sum := base64.StdEncoding.EncodeToString(sum[:])
				r.HTTPRequest.Header.Set(md5h, b64sum)
			}
		}
	}
}
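
// For example, a request built with a raw SSE-C key has its
// x-amz-server-side-encryption-customer-key header value replaced with the
// base64 encoding of the key, and the matching ...-key-md5 header, if absent,
// set to the base64-encoded MD5 digest of the raw key bytes.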
40
vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
generated
vendored
Executable file
@@ -0,0 +1,40 @@
package s3

import (
	"bytes"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", "unable to read response body", err),
			r.HTTPResponse.StatusCode,
			r.RequestID,
		)
		return
	}
	body := bytes.NewReader(b)
	r.HTTPResponse.Body = ioutil.NopCloser(body)
	defer body.Seek(0, sdkio.SeekStart)

	if body.Len() == 0 {
		// If there is no body don't attempt to parse the body.
		return
	}

	unmarshalError(r)
	if err, ok := r.Error.(awserr.Error); ok && err != nil {
		if err.Code() == "SerializationError" {
			r.Error = nil
			return
		}
		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
	}
}
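
// This handler exists because CompleteMultipartUpload can return a 200 OK
// status whose body is actually an error document, for example:
//
//     <Error>
//         <Code>InternalError</Code>
//         <Message>We encountered an internal error. Please try again.</Message>
//     </Error>
//
// An empty or unparseable body is left as a success, while a parsed error is
// surfaced with a 503 status so the SDK's retry logic will re-attempt the
// request.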
82
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
generated
vendored
Executable file
@@ -0,0 +1,82 @@
package s3

import (
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

type xmlErrorResponse struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)

	// Bucket exists in a different region, and request needs
	// to be made to the correct region.
	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
		msg := fmt.Sprintf(
			"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
			aws.StringValue(r.Config.Region),
			aws.StringValue(r.Config.Endpoint),
		)
		if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
			msg += fmt.Sprintf(", bucket is in '%s' region", v)
		}
		r.Error = awserr.NewRequestFailure(
			awserr.New("BucketRegionError", msg, nil),
			r.HTTPResponse.StatusCode,
			r.RequestID,
		)
		return
	}

	var errCode, errMsg string

	// Attempt to parse error from body if it is known
	resp := &xmlErrorResponse{}
	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
	if err != nil && err != io.EOF {
		errCode = "SerializationError"
		errMsg = "failed to decode S3 XML error response"
	} else {
		errCode = resp.Code
		errMsg = resp.Message
		err = nil
	}

	// Fall back to the status code converted to a message if there is
	// still no error code.
	if len(errCode) == 0 {
		statusText := http.StatusText(r.HTTPResponse.StatusCode)
		errCode = strings.Replace(statusText, " ", "", -1)
		errMsg = statusText
	}

	r.Error = awserr.NewRequestFailure(
		awserr.New(errCode, errMsg, err),
		r.HTTPResponse.StatusCode,
		r.RequestID,
	)
}
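
// A typical S3 error body handled by unmarshalError looks like:
//
//     <Error>
//         <Code>NoSuchKey</Code>
//         <Message>The specified key does not exist.</Message>
//     </Error>
//
// which is surfaced as an awserr.RequestFailure with code "NoSuchKey" and
// the message above.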

// A RequestFailure provides access to the S3 Request ID and Host ID values
// returned from API operation errors. Getting the error as a string will
// return the formatted error with the same information as awserr.RequestFailure,
// while also adding the HostID value from the response.
type RequestFailure interface {
	awserr.RequestFailure

	// Host ID is the S3 Host ID needed for debug, and contacting support
	HostID() string
}
214
vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
generated
vendored
Executable file
@@ -0,0 +1,214 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package s3

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
)

// WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
}

// WaitUntilBucketExistsWithContext is an extended version of
// WaitUntilBucketExists, with the support for passing in a context and
// options to configure the Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "WaitUntilBucketExists",
		MaxAttempts: 20,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 200,
			},
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 301,
			},
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 403,
			},
			{
				State:    request.RetryWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 404,
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *HeadBucketInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.HeadBucketRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
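
// A minimal sketch of tuning this waiter from calling code; the bucket name
// is hypothetical and the options are the generic request.WaiterOption
// helpers from the aws/request package:
//
//     err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
//         &s3.HeadBucketInput{Bucket: aws.String("myBucket")},
//         request.WithWaiterMaxAttempts(60),
//         request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
//     )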

// WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
	return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
}

// WaitUntilBucketNotExistsWithContext is an extended version of
// WaitUntilBucketNotExists, with the support for passing in a context and
// options to configure the Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "WaitUntilBucketNotExists",
		MaxAttempts: 20,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 404,
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *HeadBucketInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.HeadBucketRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}

// WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
	return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
}

// WaitUntilObjectExistsWithContext is an extended version of
// WaitUntilObjectExists, with the support for passing in a context and
// options to configure the Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "WaitUntilObjectExists",
		MaxAttempts: 20,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 200,
			},
			{
				State:    request.RetryWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 404,
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *HeadObjectInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.HeadObjectRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}

// WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
	return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
}

// WaitUntilObjectNotExistsWithContext is an extended version of
// WaitUntilObjectNotExists, with the support for passing in a context and
// options to configure the Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name:        "WaitUntilObjectNotExists",
		MaxAttempts: 20,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.StatusWaiterMatch,
				Expected: 404,
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			var inCpy *HeadObjectInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.HeadObjectRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
2401
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
Executable file
File diff suppressed because it is too large
Load Diff
72
vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
generated
vendored
Executable file
@@ -0,0 +1,72 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
// of cryptographically signing requests, managing errors, and retrying requests
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
// that maps to the US East (N. Virginia) region. Additional regions are available
// and are activated by default. For more information, see Activating and Deactivating
// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
// in the AWS General Reference.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on. To learn more about CloudTrail, including how to turn it on and find
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
// See sts package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
//
// Using the Client
//
// To contact AWS Security Token Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Security Token Service client STS for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
package sts
73
vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
generated
vendored
Executable file
@@ -0,0 +1,73 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package sts

const (

	// ErrCodeExpiredTokenException for service response error code
	// "ExpiredTokenException".
	//
	// The web identity token that was passed is expired or is not valid. Get a
	// new identity token from the identity provider and then retry the request.
	ErrCodeExpiredTokenException = "ExpiredTokenException"

	// ErrCodeIDPCommunicationErrorException for service response error code
	// "IDPCommunicationError".
	//
	// The request could not be fulfilled because the non-AWS identity provider
	// (IDP) that was asked to verify the incoming identity token could not be reached.
	// This is often a transient error caused by network conditions. Retry the request
	// a limited number of times so that you don't exceed the request rate. If the
	// error persists, the non-AWS identity provider might be down or not responding.
	ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"

	// ErrCodeIDPRejectedClaimException for service response error code
	// "IDPRejectedClaim".
	//
	// The identity provider (IdP) reported that authentication failed. This might
	// be because the claim is invalid.
	//
	// If this error is returned for the AssumeRoleWithWebIdentity operation, it
	// can also mean that the claim has expired or has been explicitly revoked.
	ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"

	// ErrCodeInvalidAuthorizationMessageException for service response error code
	// "InvalidAuthorizationMessageException".
	//
	// The error returned if the message passed to DecodeAuthorizationMessage was
	// invalid. This can happen if the token contains invalid characters, such as
	// linebreaks.
	ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"

	// ErrCodeInvalidIdentityTokenException for service response error code
	// "InvalidIdentityToken".
	//
	// The web identity token that was passed could not be validated by AWS. Get
	// a new identity token from the identity provider and then retry the request.
	ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"

	// ErrCodeMalformedPolicyDocumentException for service response error code
	// "MalformedPolicyDocument".
	//
	// The request was rejected because the policy document was malformed. The error
	// message describes the specific error.
	ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"

	// ErrCodePackedPolicyTooLargeException for service response error code
	// "PackedPolicyTooLarge".
	//
	// The request was rejected because the policy document was too large. The error
	// message describes how big the policy document is, in packed form, as a percentage
	// of what the API allows.
	ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"

	// ErrCodeRegionDisabledException for service response error code
	// "RegionDisabledException".
	//
	// STS is not activated in the requested region for the account that is being
	// asked to generate credentials. The account administrator must use the IAM
	// console to activate STS in that region. For more information, see Activating
	// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
	// in the IAM User Guide.
	ErrCodeRegionDisabledException = "RegionDisabledException"
)
95
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
Executable file
@@ -0,0 +1,95 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package sts

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/private/protocol/query"
)

// STS provides the API operation methods for making requests to
// AWS Security Token Service. See this package's package overview docs
// for details on the service.
//
// STS methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type STS struct {
	*client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
	ServiceName = "sts"       // Name of service.
	EndpointsID = ServiceName // ID to lookup a service endpoint with.
	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
)

// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create a STS client from just a session.
//     svc := sts.New(mySession)
//
//     // Create a STS client with additional configuration
//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
	c := p.ClientConfig(EndpointsID, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
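The doc comment's example is abbreviated, so a complete, runnable sketch of constructing and exercising the client may be useful here. This is not part of the vendored file; it assumes shared AWS credentials are configured, and GetCallerIdentity is used only because it takes no parameters.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// session.Must panics on configuration errors, keeping the sketch short.
	sess := session.Must(session.NewSession())

	// The extra aws.Config overrides the region for this client only.
	svc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))

	// GetCallerIdentity needs no input and confirms the credentials resolve.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("caller ARN:", aws.StringValue(out.Arn))
}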
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
	svc := &STS{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   ServiceName,
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				Endpoint:      endpoint,
				APIVersion:    "2011-06-15",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}

// newRequest creates a new request for a STS operation and runs any
// custom request initialization.
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
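Because initClient and initRequest are unexported, they can only be assigned from inside package sts; the SDK sets them in separate customization files. The sketch below is a hypothetical illustration of that pattern, not code from this commit: it attaches a build-time handler that logs each operation name when a logger is configured.

// Hypothetical customizations file inside package sts (illustration only).
package sts

import "github.com/aws/aws-sdk-go/aws/request"

func init() {
	initRequest = func(r *request.Request) {
		// Attach a per-request handler that runs after the request is built.
		r.Handlers.Build.PushBack(func(r *request.Request) {
			if r.Config.Logger != nil {
				r.Config.Logger.Log("built STS request: " + r.Operation.Name)
			}
		})
	}
}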