overdue
This commit is contained in:
391
.rclone_repo/fs/accounting/accounting.go
Executable file
391
.rclone_repo/fs/accounting/accounting.go
Executable file
@@ -0,0 +1,391 @@
|
||||
// Package accounting providers an accounting and limiting reader
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrorMaxTransferLimitReached is returned from Read when the max
// transfer limit is reached.
//
// It is wrapped as a fatal error so retries are not attempted.
var ErrorMaxTransferLimitReached = fserrors.FatalError(errors.New("Max transfer limit reached as set by --max-transfer"))
|
||||
|
||||
// Account limits and accounts for one transfer
type Account struct {
	// The mutex is to make sure Read() and Close() aren't called
	// concurrently. Unfortunately the persistent connection loop
	// in http transport calls Read() after Do() returns on
	// CancelRequest so this race can happen when it apparently
	// shouldn't.
	mu     sync.Mutex
	in     io.Reader     // current reader - may be a buffered wrapper around origIn
	origIn io.ReadCloser // the unbuffered reader passed in at creation/update
	close  io.Closer     // what to close - either origIn or the buffered wrapper
	size   int64         // expected size of the transfer, -1 if unknown
	name   string        // name of the transfer for stats reporting

	statmu  sync.Mutex    // Separate mutex for stat values.
	bytes   int64         // Total number of bytes read
	max     int64         // if >=0 the max number of bytes to transfer
	start   time.Time     // Start time of first read
	lpTime  time.Time     // Time of last average measurement
	lpBytes int           // Number of bytes read since last measurement
	avg     float64       // Moving average of last few measurements in bytes/s
	closed  bool          // set if the file is closed
	exit    chan struct{} // channel that will be closed when transfer is finished
	withBuf bool          // is using a buffered in
}

const averagePeriod = 16 // period to do exponentially weighted averages over
|
||||
|
||||
// NewAccountSizeName makes a Account reader for an io.ReadCloser of
// the given size and name.
//
// The Account is registered with the global Stats as an in-progress
// transfer and a background goroutine is started to maintain the
// moving average speed; both are torn down again by Close.
func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account {
	acc := &Account{
		in:     in,
		close:  in,
		origIn: in,
		size:   size,
		name:   name,
		exit:   make(chan struct{}),
		avg:    0,
		lpTime: time.Now(),
		max:    int64(fs.Config.MaxTransfer),
	}
	go acc.averageLoop()
	Stats.inProgress.set(acc.name, acc)
	return acc
}

// NewAccount makes a Account reader for an object taking the size and
// name from the object.
func NewAccount(in io.ReadCloser, obj fs.Object) *Account {
	return NewAccountSizeName(in, obj.Size(), obj.Remote())
}
|
||||
|
||||
// WithBuffer - If the file is above a certain size it adds an Async reader
|
||||
func (acc *Account) WithBuffer() *Account {
|
||||
acc.withBuf = true
|
||||
var buffers int
|
||||
if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
|
||||
buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
|
||||
} else {
|
||||
buffers = int(acc.size / asyncreader.BufferSize)
|
||||
}
|
||||
// On big files add a buffer
|
||||
if buffers > 0 {
|
||||
rc, err := asyncreader.New(acc.origIn, buffers)
|
||||
if err != nil {
|
||||
fs.Errorf(acc.name, "Failed to make buffer: %v", err)
|
||||
} else {
|
||||
acc.in = rc
|
||||
acc.close = rc
|
||||
}
|
||||
}
|
||||
return acc
|
||||
}
|
||||
|
||||
// GetReader returns the underlying io.ReadCloser under any Buffer
func (acc *Account) GetReader() io.ReadCloser {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.origIn
}

// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		return asyncIn
	}
	return nil
}

// StopBuffering stops the async buffer doing any more buffering
//
// NOTE(review): unlike the other accessors this reads acc.in without
// taking acc.mu - UpdateReader calls it with the lock already held, but
// any direct external call is unsynchronized; confirm callers.
func (acc *Account) StopBuffering() {
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		asyncIn.Abandon()
	}
}
|
||||
|
||||
// UpdateReader updates the underlying io.ReadCloser stopping the
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(in io.ReadCloser) {
	acc.mu.Lock()
	// Abandon any existing async buffer before swapping readers
	acc.StopBuffering()
	acc.in = in
	acc.close = in
	acc.origIn = in
	// Re-add a buffer around the new reader if appropriate
	acc.WithBuffer()
	acc.mu.Unlock()
}
|
||||
|
||||
// averageLoop calculates averages for the stats in the background
|
||||
func (acc *Account) averageLoop() {
|
||||
tick := time.NewTicker(time.Second)
|
||||
var period float64
|
||||
defer tick.Stop()
|
||||
for {
|
||||
select {
|
||||
case now := <-tick.C:
|
||||
acc.statmu.Lock()
|
||||
// Add average of last second.
|
||||
elapsed := now.Sub(acc.lpTime).Seconds()
|
||||
avg := float64(acc.lpBytes) / elapsed
|
||||
// Soft start the moving average
|
||||
if period < averagePeriod {
|
||||
period++
|
||||
}
|
||||
acc.avg = (avg + (period-1)*acc.avg) / period
|
||||
acc.lpBytes = 0
|
||||
acc.lpTime = now
|
||||
// Unlock stats
|
||||
acc.statmu.Unlock()
|
||||
case <-acc.exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// read bytes from the io.Reader passed in and account them
|
||||
func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
|
||||
acc.statmu.Lock()
|
||||
if acc.max >= 0 && Stats.GetBytes() >= acc.max {
|
||||
acc.statmu.Unlock()
|
||||
return 0, ErrorMaxTransferLimitReached
|
||||
}
|
||||
// Set start time.
|
||||
if acc.start.IsZero() {
|
||||
acc.start = time.Now()
|
||||
}
|
||||
acc.statmu.Unlock()
|
||||
|
||||
n, err = in.Read(p)
|
||||
|
||||
// Update Stats
|
||||
acc.statmu.Lock()
|
||||
acc.lpBytes += n
|
||||
acc.bytes += int64(n)
|
||||
acc.statmu.Unlock()
|
||||
|
||||
Stats.Bytes(int64(n))
|
||||
|
||||
limitBandwidth(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Read bytes from the object - see io.Reader
func (acc *Account) Read(p []byte) (n int, err error) {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.read(acc.in, p)
}

// Close the object
//
// Safe to call more than once - subsequent calls are no-ops. Closing
// stops the average loop, removes the transfer from the in-progress
// stats and closes the underlying reader.
func (acc *Account) Close() error {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	if acc.closed {
		return nil
	}
	acc.closed = true
	close(acc.exit)
	Stats.inProgress.clear(acc.name)
	return acc.close.Close()
}
|
||||
|
||||
// progress returns bytes read as well as the size.
|
||||
// Size can be <= 0 if the size is unknown.
|
||||
func (acc *Account) progress() (bytes, size int64) {
|
||||
if acc == nil {
|
||||
return 0, 0
|
||||
}
|
||||
acc.statmu.Lock()
|
||||
bytes, size = acc.bytes, acc.size
|
||||
acc.statmu.Unlock()
|
||||
return bytes, size
|
||||
}
|
||||
|
||||
// speed returns the speed of the current file transfer
|
||||
// in bytes per second, as well a an exponentially weighted moving average
|
||||
// If no read has completed yet, 0 is returned for both values.
|
||||
func (acc *Account) speed() (bps, current float64) {
|
||||
if acc == nil {
|
||||
return 0, 0
|
||||
}
|
||||
acc.statmu.Lock()
|
||||
defer acc.statmu.Unlock()
|
||||
if acc.bytes == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
// Calculate speed from first read.
|
||||
total := float64(time.Now().Sub(acc.start)) / float64(time.Second)
|
||||
bps = float64(acc.bytes) / total
|
||||
current = acc.avg
|
||||
return
|
||||
}
|
||||
|
||||
// eta returns the ETA of the current operation,
|
||||
// rounded to full seconds.
|
||||
// If the ETA cannot be determined 'ok' returns false.
|
||||
func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
|
||||
if acc == nil {
|
||||
return 0, false
|
||||
}
|
||||
acc.statmu.Lock()
|
||||
defer acc.statmu.Unlock()
|
||||
return eta(acc.bytes, acc.size, acc.avg)
|
||||
}
|
||||
|
||||
// String produces stats for this file
|
||||
func (acc *Account) String() string {
|
||||
a, b := acc.progress()
|
||||
_, cur := acc.speed()
|
||||
eta, etaok := acc.eta()
|
||||
etas := "-"
|
||||
if etaok {
|
||||
if eta > 0 {
|
||||
etas = fmt.Sprintf("%v", eta)
|
||||
} else {
|
||||
etas = "0s"
|
||||
}
|
||||
}
|
||||
name := []rune(acc.name)
|
||||
if fs.Config.StatsFileNameLength > 0 {
|
||||
if len(name) > fs.Config.StatsFileNameLength {
|
||||
where := len(name) - fs.Config.StatsFileNameLength
|
||||
name = append([]rune{'.', '.', '.'}, name[where:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if fs.Config.DataRateUnit == "bits" {
|
||||
cur = cur * 8
|
||||
}
|
||||
|
||||
percentageDone := 0
|
||||
if b > 0 {
|
||||
percentageDone = int(100 * float64(a) / float64(b))
|
||||
}
|
||||
|
||||
done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
|
||||
|
||||
return fmt.Sprintf("%45s: %s, %s/s, %s",
|
||||
string(name),
|
||||
done,
|
||||
fs.SizeSuffix(cur),
|
||||
etas,
|
||||
)
|
||||
}
|
||||
|
||||
// RemoteStats produces stats for this file as a map suitable for
// serialization over the rc interface.
func (acc *Account) RemoteStats() (out map[string]interface{}) {
	out = make(map[string]interface{})
	a, b := acc.progress()
	out["bytes"] = a
	out["size"] = b
	spd, cur := acc.speed()
	out["speed"] = spd
	out["speedAvg"] = cur

	// "eta" is null when it cannot be determined
	eta, etaok := acc.eta()
	out["eta"] = nil
	if etaok {
		if eta > 0 {
			out["eta"] = eta.Seconds()
		} else {
			out["eta"] = 0
		}
	}
	out["name"] = acc.name

	percentageDone := 0
	if b > 0 {
		percentageDone = int(100 * float64(a) / float64(b))
	}
	out["percentage"] = percentageDone

	return out
}
|
||||
|
||||
// OldStream returns the top io.Reader
func (acc *Account) OldStream() io.Reader {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.in
}

// SetStream updates the top io.Reader
func (acc *Account) SetStream(in io.Reader) {
	acc.mu.Lock()
	acc.in = in
	acc.mu.Unlock()
}

// WrapStream wraps an io Reader so it will be accounted in the same
// way as account
func (acc *Account) WrapStream(in io.Reader) io.Reader {
	return &accountStream{
		acc: acc,
		in:  in,
	}
}
|
||||
|
||||
// accountStream accounts a single io.Reader into a parent *Account
type accountStream struct {
	acc *Account  // parent account the reads are charged to
	in  io.Reader // the stream being accounted
}

// OldStream return the underlying stream
func (a *accountStream) OldStream() io.Reader {
	return a.in
}

// SetStream set the underlying stream
func (a *accountStream) SetStream(in io.Reader) {
	a.in = in
}

// WrapStream wrap in in an accounter
func (a *accountStream) WrapStream(in io.Reader) io.Reader {
	return a.acc.WrapStream(in)
}

// Read bytes from the object - see io.Reader
func (a *accountStream) Read(p []byte) (n int, err error) {
	return a.acc.read(a.in, p)
}
|
||||
|
||||
// Accounter accounts a stream allowing the accounting to be removed and re-added
|
||||
type Accounter interface {
|
||||
io.Reader
|
||||
OldStream() io.Reader
|
||||
SetStream(io.Reader)
|
||||
WrapStream(io.Reader) io.Reader
|
||||
}
|
||||
|
||||
// WrapFn wraps an io.Reader (for accounting purposes usually)
|
||||
type WrapFn func(io.Reader) io.Reader
|
||||
|
||||
// UnWrap unwraps a reader returning unwrapped and wrap, a function to
|
||||
// wrap it back up again. If `in` is an Accounter then this function
|
||||
// will take the accounting unwrapped and wrap will put it back on
|
||||
// again the new Reader passed in.
|
||||
//
|
||||
// This allows functions which wrap io.Readers to move the accounting
|
||||
// to the end of the wrapped chain of readers. This is very important
|
||||
// if buffering is being introduced and if the Reader might be wrapped
|
||||
// again.
|
||||
func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
|
||||
acc, ok := in.(Accounter)
|
||||
if !ok {
|
||||
return in, func(r io.Reader) io.Reader { return r }
|
||||
}
|
||||
return acc.OldStream(), acc.WrapStream
|
||||
}
|
||||
10
.rclone_repo/fs/accounting/accounting_other.go
Executable file
10
.rclone_repo/fs/accounting/accounting_other.go
Executable file
@@ -0,0 +1,10 @@
|
||||
// Accounting and limiting reader
|
||||
// Non-unix specific functions.
|
||||
|
||||
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
||||
|
||||
package accounting
|
||||
|
||||
// startSignalHandler() is Unix specific and does nothing under non-Unix
// platforms.
func startSignalHandler() {}
|
||||
210
.rclone_repo/fs/accounting/accounting_test.go
Executable file
210
.rclone_repo/fs/accounting/accounting_test.go
Executable file
@@ -0,0 +1,210 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the interfaces
var (
	_ io.ReadCloser = &Account{}
	_ io.Reader     = &accountStream{}
	_ Accounter     = &Account{}
	_ Accounter     = &accountStream{}
)

// Check that NewAccountSizeName registers the transfer with the global
// Stats and that Close unregisters it again.
func TestNewAccountSizeName(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
	acc := NewAccountSizeName(in, 1, "test")
	assert.Equal(t, in, acc.in)
	assert.Equal(t, acc, Stats.inProgress.get("test"))
	err := acc.Close()
	assert.NoError(t, err)
	assert.Nil(t, Stats.inProgress.get("test"))
}

// Check that NewAccount picks up size and name from the object.
func TestNewAccount(t *testing.T) {
	obj := mockobject.Object("test")
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
	acc := NewAccount(in, obj)
	assert.Equal(t, in, acc.in)
	assert.Equal(t, acc, Stats.inProgress.get("test"))
	err := acc.Close()
	assert.NoError(t, err)
	assert.Nil(t, Stats.inProgress.get("test"))
}
|
||||
|
||||
// Check WithBuffer adds an AsyncReader for unknown sizes but not for
// small known sizes.
func TestAccountWithBuffer(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))

	acc := NewAccountSizeName(in, -1, "test")
	acc.WithBuffer()
	// should have a buffer for an unknown size
	_, ok := acc.in.(*asyncreader.AsyncReader)
	require.True(t, ok)
	assert.NoError(t, acc.Close())

	acc = NewAccountSizeName(in, 1, "test")
	acc.WithBuffer()
	// should not have a buffer for a small size
	_, ok = acc.in.(*asyncreader.AsyncReader)
	require.False(t, ok)
	assert.NoError(t, acc.Close())
}

// Check GetReader returns the original reader and UpdateReader swaps it.
func TestAccountGetUpdateReader(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
	acc := NewAccountSizeName(in, 1, "test")

	assert.Equal(t, in, acc.GetReader())

	in2 := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
	acc.UpdateReader(in2)

	assert.Equal(t, in2, acc.GetReader())

	assert.NoError(t, acc.Close())
}
|
||||
|
||||
// Check that Read accounts bytes into both the Account and the global
// Stats, and passes through EOF.
func TestAccountRead(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
	acc := NewAccountSizeName(in, 1, "test")

	// Nothing accounted before the first read
	assert.True(t, acc.start.IsZero())
	assert.Equal(t, 0, acc.lpBytes)
	assert.Equal(t, int64(0), acc.bytes)
	assert.Equal(t, int64(0), Stats.bytes)

	var buf = make([]byte, 2)
	n, err := acc.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t, 2, n)
	assert.Equal(t, []byte{1, 2}, buf[:n])

	// First read should have started the clock and counted 2 bytes
	assert.False(t, acc.start.IsZero())
	assert.Equal(t, 2, acc.lpBytes)
	assert.Equal(t, int64(2), acc.bytes)
	assert.Equal(t, int64(2), Stats.bytes)

	n, err = acc.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, []byte{3}, buf[:n])

	n, err = acc.Read(buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	assert.NoError(t, acc.Close())
}

// Check the formatted per-file stats line before and after reading.
func TestAccountString(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
	acc := NewAccountSizeName(in, 3, "test")

	// FIXME not an exhaustive test!

	assert.Equal(t, "test: 0% /3, 0/s, -", strings.TrimSpace(acc.String()))

	var buf = make([]byte, 2)
	n, err := acc.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t, 2, n)

	assert.Equal(t, "test: 66% /3, 0/s, -", strings.TrimSpace(acc.String()))

	assert.NoError(t, acc.Close())
}
|
||||
|
||||
// Test the Accounter interface methods on Account and accountStream
func TestAccountAccounter(t *testing.T) {
	in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
	acc := NewAccountSizeName(in, 3, "test")

	assert.True(t, in == acc.OldStream())

	in2 := ioutil.NopCloser(bytes.NewBuffer([]byte{2, 3, 4}))

	acc.SetStream(in2)
	assert.True(t, in2 == acc.OldStream())

	// Wrapping a reader should produce an accountStream charging acc
	// without disturbing acc's own stream
	r := acc.WrapStream(in)
	as, ok := r.(Accounter)
	require.True(t, ok)
	assert.True(t, in == as.OldStream())
	assert.True(t, in2 == acc.OldStream())
	accs, ok := r.(*accountStream)
	require.True(t, ok)
	assert.Equal(t, acc, accs.acc)
	assert.True(t, in == accs.in)

	// Check Read on the accountStream
	var buf = make([]byte, 2)
	n, err := r.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t, 2, n)
	assert.Equal(t, []byte{1, 2}, buf[:n])

	// Test that we can get another accountstream out
	in3 := ioutil.NopCloser(bytes.NewBuffer([]byte{3, 1, 2}))
	r2 := as.WrapStream(in3)
	as2, ok := r2.(Accounter)
	require.True(t, ok)
	assert.True(t, in3 == as2.OldStream())
	assert.True(t, in2 == acc.OldStream())
	accs2, ok := r2.(*accountStream)
	require.True(t, ok)
	assert.Equal(t, acc, accs2.acc)
	assert.True(t, in3 == accs2.in)

	// Test we can set this new accountStream
	as2.SetStream(in)
	assert.True(t, in == as2.OldStream())

	// Test UnWrap on accountStream
	unwrapped, wrap := UnWrap(r2)
	assert.True(t, unwrapped == in)
	r3 := wrap(in2)
	assert.True(t, in2 == r3.(Accounter).OldStream())

	// TestUnWrap on a normal io.Reader
	unwrapped, wrap = UnWrap(in2)
	assert.True(t, unwrapped == in2)
	assert.True(t, wrap(in3) == in3)

}
|
||||
|
||||
// Check that reads stop with a fatal ErrorMaxTransferLimitReached once
// the global byte count passes --max-transfer.
func TestAccountMaxTransfer(t *testing.T) {
	old := fs.Config.MaxTransfer
	fs.Config.MaxTransfer = 15
	defer func() {
		// restore the global config for other tests
		fs.Config.MaxTransfer = old
	}()
	Stats.ResetCounters()

	in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
	acc := NewAccountSizeName(in, 1, "test")

	var b = make([]byte, 10)

	// limit is 15 so the check (done before each read) passes at 0 and
	// 10 bytes, then fails at 20
	n, err := acc.Read(b)
	assert.Equal(t, 10, n)
	assert.NoError(t, err)
	n, err = acc.Read(b)
	assert.Equal(t, 10, n)
	assert.NoError(t, err)
	n, err = acc.Read(b)
	assert.Equal(t, 0, n)
	assert.Equal(t, ErrorMaxTransferLimitReached, err)
	assert.True(t, fserrors.IsFatalError(err))
}
|
||||
36
.rclone_repo/fs/accounting/accounting_unix.go
Executable file
36
.rclone_repo/fs/accounting/accounting_unix.go
Executable file
@@ -0,0 +1,36 @@
|
||||
// Accounting and limiting reader
|
||||
// Unix specific functions.
|
||||
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
func startSignalHandler() {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGUSR2)

	go func() {
		// This runs forever, but blocks until the signal is received.
		for {
			<-signals
			// Swap the active and saved token buckets to toggle the
			// bandwidth limit on and off
			tokenBucketMu.Lock()
			bwLimitToggledOff = !bwLimitToggledOff
			tokenBucket, prevTokenBucket = prevTokenBucket, tokenBucket
			s := "disabled"
			if tokenBucket != nil {
				s = "enabled"
			}
			tokenBucketMu.Unlock()
			fs.Logf(nil, "Bandwidth limit %s by user", s)
		}
	}()
}
|
||||
41
.rclone_repo/fs/accounting/inprogress.go
Executable file
41
.rclone_repo/fs/accounting/inprogress.go
Executable file
@@ -0,0 +1,41 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// inProgress holds a synchronized map of in progress transfers
type inProgress struct {
	mu sync.Mutex          // protects m
	m  map[string]*Account // transfers in progress keyed by name
}

// newInProgress makes a new inProgress object
func newInProgress() *inProgress {
	return &inProgress{
		m: make(map[string]*Account, fs.Config.Transfers),
	}
}

// set marks the name as in progress
func (ip *inProgress) set(name string, acc *Account) {
	ip.mu.Lock()
	defer ip.mu.Unlock()
	ip.m[name] = acc
}

// clear marks the name as no longer in progress
func (ip *inProgress) clear(name string) {
	ip.mu.Lock()
	defer ip.mu.Unlock()
	delete(ip.m, name)
}

// get gets the account for name, or nil if not found
func (ip *inProgress) get(name string) *Account {
	ip.mu.Lock()
	defer ip.mu.Unlock()
	return ip.m[name]
}
|
||||
406
.rclone_repo/fs/accounting/stats.go
Executable file
406
.rclone_repo/fs/accounting/stats.go
Executable file
@@ -0,0 +1,406 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
)
|
||||
|
||||
var (
|
||||
// Stats is global statistics counter
|
||||
Stats = NewStats()
|
||||
)
|
||||
|
||||
// init wires the global Stats into the fs error counter and registers
// the "core/stats" remote-control endpoint.
func init() {
	// Set the function pointer up in fs
	fs.CountError = Stats.Error

	rc.Add(rc.Call{
		Path:  "core/stats",
		Fn:    Stats.RemoteStats,
		Title: "Returns stats about current transfers.",
		Help: `
This returns all available stats

	rclone rc core/stats

Returns the following values:

` + "```" + `
{
	"speed": average speed in bytes/sec since start of the process,
	"bytes": total transferred bytes since the start of the process,
	"errors": number of errors,
	"checks": number of checked files,
	"transfers": number of transferred files,
	"deletes" : number of deleted files,
	"elapsedTime": time in seconds since the start of the process,
	"lastError": last occurred error,
	"transferring": an array of currently active file transfers:
		[
			{
				"bytes": total transferred bytes for this file,
				"eta": estimated time in seconds until file transfer completion
				"name": name of the file,
				"percentage": progress of the file transfer in percent,
				"speed": speed in bytes/sec,
				"speedAvg": speed in bytes/sec as an exponentially weighted moving average,
				"size": size of the file in bytes
			}
		],
	"checking": an array of names of currently active file checks
		[]
}
` + "```" + `
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
The value for "eta" is null if an eta cannot be determined.
`,
	})
}
|
||||
|
||||
// StatsInfo accounts all transfers
type StatsInfo struct {
	mu                sync.RWMutex // protects all the counters below
	bytes             int64        // total bytes transferred
	errors            int64        // number of errors counted
	lastError         error        // last error seen
	checks            int64        // number of completed checks
	checking          *stringSet   // names currently being checked (own locking)
	checkQueue        int          // number of checks queued
	checkQueueSize    int64        // size of queued checks in bytes
	transfers         int64        // number of completed transfers
	transferring      *stringSet   // names currently transferring (own locking)
	transferQueue     int          // number of transfers queued
	transferQueueSize int64        // size of queued transfers in bytes
	renameQueue       int          // number of renames queued
	renameQueueSize   int64        // size of queued renames in bytes
	deletes           int64        // number of deletes
	start             time.Time    // when accounting started
	inProgress        *inProgress  // per-transfer Accounts (own locking)
}
|
||||
|
||||
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
	return &StatsInfo{
		checking:     newStringSet(fs.Config.Checkers),
		transferring: newStringSet(fs.Config.Transfers),
		start:        time.Now(),
		inProgress:   newInProgress(),
	}
}
|
||||
|
||||
// RemoteStats returns stats for rc
|
||||
func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
s.mu.RLock()
|
||||
dt := time.Now().Sub(s.start)
|
||||
dtSeconds := dt.Seconds()
|
||||
speed := 0.0
|
||||
if dt > 0 {
|
||||
speed = float64(s.bytes) / dtSeconds
|
||||
}
|
||||
out["speed"] = speed
|
||||
out["bytes"] = s.bytes
|
||||
out["errors"] = s.errors
|
||||
out["checks"] = s.checks
|
||||
out["transfers"] = s.transfers
|
||||
out["deletes"] = s.deletes
|
||||
out["elapsedTime"] = dtSeconds
|
||||
s.mu.RUnlock()
|
||||
if !s.checking.empty() {
|
||||
var c []string
|
||||
s.checking.mu.RLock()
|
||||
defer s.checking.mu.RUnlock()
|
||||
for name := range s.checking.items {
|
||||
c = append(c, name)
|
||||
}
|
||||
out["checking"] = c
|
||||
}
|
||||
if !s.transferring.empty() {
|
||||
var t []interface{}
|
||||
s.transferring.mu.RLock()
|
||||
defer s.transferring.mu.RUnlock()
|
||||
for name := range s.transferring.items {
|
||||
if acc := s.inProgress.get(name); acc != nil {
|
||||
t = append(t, acc.RemoteStats())
|
||||
} else {
|
||||
t = append(t, name)
|
||||
}
|
||||
}
|
||||
out["transferring"] = t
|
||||
}
|
||||
if s.errors > 0 {
|
||||
out["lastError"] = s.lastError
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// eta returns the ETA of the current operation,
|
||||
// rounded to full seconds.
|
||||
// If the ETA cannot be determined 'ok' returns false.
|
||||
func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
|
||||
if total <= 0 || size < 0 || rate <= 0 {
|
||||
return 0, false
|
||||
}
|
||||
remaining := total - size
|
||||
if remaining < 0 {
|
||||
return 0, false
|
||||
}
|
||||
seconds := float64(remaining) / rate
|
||||
return time.Second * time.Duration(seconds), true
|
||||
}
|
||||
|
||||
// etaString returns the ETA of the current operation,
|
||||
// rounded to full seconds.
|
||||
// If the ETA cannot be determined it returns "-"
|
||||
func etaString(done, total int64, rate float64) string {
|
||||
d, ok := eta(done, total, rate)
|
||||
if !ok {
|
||||
return "-"
|
||||
}
|
||||
return d.String()
|
||||
}
|
||||
|
||||
// percent returns a/b as a percentage rounded to the nearest integer
// as a string
//
// if the percentage is invalid it returns "-"
func percent(a int64, b int64) string {
	if b <= 0 || a < 0 {
		return "-"
	}
	rounded := int(float64(a)*100/float64(b) + 0.5)
	return fmt.Sprintf("%d%%", rounded)
}
|
||||
|
||||
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
	// checking and transferring have their own locking so read
	// here before lock to prevent deadlock on GetBytes
	transferring, checking := s.transferring.count(), s.checking.count()
	transferringBytesDone, transferringBytesTotal := s.transferring.progress()

	s.mu.RLock()

	dt := time.Now().Sub(s.start)
	dtSeconds := dt.Seconds()
	speed := 0.0
	if dt > 0 {
		speed = float64(s.bytes) / dtSeconds
	}
	// Round elapsed time down to a tenth of a second for display
	dtRounded := dt - (dt % (time.Second / 10))

	if fs.Config.DataRateUnit == "bits" {
		speed = speed * 8
	}

	var (
		totalChecks   = int64(s.checkQueue) + s.checks + int64(checking)
		totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
		// note that s.bytes already includes transferringBytesDone so
		// we take it off here to avoid double counting
		totalSize    = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
		currentSize  = s.bytes
		buf          = &bytes.Buffer{}
		xfrchkString = ""
	)

	if !fs.Config.StatsOneLine {
		_, _ = fmt.Fprintf(buf, "\nTransferred: ")
	} else {
		// In one-line mode append compact transfer/check progress markers
		xfrchk := []string{}
		if totalTransfer > 0 && s.transferQueue > 0 {
			xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
		}
		if totalChecks > 0 && s.checkQueue > 0 {
			xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
		}
		if len(xfrchk) > 0 {
			xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
		}
	}

	_, _ = fmt.Fprintf(buf, "%10s / %s, %s, %s, ETA %s%s",
		fs.SizeSuffix(s.bytes),
		fs.SizeSuffix(totalSize).Unit("Bytes"),
		percent(s.bytes, totalSize),
		fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
		etaString(currentSize, totalSize, speed),
		xfrchkString,
	)

	if !fs.Config.StatsOneLine {
		_, _ = fmt.Fprintf(buf, `
Errors: %10d
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
			s.errors,
			s.checks, totalChecks, percent(s.checks, totalChecks),
			s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
			dtRounded)
	}

	// checking and transferring have their own locking so unlock
	// here to prevent deadlock on GetBytes
	s.mu.RUnlock()

	// Add per transfer stats if required
	if !fs.Config.StatsOneLine {
		if !s.checking.empty() {
			_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
		}
		if !s.transferring.empty() {
			_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
		}
	}

	return buf.String()
}
|
||||
|
||||
// Log outputs the StatsInfo to the log at the configured stats log level
func (s *StatsInfo) Log() {
	fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
}
|
||||
|
||||
// Bytes updates the stats for bytes bytes
func (s *StatsInfo) Bytes(bytes int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.bytes += bytes
}

// GetBytes returns the number of bytes transferred so far
func (s *StatsInfo) GetBytes() int64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.bytes
}

// Errors updates the stats for errors
func (s *StatsInfo) Errors(errors int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.errors += errors
}

// GetErrors reads the number of errors
func (s *StatsInfo) GetErrors() int64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.errors
}

// GetLastError returns the lastError
func (s *StatsInfo) GetLastError() error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.lastError
}
|
||||
|
||||
// Deletes updates the stats for deletes and returns the new total
func (s *StatsInfo) Deletes(deletes int64) int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.deletes += deletes
	return s.deletes
}

// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes) to 0
func (s *StatsInfo) ResetCounters() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.bytes = 0
	s.errors = 0
	s.checks = 0
	s.transfers = 0
	s.deletes = 0
}

// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.errors = 0
}

// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.errors != 0
}
|
||||
|
||||
// Error adds a single error into the stats and assigns lastError
|
||||
func (s *StatsInfo) Error(err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.errors++
|
||||
s.lastError = err
|
||||
}
|
||||
|
||||
// Checking adds a check for remote into the in-progress set
// (the stringSet does its own locking, so no s.mu here).
func (s *StatsInfo) Checking(remote string) {
	s.checking.add(remote)
}
|
||||
|
||||
// DoneChecking removes a check from the stats
|
||||
func (s *StatsInfo) DoneChecking(remote string) {
|
||||
s.checking.del(remote)
|
||||
s.mu.Lock()
|
||||
s.checks++
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// GetTransfers reads the number of transfers
|
||||
func (s *StatsInfo) GetTransfers() int64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.transfers
|
||||
}
|
||||
|
||||
// Transferring adds a transfer for remote into the in-progress set
// (the stringSet does its own locking, so no s.mu here).
func (s *StatsInfo) Transferring(remote string) {
	s.transferring.add(remote)
}
|
||||
|
||||
// DoneTransferring removes a transfer from the stats
|
||||
//
|
||||
// if ok is true then it increments the transfers count
|
||||
func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
|
||||
s.transferring.del(remote)
|
||||
if ok {
|
||||
s.mu.Lock()
|
||||
s.transfers++
|
||||
s.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// SetCheckQueue sets the number of queued checks
|
||||
func (s *StatsInfo) SetCheckQueue(n int, size int64) {
|
||||
s.mu.Lock()
|
||||
s.checkQueue = n
|
||||
s.checkQueueSize = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// SetTransferQueue sets the number of queued transfers
|
||||
func (s *StatsInfo) SetTransferQueue(n int, size int64) {
|
||||
s.mu.Lock()
|
||||
s.transferQueue = n
|
||||
s.transferQueueSize = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// SetRenameQueue sets the number of queued renames and their total
// size. (The original comment said "transfers" — copy-paste slip.)
func (s *StatsInfo) SetRenameQueue(n int, size int64) {
	s.mu.Lock()
	s.renameQueue = n
	s.renameQueueSize = size
	s.mu.Unlock()
}
|
||||
51
.rclone_repo/fs/accounting/stats_test.go
Executable file
51
.rclone_repo/fs/accounting/stats_test.go
Executable file
@@ -0,0 +1,51 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestETA exercises eta() and etaString() over normal progress,
// boundary (done, zero total) and invalid inputs (negative size/total,
// overshoot, zero or negative rate), which must report "-"/not-ok.
func TestETA(t *testing.T) {
	for _, test := range []struct {
		size, total int64
		rate        float64
		wantETA     time.Duration
		wantOK      bool
		wantString  string
	}{
		{size: 0, total: 100, rate: 1.0, wantETA: 100 * time.Second, wantOK: true, wantString: "1m40s"},
		{size: 50, total: 100, rate: 1.0, wantETA: 50 * time.Second, wantOK: true, wantString: "50s"},
		{size: 100, total: 100, rate: 1.0, wantETA: 0 * time.Second, wantOK: true, wantString: "0s"},
		{size: -1, total: 100, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
		{size: 200, total: 100, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
		{size: 10, total: -1, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
		{size: 10, total: 20, rate: 0.0, wantETA: 0, wantOK: false, wantString: "-"},
		{size: 10, total: 20, rate: -1.0, wantETA: 0, wantOK: false, wantString: "-"},
		{size: 0, total: 0, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
	} {
		t.Run(fmt.Sprintf("size=%d/total=%d/rate=%f", test.size, test.total, test.rate), func(t *testing.T) {
			gotETA, gotOK := eta(test.size, test.total, test.rate)
			assert.Equal(t, test.wantETA, gotETA)
			assert.Equal(t, test.wantOK, gotOK)
			gotString := etaString(test.size, test.total, test.rate)
			assert.Equal(t, test.wantString, gotString)
		})
	}
}
|
||||
|
||||
func TestPercentage(t *testing.T) {
|
||||
assert.Equal(t, percent(0, 1000), "0%")
|
||||
assert.Equal(t, percent(1, 1000), "0%")
|
||||
assert.Equal(t, percent(9, 1000), "1%")
|
||||
assert.Equal(t, percent(500, 1000), "50%")
|
||||
assert.Equal(t, percent(1000, 1000), "100%")
|
||||
assert.Equal(t, percent(1E8, 1E9), "10%")
|
||||
assert.Equal(t, percent(1E8, 1E9), "10%")
|
||||
assert.Equal(t, percent(0, 0), "-")
|
||||
assert.Equal(t, percent(100, -100), "-")
|
||||
assert.Equal(t, percent(-100, 100), "-")
|
||||
assert.Equal(t, percent(-100, -100), "-")
|
||||
}
|
||||
88
.rclone_repo/fs/accounting/stringset.go
Executable file
88
.rclone_repo/fs/accounting/stringset.go
Executable file
@@ -0,0 +1,88 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// stringSet holds a set of strings (remote names), safe for
// concurrent use.
type stringSet struct {
	mu    sync.RWMutex        // guards items
	items map[string]struct{} // the set members
}
|
||||
|
||||
// newStringSet creates a new empty string set of capacity size
|
||||
func newStringSet(size int) *stringSet {
|
||||
return &stringSet{
|
||||
items: make(map[string]struct{}, size),
|
||||
}
|
||||
}
|
||||
|
||||
// add adds remote to the set
|
||||
func (ss *stringSet) add(remote string) {
|
||||
ss.mu.Lock()
|
||||
ss.items[remote] = struct{}{}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
// del removes remote from the set
|
||||
func (ss *stringSet) del(remote string) {
|
||||
ss.mu.Lock()
|
||||
delete(ss.items, remote)
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
// empty returns whether the set has any items
|
||||
func (ss *stringSet) empty() bool {
|
||||
ss.mu.RLock()
|
||||
defer ss.mu.RUnlock()
|
||||
return len(ss.items) == 0
|
||||
}
|
||||
|
||||
// count returns the number of items in the set
|
||||
func (ss *stringSet) count() int {
|
||||
ss.mu.RLock()
|
||||
defer ss.mu.RUnlock()
|
||||
return len(ss.items)
|
||||
}
|
||||
|
||||
// Strings returns all the strings in the stringSet
|
||||
func (ss *stringSet) Strings() []string {
|
||||
ss.mu.RLock()
|
||||
defer ss.mu.RUnlock()
|
||||
strings := make([]string, 0, len(ss.items))
|
||||
for name := range ss.items {
|
||||
var out string
|
||||
if acc := Stats.inProgress.get(name); acc != nil {
|
||||
out = acc.String()
|
||||
} else {
|
||||
out = name
|
||||
}
|
||||
strings = append(strings, " * "+out)
|
||||
}
|
||||
sorted := sort.StringSlice(strings)
|
||||
sorted.Sort()
|
||||
return sorted
|
||||
}
|
||||
|
||||
// String returns all the file names in the stringSet, one per line,
// sorted (see Strings).
func (ss *stringSet) String() string {
	return strings.Join(ss.Strings(), "\n")
}
|
||||
|
||||
// progress returns total bytes read as well as the size.
|
||||
func (ss *stringSet) progress() (totalBytes, totalSize int64) {
|
||||
ss.mu.RLock()
|
||||
defer ss.mu.RUnlock()
|
||||
for name := range ss.items {
|
||||
if acc := Stats.inProgress.get(name); acc != nil {
|
||||
bytes, size := acc.progress()
|
||||
if size >= 0 && bytes >= 0 {
|
||||
totalBytes += bytes
|
||||
totalSize += size
|
||||
}
|
||||
}
|
||||
}
|
||||
return totalBytes, totalSize
|
||||
}
|
||||
169
.rclone_repo/fs/accounting/token_bucket.go
Executable file
169
.rclone_repo/fs/accounting/token_bucket.go
Executable file
@@ -0,0 +1,169 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// Globals
var (
	tokenBucketMu sync.Mutex // protects the token bucket variables
	// tokenBucket is the active limiter; nil means no bandwidth limit.
	tokenBucket *rate.Limiter
	// prevTokenBucket stashes the limiter while the limit is toggled
	// off (initialized to the same nil as tokenBucket).
	prevTokenBucket   = tokenBucket
	bwLimitToggledOff = false
	currLimitMu       sync.Mutex // protects changes to the timeslot
	currLimit         fs.BwTimeSlot
)
|
||||
|
||||
// maxBurstSize is the limiter's burst; it must be bigger than the
// biggest request so a single WaitN can always be satisfied.
const maxBurstSize = 4 * 1024 * 1024 // must be bigger than the biggest request
|
||||
|
||||
// make a new empty token bucket with the bandwidth given
|
||||
func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
|
||||
newTokenBucket := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
|
||||
// empty the bucket
|
||||
err := newTokenBucket.WaitN(context.Background(), maxBurstSize)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to empty token bucket: %v", err)
|
||||
}
|
||||
return newTokenBucket
|
||||
}
|
||||
|
||||
// StartTokenBucket starts the token bucket if necessary
|
||||
func StartTokenBucket() {
|
||||
currLimitMu.Lock()
|
||||
currLimit := fs.Config.BwLimit.LimitAt(time.Now())
|
||||
currLimitMu.Unlock()
|
||||
|
||||
if currLimit.Bandwidth > 0 {
|
||||
tokenBucket = newTokenBucket(currLimit.Bandwidth)
|
||||
fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.Bandwidth)
|
||||
|
||||
// Start the SIGUSR2 signal handler to toggle bandwidth.
|
||||
// This function does nothing in windows systems.
|
||||
startSignalHandler()
|
||||
}
|
||||
}
|
||||
|
||||
// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
// NOTE: the goroutine runs for the life of the process; the ticker is
// never stopped.
func StartTokenTicker() {
	// If the timetable has a single entry or was not specified, we don't need
	// a ticker to update the bandwidth.
	if len(fs.Config.BwLimit) <= 1 {
		return
	}

	ticker := time.NewTicker(time.Minute)
	go func() {
		for range ticker.C {
			limitNow := fs.Config.BwLimit.LimitAt(time.Now())
			currLimitMu.Lock()

			if currLimit.Bandwidth != limitNow.Bandwidth {
				// tokenBucketMu is taken while still holding currLimitMu;
				// the same order must be used everywhere to avoid deadlock.
				tokenBucketMu.Lock()

				// If bwlimit is toggled off, the change should only
				// become active on the next toggle, which causes
				// an exchange of tokenBucket <-> prevTokenBucket
				var targetBucket **rate.Limiter
				if bwLimitToggledOff {
					targetBucket = &prevTokenBucket
				} else {
					targetBucket = &tokenBucket
				}

				// Set new bandwidth. If unlimited, set tokenbucket to nil.
				if limitNow.Bandwidth > 0 {
					*targetBucket = newTokenBucket(limitNow.Bandwidth)
					if bwLimitToggledOff {
						fs.Logf(nil, "Scheduled bandwidth change. "+
							"Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth)
					} else {
						fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth)
					}
				} else {
					*targetBucket = nil
					fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
				}

				currLimit = limitNow
				tokenBucketMu.Unlock()
			}
			currLimitMu.Unlock()
		}
	}()
}
|
||||
|
||||
// limitBandwith sleeps for the correct amount of time for the passage
|
||||
// of n bytes according to the current bandwidth limit
|
||||
func limitBandwidth(n int) {
|
||||
tokenBucketMu.Lock()
|
||||
|
||||
// Limit the transfer speed if required
|
||||
if tokenBucket != nil {
|
||||
err := tokenBucket.WaitN(context.Background(), n)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Token bucket error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
tokenBucketMu.Unlock()
|
||||
}
|
||||
|
||||
// SetBwLimit sets the current bandwidth limit
|
||||
func SetBwLimit(bandwidth fs.SizeSuffix) {
|
||||
tokenBucketMu.Lock()
|
||||
defer tokenBucketMu.Unlock()
|
||||
if bandwidth > 0 {
|
||||
tokenBucket = newTokenBucket(bandwidth)
|
||||
fs.Logf(nil, "Bandwidth limit set to %v", bandwidth)
|
||||
} else {
|
||||
tokenBucket = nil
|
||||
fs.Logf(nil, "Bandwidth limit reset to unlimited")
|
||||
}
|
||||
}
|
||||
|
||||
// Remote control for the token bucket: registers "core/bwlimit" so a
// running rclone can have its bandwidth limit changed on the fly.
func init() {
	rc.Add(rc.Call{
		Path: "core/bwlimit",
		Fn: func(in rc.Params) (out rc.Params, err error) {
			// "rate" is required and must be a string in --bwlimit syntax.
			ibwlimit, ok := in["rate"]
			if !ok {
				return out, errors.Errorf("parameter rate not found")
			}
			bwlimit, ok := ibwlimit.(string)
			if !ok {
				return out, errors.Errorf("value must be string rate=%v", ibwlimit)
			}
			var bws fs.BwTimetable
			err = bws.Set(bwlimit)
			if err != nil {
				return out, errors.Wrap(err, "bad bwlimit")
			}
			// A timetable makes no sense here; accept exactly one rate.
			if len(bws) != 1 {
				return out, errors.New("need exactly 1 bandwidth setting")
			}
			bw := bws[0]
			SetBwLimit(bw.Bandwidth)
			return rc.Params{"rate": bw.Bandwidth.String()}, nil
		},
		Title: "Set the bandwidth limit.",
		Help: `
This sets the bandwidth limit to that passed in.

Eg

    rclone rc core/bwlimit rate=1M
    rclone rc core/bwlimit rate=off

The format of the parameter is exactly the same as passed to --bwlimit
except only one bandwidth may be specified.
`,
	})
}
|
||||
343
.rclone_repo/fs/asyncreader/asyncreader.go
Executable file
343
.rclone_repo/fs/asyncreader/asyncreader.go
Executable file
@@ -0,0 +1,343 @@
|
||||
// Package asyncreader provides an asynchronous reader which reads
|
||||
// independently of write
|
||||
package asyncreader
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// BufferSize is the default size of the async buffer
	BufferSize = 1024 * 1024
	// softStartInitial is the size of the first read-ahead; it is
	// doubled on each read until it reaches BufferSize, so short
	// streams don't pay for a full 1 MiB read.
	softStartInitial = 4 * 1024
)
|
||||
|
||||
// asyncBufferPool recycles buffers across AsyncReaders so each
// transfer doesn't allocate fresh BufferSize-byte buffers.
var asyncBufferPool = sync.Pool{
	New: func() interface{} { return newBuffer() },
}

// errorStreamAbandoned is returned by Read/WriteTo when the background
// reader was shut down (Abandon/Close) before a read error was stored.
var errorStreamAbandoned = errors.New("stream abandoned")
|
||||
|
||||
// AsyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type AsyncReader struct {
	in      io.ReadCloser // Input reader
	ready   chan *buffer  // Buffers ready to be handed to the reader
	token   chan struct{} // Tokens which allow a buffer to be taken
	exit    chan struct{} // Closes when finished
	buffers int           // Number of buffers
	err     error         // If an error has occurred it is here
	cur     *buffer       // Current buffer being served
	exited  chan struct{} // Channel is closed been the async reader shuts down
	size    int           // size of buffer to use
	closed  bool          // whether we have closed the underlying stream
	mu      sync.Mutex    // lock for Read/WriteTo/Abandon/Close
}
|
||||
|
||||
// New returns a reader that will asynchronously read from
|
||||
// the supplied Reader into a number of buffers each of size BufferSize
|
||||
// It will start reading from the input at once, maybe even before this
|
||||
// function has returned.
|
||||
// The input can be read from the returned reader.
|
||||
// When done use Close to release the buffers and close the supplied input.
|
||||
func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
|
||||
if buffers <= 0 {
|
||||
return nil, errors.New("number of buffers too small")
|
||||
}
|
||||
if rd == nil {
|
||||
return nil, errors.New("nil reader supplied")
|
||||
}
|
||||
a := &AsyncReader{}
|
||||
a.init(rd, buffers)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// init wires up the channels and starts the single background
// goroutine that reads ahead from rd into pooled buffers.
func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
	a.in = rd
	a.ready = make(chan *buffer, buffers)
	a.token = make(chan struct{}, buffers)
	a.exit = make(chan struct{}, 0)
	a.exited = make(chan struct{}, 0)
	a.buffers = buffers
	a.cur = nil
	// Soft start: early reads use a small buffer that doubles per read
	// (see below) until it reaches BufferSize.
	a.size = softStartInitial

	// Create tokens: one per buffer, bounding outstanding read-ahead.
	for i := 0; i < buffers; i++ {
		a.token <- struct{}{}
	}

	// Start async reader
	go func() {
		// Ensure that when we exit this is signalled.
		defer close(a.exited)
		// Closing ready signals consumers the stream ended or was abandoned.
		defer close(a.ready)
		for {
			select {
			case <-a.token:
				b := a.getBuffer()
				if a.size < BufferSize {
					b.buf = b.buf[:a.size]
					a.size <<= 1
				}
				err := b.read(a.in)
				a.ready <- b
				if err != nil {
					// Stop on first error; it is stored in b.err and is
					// surfaced to the consumer when that buffer drains.
					return
				}
			case <-a.exit:
				return
			}
		}
	}()
}
|
||||
|
||||
// putBuffer returns the buffer to the pool, clearing its data, error
// and offset first so pooled buffers are always pristine.
func (a *AsyncReader) putBuffer(b *buffer) {
	b.clear()
	asyncBufferPool.Put(b)
}
|
||||
|
||||
// get a buffer from the pool
|
||||
func (a *AsyncReader) getBuffer() *buffer {
|
||||
b := asyncBufferPool.Get().(*buffer)
|
||||
return b
|
||||
}
|
||||
|
||||
// fill makes sure a.cur holds a buffer with unread data, recycling the
// drained one (returning its read token) and fetching the next ready
// buffer. If the ready channel is closed it returns the stored error,
// or errorStreamAbandoned if none was recorded. (The original comment
// was a copy-paste of Read's.)
func (a *AsyncReader) fill() (err error) {
	if a.cur.isEmpty() {
		if a.cur != nil {
			a.putBuffer(a.cur)
			a.token <- struct{}{}
			a.cur = nil
		}
		b, ok := <-a.ready
		if !ok {
			// Return an error to show fill failed
			if a.err == nil {
				return errorStreamAbandoned
			}
			return a.err
		}
		a.cur = b
	}
	return nil
}
|
||||
|
||||
// Read will return the next available data.
|
||||
func (a *AsyncReader) Read(p []byte) (n int, err error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
// Swap buffer and maybe return error
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Copy what we can
|
||||
n = copy(p, a.cur.buffer())
|
||||
a.cur.increment(n)
|
||||
|
||||
// If at end of buffer, return any error, if present
|
||||
if a.cur.isEmpty() {
|
||||
a.err = a.cur.err
|
||||
return n, a.err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until there's no more data to write or when an error occurs.
|
||||
// The return value n is the number of bytes written.
|
||||
// Any error encountered during the write is also returned.
|
||||
func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
n = 0
|
||||
for {
|
||||
err = a.fill()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n2, err := w.Write(a.cur.buffer())
|
||||
a.cur.increment(n2)
|
||||
n += int64(n2)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if a.cur.err != nil {
|
||||
a.err = a.cur.err
|
||||
return n, a.cur.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SkipBytes will try to seek 'skip' bytes relative to the current position.
// On success it returns true. If 'skip' is outside the current buffer data or
// an error occurs, Abandon is called and false is returned.
func (a *AsyncReader) SkipBytes(skip int) (ok bool) {
	a.mu.Lock()
	defer func() {
		// Unlock before Abandon: Abandon takes a.mu itself.
		a.mu.Unlock()
		if !ok {
			a.Abandon()
		}
	}()

	if a.err != nil {
		return false
	}
	if skip < 0 {
		// seek backwards if skip is inside current buffer
		if a.cur != nil && a.cur.offset+skip >= 0 {
			a.cur.offset += skip
			return true
		}
		return false
	}
	// early return if skip is past the maximum buffer capacity
	if skip >= (len(a.ready)+1)*BufferSize {
		return false
	}

	// Tokens for recycled buffers are held back until the whole skip
	// succeeds, so the reader goroutine can't race ahead meanwhile.
	refillTokens := 0
	for {
		if a.cur.isEmpty() {
			if a.cur != nil {
				a.putBuffer(a.cur)
				refillTokens++
				a.cur = nil
			}
			select {
			case b, ok := <-a.ready:
				if !ok {
					return false
				}
				a.cur = b
			default:
				// No buffered data available: target is out of reach.
				return false
			}
		}

		// Consume up to 'skip' bytes from the current buffer.
		n := len(a.cur.buffer())
		if n > skip {
			n = skip
		}
		a.cur.increment(n)
		skip -= n
		if skip == 0 {
			for ; refillTokens > 0; refillTokens-- {
				a.token <- struct{}{}
			}
			// If at end of buffer, store any error, if present
			if a.cur.isEmpty() && a.cur.err != nil {
				a.err = a.cur.err
			}
			return true
		}
		if a.cur.err != nil {
			a.err = a.cur.err
			return false
		}
	}
}
|
||||
|
||||
// Abandon will ensure that the underlying async reader is shut down.
// It will NOT close the input supplied on New.
//
// NOTE(review): two fully concurrent Abandon calls could both pass the
// select below and double-close a.exit — confirm callers serialize
// (Close/SkipBytes appear to be the only callers in this package).
func (a *AsyncReader) Abandon() {
	select {
	case <-a.exit:
		// Do nothing if reader routine already exited
		return
	default:
	}
	// Close and wait for go routine
	close(a.exit)
	<-a.exited
	// take the lock to wait for Read/WriteTo to complete
	a.mu.Lock()
	defer a.mu.Unlock()
	// Return any outstanding buffers to the Pool. The ready channel was
	// closed by the reader goroutine, so this range terminates.
	if a.cur != nil {
		a.putBuffer(a.cur)
		a.cur = nil
	}
	for b := range a.ready {
		a.putBuffer(b)
	}
}
|
||||
|
||||
// Close will ensure that the underlying async reader is shut down.
|
||||
// It will also close the input supplied on New.
|
||||
func (a *AsyncReader) Close() (err error) {
|
||||
a.Abandon()
|
||||
if a.closed {
|
||||
return nil
|
||||
}
|
||||
a.closed = true
|
||||
return a.in.Close()
|
||||
}
|
||||
|
||||
// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
	buf    []byte // data read; len is bytes read, cap is BufferSize
	err    error  // error from the read that filled buf, if any
	offset int    // how much of buf has been consumed so far
}
|
||||
|
||||
func newBuffer() *buffer {
|
||||
return &buffer{
|
||||
buf: make([]byte, BufferSize),
|
||||
err: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// clear returns the buffer to its full size and clears the members
|
||||
func (b *buffer) clear() {
|
||||
b.buf = b.buf[:cap(b.buf)]
|
||||
b.err = nil
|
||||
b.offset = 0
|
||||
}
|
||||
|
||||
// isEmpty returns true is offset is at end of
|
||||
// buffer, or
|
||||
func (b *buffer) isEmpty() bool {
|
||||
if b == nil {
|
||||
return true
|
||||
}
|
||||
if len(b.buf)-b.offset <= 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
	var n int
	// ReadFill keeps reading until the buffer is full or an error
	// occurs, so short reads from rd don't yield short buffers.
	n, b.err = readers.ReadFill(rd, b.buf)
	b.buf = b.buf[0:n]
	b.offset = 0
	return b.err
}
|
||||
|
||||
// buffer returns the unread portion of the data (from offset to end).
func (b *buffer) buffer() []byte {
	return b.buf[b.offset:]
}
|
||||
|
||||
// increment advances the read offset by n bytes (n bytes consumed).
func (b *buffer) increment(n int) {
	b.offset += n
}
|
||||
365
.rclone_repo/fs/asyncreader/asyncreader_test.go
Executable file
365
.rclone_repo/fs/asyncreader/asyncreader_test.go
Executable file
@@ -0,0 +1,365 @@
|
||||
package asyncreader
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/lib/israce"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAsyncReader checks basic Read behaviour: a short stream is
// delivered with io.EOF, reads after EOF keep returning EOF, Close is
// idempotent, and Close works without draining the stream.
func TestAsyncReader(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := New(buf, 4)
	require.NoError(t, err)

	var dst = make([]byte, 100)
	n, err := ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 10, n)

	n, err = ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// Test read after error
	n, err = ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	err = ar.Close()
	require.NoError(t, err)
	// Test double close
	err = ar.Close()
	require.NoError(t, err)

	// Test Close without reading everything
	buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
	ar, err = New(buf, 4)
	require.NoError(t, err)
	err = ar.Close()
	require.NoError(t, err)

}
|
||||
|
||||
// TestAsyncWriteTo checks the io.WriterTo path: the full stream is
// copied, WriteTo reports io.EOF, and a second copy yields 0/EOF.
func TestAsyncWriteTo(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := New(buf, 4)
	require.NoError(t, err)

	var dst = &bytes.Buffer{}
	n, err := io.Copy(dst, ar)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, int64(10), n)

	// Should still return EOF
	n, err = io.Copy(dst, ar)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, int64(0), n)

	err = ar.Close()
	require.NoError(t, err)
}
|
||||
|
||||
// TestAsyncReaderErrors checks New rejects a nil reader and
// non-positive buffer counts.
func TestAsyncReaderErrors(t *testing.T) {
	// test nil reader
	_, err := New(nil, 4)
	require.Error(t, err)

	// invalid buffer number
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	_, err = New(buf, 0)
	require.Error(t, err)
	_, err = New(buf, -1)
	require.Error(t, err)
}
|
||||
|
||||
// Complex read tests, leveraged from "bufio".
|
||||
|
||||
// readMaker names a wrapper that gives an io.Reader a particular read
// behaviour (whole reads, one byte at a time, errors, timeouts...).
type readMaker struct {
	name string
	fn   func(io.Reader) io.Reader
}

// readMakers lists the reader behaviours the size tests exercise,
// using the helpers from testing/iotest.
var readMakers = []readMaker{
	{"full", func(r io.Reader) io.Reader { return r }},
	{"byte", iotest.OneByteReader},
	{"half", iotest.HalfReader},
	{"data+err", iotest.DataErrReader},
	{"timeout", iotest.TimeoutReader},
}
|
||||
|
||||
// Call Read to accumulate the text of a file
|
||||
func reads(buf io.Reader, m int) string {
|
||||
var b [1000]byte
|
||||
nb := 0
|
||||
for {
|
||||
n, err := buf.Read(b[nb : nb+m])
|
||||
nb += n
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil && err != iotest.ErrTimeout {
|
||||
panic("Data: " + err.Error())
|
||||
} else if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(b[0:nb])
|
||||
}
|
||||
|
||||
// bufReader names a strategy for draining a stream into a string.
type bufReader struct {
	name string
	fn   func(io.Reader) string
}

// bufreaders drains the stream via reads() in chunks of 1..7 bytes.
var bufreaders = []bufReader{
	{"1", func(b io.Reader) string { return reads(b, 1) }},
	{"2", func(b io.Reader) string { return reads(b, 2) }},
	{"3", func(b io.Reader) string { return reads(b, 3) }},
	{"4", func(b io.Reader) string { return reads(b, 4) }},
	{"5", func(b io.Reader) string { return reads(b, 5) }},
	{"7", func(b io.Reader) string { return reads(b, 7) }},
}
|
||||
|
||||
// minReadBufferSize is the smallest non-zero bufio size worth testing.
const minReadBufferSize = 16

// bufsizes are the bufio.Reader sizes exercised by the size tests.
var bufsizes = []int{
	0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}
|
||||
|
||||
// Test various input buffer sizes, number of buffers and read sizes
// via the Read path: every combination must reproduce the input text
// exactly (except the "timeout" reader, which AsyncReader does not
// recover from by design).
func TestAsyncReaderSizes(t *testing.T) {
	// Build texts of increasing length; the last entry is all of them
	// concatenated.
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					// l is the number of async buffers.
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := New(ioutil.NopCloser(buf), l)
						s := bufreader.fn(ar)
						// "timeout" expects the Reader to recover, AsyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err := ar.Close()
						require.NoError(t, err)
					}
				}
			}
		}
	}
}
|
||||
|
||||
// Test various input buffer sizes, number of buffers and read sizes
// via the WriteTo path; mirrors TestAsyncReaderSizes but drains with
// WriteTo into a bytes.Buffer instead of calling Read.
func TestAsyncReaderWriteTo(t *testing.T) {
	// Build texts of increasing length; the last entry is all of them
	// concatenated.
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					// l is the number of async buffers.
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := New(ioutil.NopCloser(buf), l)
						dst := &bytes.Buffer{}
						_, err := ar.WriteTo(dst)
						if err != nil && err != io.EOF && err != iotest.ErrTimeout {
							t.Fatal("Copy:", err)
						}
						s := dst.String()
						// "timeout" expects the Reader to recover, AsyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err = ar.Close()
						require.NoError(t, err)
					}
				}
			}
		}
	}
}
|
||||
|
||||
// Read an infinite number of zeros
|
||||
type zeroReader struct {
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (z *zeroReader) Read(p []byte) (n int, err error) {
|
||||
if z.closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
for i := range p {
|
||||
p[i] = 0
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (z *zeroReader) Close() error {
|
||||
if z.closed {
|
||||
panic("double close on zeroReader")
|
||||
}
|
||||
z.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test closing and abandoning: start an endless copy from zeroReader,
// Abandon it mid-flight, and check the copier observed
// errorStreamAbandoned after transferring some bytes.
func testAsyncReaderClose(t *testing.T, writeto bool) {
	zr := &zeroReader{}
	a, err := New(zr, 16)
	require.NoError(t, err)
	var copyN int64
	var copyErr error
	var wg sync.WaitGroup
	started := make(chan struct{})
	wg.Add(1)
	go func() {
		defer wg.Done()
		close(started)
		if writeto {
			// exercise the WriteTo path
			copyN, copyErr = a.WriteTo(ioutil.Discard)
		} else {
			// exercise the Read path
			buf := make([]byte, 64*1024)
			for {
				var n int
				n, copyErr = a.Read(buf)
				copyN += int64(n)
				if copyErr != nil {
					break
				}
			}
		}
	}()
	// Do some copying
	<-started
	time.Sleep(100 * time.Millisecond)
	// Abandon the copy
	a.Abandon()
	wg.Wait()
	assert.Equal(t, errorStreamAbandoned, copyErr)
	// t.Logf("Copied %d bytes, err %v", copyN, copyErr)
	assert.True(t, copyN > 0)
}
|
||||
// TestAsyncReaderCloseRead abandons a reader mid-Read loop.
func TestAsyncReaderCloseRead(t *testing.T) { testAsyncReaderClose(t, false) }

// TestAsyncReaderCloseWriteTo abandons a reader mid-WriteTo.
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }
|
||||
|
||||
// TestAsyncReaderSkipBytes exercises SkipBytes over combinations of
// buffer counts, initial read sizes and skip distances (including
// negative skips and skips past EOF). When SkipBytes reports success
// the data read afterwards must match the source at the new offset;
// when it fails the stream is expected to be abandoned or at EOF.
func TestAsyncReaderSkipBytes(t *testing.T) {
	t.Parallel()
	data := make([]byte, 15000)
	buf := make([]byte, len(data))
	r := rand.New(rand.NewSource(42))

	n, err := r.Read(data)
	require.NoError(t, err)
	require.Equal(t, len(data), n)

	// initial reads and skips bracket the soft-start and buffer sizes
	initialReads := []int{0, 1, 100, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data)}
	skips := []int{-1000, -101, -100, -99, 0, 1, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data), BufferSize, 2 * BufferSize}

	for buffers := 1; buffers <= 5; buffers++ {
		if israce.Enabled && buffers > 1 {
			t.Skip("FIXME Skipping further tests with race detector until https://github.com/golang/go/issues/27070 is fixed.")
		}
		t.Run(fmt.Sprintf("%d", buffers), func(t *testing.T) {
			for _, initialRead := range initialReads {
				t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
					for _, skip := range skips {
						t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
							ar, err := New(ioutil.NopCloser(bytes.NewReader(data)), buffers)
							require.NoError(t, err)

							// read the initial chunk and check it matches the source
							wantSkipFalse := false
							buf = buf[:initialRead]
							n, err := readers.ReadFill(ar, buf)
							if initialRead >= len(data) {
								wantSkipFalse = true
								if initialRead > len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									assert.True(t, err == nil || err == io.EOF)
								}
								assert.Equal(t, len(data), n)
								assert.Equal(t, data, buf[:len(data)])
							} else {
								assert.NoError(t, err)
								assert.Equal(t, initialRead, n)
								assert.Equal(t, data[:initialRead], buf)
							}

							// skip, then read again and verify the post-skip data
							skipped := ar.SkipBytes(skip)
							buf = buf[:1024]
							n, err = readers.ReadFill(ar, buf)
							offset := initialRead + skip
							if skipped {
								assert.False(t, wantSkipFalse)
								l := len(buf)
								if offset >= len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									if offset+1024 >= len(data) {
										l = len(data) - offset
									}
									assert.Equal(t, l, n)
									assert.Equal(t, data[offset:offset+l], buf[:l])
								}
							} else {
								if initialRead >= len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									assert.True(t, err == errorStreamAbandoned || err == io.EOF)
								}
							}
						})
					}
				})
			}
		})
	}
}
|
||||
214
.rclone_repo/fs/bwtimetable.go
Executable file
214
.rclone_repo/fs/bwtimetable.go
Executable file
@@ -0,0 +1,214 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
	DayOfTheWeek int        // day of the week, 0 = Sunday .. 6 = Saturday
	HHMM         int        // time of day encoded as hours*100 + minutes, eg 1430 for 14:30
	Bandwidth    SizeSuffix // bandwidth limit, -1 means unlimited
}

// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot
|
||||
|
||||
// String returns a printable representation of BwTimetable.
|
||||
func (x BwTimetable) String() string {
|
||||
ret := []string{}
|
||||
for _, ts := range x {
|
||||
ret = append(ret, fmt.Sprintf("%s-%04.4d,%s", time.Weekday(ts.DayOfTheWeek), ts.HHMM, ts.Bandwidth.String()))
|
||||
}
|
||||
return strings.Join(ret, " ")
|
||||
}
|
||||
|
||||
// Basic hour format checking
|
||||
func validateHour(HHMM string) error {
|
||||
if len(HHMM) != 5 {
|
||||
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
|
||||
}
|
||||
hh, err := strconv.Atoi(HHMM[0:2])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
|
||||
}
|
||||
if hh < 0 || hh > 23 {
|
||||
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
|
||||
}
|
||||
mm, err := strconv.Atoi(HHMM[3:])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
|
||||
}
|
||||
if mm < 0 || mm > 59 {
|
||||
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Basic weekday format checking
|
||||
func parseWeekday(dayOfWeek string) (int, error) {
|
||||
dayOfWeek = strings.ToLower(dayOfWeek)
|
||||
if dayOfWeek == "sun" || dayOfWeek == "sunday" {
|
||||
return 0, nil
|
||||
}
|
||||
if dayOfWeek == "mon" || dayOfWeek == "monday" {
|
||||
return 1, nil
|
||||
}
|
||||
if dayOfWeek == "tue" || dayOfWeek == "tuesday" {
|
||||
return 2, nil
|
||||
}
|
||||
if dayOfWeek == "wed" || dayOfWeek == "wednesday" {
|
||||
return 3, nil
|
||||
}
|
||||
if dayOfWeek == "thu" || dayOfWeek == "thursday" {
|
||||
return 4, nil
|
||||
}
|
||||
if dayOfWeek == "fri" || dayOfWeek == "friday" {
|
||||
return 5, nil
|
||||
}
|
||||
if dayOfWeek == "sat" || dayOfWeek == "saturday" {
|
||||
return 6, nil
|
||||
}
|
||||
return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
|
||||
}
|
||||
|
||||
// Set the bandwidth timetable.
|
||||
func (x *BwTimetable) Set(s string) error {
|
||||
// The timetable is formatted as:
|
||||
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
|
||||
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
|
||||
|
||||
if len(s) == 0 {
|
||||
return errors.New("empty string")
|
||||
}
|
||||
// Single value without time specification.
|
||||
if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
|
||||
ts := BwTimeSlot{}
|
||||
if err := ts.Bandwidth.Set(s); err != nil {
|
||||
return err
|
||||
}
|
||||
ts.DayOfTheWeek = 0
|
||||
ts.HHMM = 0
|
||||
*x = BwTimetable{ts}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, tok := range strings.Split(s, " ") {
|
||||
tv := strings.Split(tok, ",")
|
||||
|
||||
// Format must be dayOfWeek-HH:MM,BW
|
||||
if len(tv) != 2 {
|
||||
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
|
||||
}
|
||||
|
||||
weekday := 0
|
||||
HHMM := ""
|
||||
if !strings.Contains(tv[0], "-") {
|
||||
HHMM = tv[0]
|
||||
if err := validateHour(HHMM); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < 7; i++ {
|
||||
hh, _ := strconv.Atoi(HHMM[0:2])
|
||||
mm, _ := strconv.Atoi(HHMM[3:])
|
||||
ts := BwTimeSlot{
|
||||
DayOfTheWeek: i,
|
||||
HHMM: (hh * 100) + mm,
|
||||
}
|
||||
if err := ts.Bandwidth.Set(tv[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, ts)
|
||||
}
|
||||
} else {
|
||||
timespec := strings.Split(tv[0], "-")
|
||||
if len(timespec) != 2 {
|
||||
return errors.Errorf("invalid time specification: %q", tv[0])
|
||||
}
|
||||
var err error
|
||||
weekday, err = parseWeekday(timespec[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
HHMM = timespec[1]
|
||||
if err := validateHour(HHMM); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hh, _ := strconv.Atoi(HHMM[0:2])
|
||||
mm, _ := strconv.Atoi(HHMM[3:])
|
||||
ts := BwTimeSlot{
|
||||
DayOfTheWeek: weekday,
|
||||
HHMM: (hh * 100) + mm,
|
||||
}
|
||||
// Bandwidth limit for this time slot.
|
||||
if err := ts.Bandwidth.Set(tv[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, ts)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// timeDiff returns the difference in minutes between
// lateDayOfWeekHHMM and earlyDayOfWeekHHMM, both encoded as
// dayOfWeek*10000 + hour*100 + minute.
func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
	// toMinutes flattens the day/hour/minute encoding into minutes
	// since the start of the week.
	toMinutes := func(dayOfWeekHHMM int) int {
		day := dayOfWeekHHMM / 10000
		hour := (dayOfWeekHHMM / 100) % 100
		minute := dayOfWeekHHMM % 100
		return (day*24+hour)*60 + minute
	}
	return toMinutes(lateDayOfWeekHHMM) - toMinutes(earlyDayOfWeekHHMM)
}
|
||||
|
||||
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
	// If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
	if len(x) == 0 {
		return BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1}
	}

	// Encode the requested time as dayOfWeek*10000 + hh*100 + mm, the
	// same form the slots use, so they can be compared directly.
	dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()

	// By default, we return the last element in the timetable. This
	// satisfies two conditions: 1) If there's only one element it
	// will always be selected, and 2) The last element of the table
	// will "wrap around" until overridden by an earlier time slot.
	ret := x[len(x)-1]
	mindif := 0
	first := true

	// Look for most recent time slot.
	for _, ts := range x {
		// Ignore slots which start later in the week than the
		// requested time - they only apply via the wrap-around default.
		if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
			continue
		}
		dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
		if first {
			// Seed the minimum with the first candidate seen.
			mindif = dif
			first = false
		}
		// Keep the slot with the smallest distance into the past.
		if dif <= mindif {
			mindif = dif
			ret = ts
		}
	}

	return ret
}
|
||||
|
||||
// Type of the value - part of the pflag.Value interface.
func (x BwTimetable) Type() string {
	return "BwTimetable"
}
|
||||
329
.rclone_repo/fs/bwtimetable_test.go
Executable file
329
.rclone_repo/fs/bwtimetable_test.go
Executable file
@@ -0,0 +1,329 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check BwTimetable satisfies the pflag.Value interface at compile time
var _ pflag.Value = (*BwTimetable)(nil)
|
||||
|
||||
// TestBwTimetableSet checks parsing of timetable strings: invalid
// inputs, single constant bandwidths, day-less "hh:mm,bw" entries that
// expand to all seven days, and explicit "Day-hh:mm,bw" entries.
func TestBwTimetableSet(t *testing.T) {
	for _, test := range []struct {
		in   string      // input passed to Set
		want BwTimetable // expected resulting timetable
		err  bool        // whether Set should fail
	}{
		{"", BwTimetable{}, true},
		{"bad,bad", BwTimetable{}, true},
		{"bad bad", BwTimetable{}, true},
		{"bad", BwTimetable{}, true},
		{"1000X", BwTimetable{}, true},
		{"2401,666", BwTimetable{}, true},
		{"1061,666", BwTimetable{}, true},
		{"bad-10:20,666", BwTimetable{}, true},
		{"Mon-bad,666", BwTimetable{}, true},
		{"Mon-10:20,bad", BwTimetable{}, true},
		{
			"0",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: 0},
			},
			false,
		},
		{
			"666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: 666 * 1024},
			},
			false,
		},
		{
			"10:20,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: 666 * 1024},
			},
			false,
		},
		{
			"11:00,333 13:40,666 23:50,10M 23:59,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: -1},
			},
			false,
		},
		{
			"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M Sat-10:00,off Sun-23:00,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
			},
			false,
		},
		{
			"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
			},
			false,
		},
	} {
		tt := BwTimetable{}
		err := tt.Set(test.in)
		if test.err {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
		assert.Equal(t, test.want, tt)
	}
}
|
||||
|
||||
// TestBwTimetableLimitAt checks slot selection: empty timetables
// return the unlimited default, the most recent slot not after "now"
// wins, and the last table entry wraps around across week boundaries.
func TestBwTimetableLimitAt(t *testing.T) {
	for _, test := range []struct {
		tt   BwTimetable // timetable under test
		now  time.Time   // time to look up
		want BwTimeSlot  // expected winning slot
	}{
		{
			BwTimetable{},
			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
			},
			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
			},
			time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
			},
			time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
			},
			time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
			},
			time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
			},
			time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
			},
			time.Date(2017, time.April, 21, 23, 59, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
		},
		{
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
			},
			time.Date(2017, time.April, 17, 10, 59, 0, 0, time.UTC),
			BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
		},
	} {
		slot := test.tt.LimitAt(test.now)
		assert.Equal(t, test.want, slot)
	}
}
|
||||
245
.rclone_repo/fs/chunkedreader/chunkedreader.go
Executable file
245
.rclone_repo/fs/chunkedreader/chunkedreader.go
Executable file
@@ -0,0 +1,245 @@
|
||||
package chunkedreader
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// io related errors returned by ChunkedReader
var (
	// ErrorFileClosed is returned when a method is called after Close
	ErrorFileClosed = errors.New("file already closed")
	// ErrorInvalidSeek is returned when the seek target is before the
	// start or at/after the end of the file
	ErrorInvalidSeek = errors.New("invalid seek position")
)
|
||||
|
||||
// ChunkedReader is a reader for a Object with the possibility
// of reading the source in chunks of given size
//
// An initialChunkSize of <= 0 will disable chunked reading.
type ChunkedReader struct {
	mu               sync.Mutex    // protects following fields
	o                fs.Object     // source to read from
	rc               io.ReadCloser // reader for the current open chunk
	offset           int64         // offset the next Read will start. -1 forces a reopen of o
	chunkOffset      int64         // beginning of the current or next chunk
	chunkSize        int64         // length of the current or next chunk. -1 will open o from chunkOffset to the end
	initialChunkSize int64         // default chunkSize after the chunk specified by RangeSeek is complete
	maxChunkSize     int64         // consecutive read chunks will double in size until reached. -1 means no limit
	customChunkSize  bool          // is the current chunkSize set by RangeSeek?
	closed           bool          // has Close been called?
}
|
||||
|
||||
// New returns a ChunkedReader for the Object.
//
// An initialChunkSize of <= 0 will disable chunked reading.
// If maxChunkSize is greater than initialChunkSize, the chunk size will be
// doubled after each chunk read with a maximum of maxChunkSize.
// A Seek or RangeSeek will reset the chunk size to its initial value
func New(o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
	// normalise "disabled" to -1 so the rest of the code only has to
	// check one sentinel value
	if initialChunkSize <= 0 {
		initialChunkSize = -1
	}
	// the maximum can never be smaller than the initial size
	if maxChunkSize != -1 && maxChunkSize < initialChunkSize {
		maxChunkSize = initialChunkSize
	}
	return &ChunkedReader{
		o:                o,
		offset:           -1, // force the first Read to open the source
		chunkSize:        initialChunkSize,
		initialChunkSize: initialChunkSize,
		maxChunkSize:     maxChunkSize,
	}
}
|
||||
|
||||
// Read from the file - for details see io.Reader
func (cr *ChunkedReader) Read(p []byte) (n int, err error) {
	cr.mu.Lock()
	defer cr.mu.Unlock()

	if cr.closed {
		return 0, ErrorFileClosed
	}

	// loop until p is exhausted, opening a new chunk whenever the
	// previous one has been read completely
	for reqSize := int64(len(p)); reqSize > 0; reqSize = int64(len(p)) {
		// the current chunk boundary. valid only when chunkSize > 0
		chunkEnd := cr.chunkOffset + cr.chunkSize

		fs.Debugf(cr.o, "ChunkedReader.Read at %d length %d chunkOffset %d chunkSize %d", cr.offset, reqSize, cr.chunkOffset, cr.chunkSize)

		switch {
		case cr.chunkSize > 0 && cr.offset == chunkEnd: // last chunk read completely
			cr.chunkOffset = cr.offset
			if cr.customChunkSize { // last chunkSize was set by RangeSeek
				cr.customChunkSize = false
				cr.chunkSize = cr.initialChunkSize
			} else {
				// grow the chunk size, capped at maxChunkSize (-1 = no cap)
				cr.chunkSize *= 2
				if cr.chunkSize > cr.maxChunkSize && cr.maxChunkSize != -1 {
					cr.chunkSize = cr.maxChunkSize
				}
			}
			// recalculate the chunk boundary. valid only when chunkSize > 0
			chunkEnd = cr.chunkOffset + cr.chunkSize
			fallthrough
		case cr.offset == -1: // first Read or Read after RangeSeek
			err = cr.openRange()
			if err != nil {
				return
			}
		}

		var buf []byte
		chunkRest := chunkEnd - cr.offset
		// limit read to chunk boundaries if chunkSize > 0
		if reqSize > chunkRest && cr.chunkSize > 0 {
			buf, p = p[0:chunkRest], p[chunkRest:]
		} else {
			buf, p = p, nil
		}
		var rn int
		rn, err = io.ReadFull(cr.rc, buf)
		n += rn
		cr.offset += int64(rn)
		if err != nil {
			// ReadFull reports a short read as ErrUnexpectedEOF;
			// surface it to the caller as a plain EOF
			if err == io.ErrUnexpectedEOF {
				err = io.EOF
			}
			return
		}
	}
	return n, nil
}
|
||||
|
||||
// Close the file - for details see io.Closer
//
// All methods on ChunkedReader will return ErrorFileClosed afterwards
func (cr *ChunkedReader) Close() error {
	cr.mu.Lock()
	defer cr.mu.Unlock()

	if cr.closed {
		return ErrorFileClosed
	}
	cr.closed = true

	// release the current reader (if any) via resetReader
	return cr.resetReader(nil, 0)
}
|
||||
|
||||
// Seek the file - for details see io.Seeker
//
// Implemented as a RangeSeek with no length hint.
func (cr *ChunkedReader) Seek(offset int64, whence int) (int64, error) {
	return cr.RangeSeek(offset, whence, -1)
}
|
||||
|
||||
// RangeSeek the file - for details see RangeSeeker
|
||||
//
|
||||
// The specified length will only apply to the next chunk opened.
|
||||
// RangeSeek will not reopen the source until Read is called.
|
||||
func (cr *ChunkedReader) RangeSeek(offset int64, whence int, length int64) (int64, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
fs.Debugf(cr.o, "ChunkedReader.RangeSeek from %d to %d length %d", cr.offset, offset, length)
|
||||
|
||||
if cr.closed {
|
||||
return 0, ErrorFileClosed
|
||||
}
|
||||
|
||||
size := cr.o.Size()
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
cr.offset = 0
|
||||
case io.SeekEnd:
|
||||
cr.offset = size
|
||||
}
|
||||
// set the new chunk start
|
||||
cr.chunkOffset = cr.offset + offset
|
||||
// force reopen on next Read
|
||||
cr.offset = -1
|
||||
if length > 0 {
|
||||
cr.customChunkSize = true
|
||||
cr.chunkSize = length
|
||||
} else {
|
||||
cr.chunkSize = cr.initialChunkSize
|
||||
}
|
||||
if cr.chunkOffset < 0 || cr.chunkOffset >= size {
|
||||
cr.chunkOffset = 0
|
||||
return 0, ErrorInvalidSeek
|
||||
}
|
||||
return cr.chunkOffset, nil
|
||||
}
|
||||
|
||||
// Open forces the connection to be opened
|
||||
func (cr *ChunkedReader) Open() (*ChunkedReader, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
if cr.rc != nil && cr.offset != -1 {
|
||||
return cr, nil
|
||||
}
|
||||
return cr, cr.openRange()
|
||||
}
|
||||
|
||||
// openRange will open the source Object with the current chunk range
//
// If the current open reader implements RangeSeeker, it is tried first.
// When RangeSeek fails, o.Open with a RangeOption is used.
//
// A length <= 0 will request till the end of the file
func (cr *ChunkedReader) openRange() error {
	offset, length := cr.chunkOffset, cr.chunkSize
	fs.Debugf(cr.o, "ChunkedReader.openRange at %d length %d", offset, length)

	if cr.closed {
		return ErrorFileClosed
	}

	// Fast path: ask the existing reader to move to the new range.
	if rs, ok := cr.rc.(fs.RangeSeeker); ok {
		n, err := rs.RangeSeek(offset, io.SeekStart, length)
		if err == nil && n == offset {
			cr.offset = offset
			return nil
		}
		if err != nil {
			fs.Debugf(cr.o, "ChunkedReader.openRange seek failed (%s). Trying Open", err)
		} else {
			fs.Debugf(cr.o, "ChunkedReader.openRange seeked to wrong offset. Wanted %d, got %d. Trying Open", offset, n)
		}
	}

	// Slow path: open a fresh reader over the wanted range.
	var rc io.ReadCloser
	var err error
	if length <= 0 {
		if offset == 0 {
			// whole object from the start
			rc, err = cr.o.Open()
		} else {
			// from offset to the end of the object
			rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: -1})
		}
	} else {
		rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: offset + length - 1})
	}
	if err != nil {
		return err
	}
	return cr.resetReader(rc, offset)
}
|
||||
|
||||
// resetReader switches the current reader to the given reader.
|
||||
// The old reader will be Close'd before setting the new reader.
|
||||
func (cr *ChunkedReader) resetReader(rc io.ReadCloser, offset int64) error {
|
||||
if cr.rc != nil {
|
||||
if err := cr.rc.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cr.rc = rc
|
||||
cr.offset = offset
|
||||
return nil
|
||||
}
|
||||
|
||||
// Interface satisfaction checks - fail the build if ChunkedReader
// stops implementing any of these.
var (
	_ io.ReadCloser  = (*ChunkedReader)(nil)
	_ io.Seeker      = (*ChunkedReader)(nil)
	_ fs.RangeSeeker = (*ChunkedReader)(nil)
)
|
||||
111
.rclone_repo/fs/chunkedreader/chunkedreader_test.go
Executable file
111
.rclone_repo/fs/chunkedreader/chunkedreader_test.go
Executable file
@@ -0,0 +1,111 @@
|
||||
package chunkedreader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestChunkedReader runs the read tests against every seek capability
// the mock object supports.
func TestChunkedReader(t *testing.T) {
	content := makeContent(t, 1024)

	for _, mode := range mockobject.SeekModes {
		t.Run(mode.String(), testRead(content, mode))
	}
}
|
||||
|
||||
// testRead returns a subtest exercising a ChunkedReader over a grid of
// chunk sizes, seek offsets and length limits against content.
func testRead(content []byte, mode mockobject.SeekMode) func(*testing.T) {
	return func(t *testing.T) {
		chunkSizes := []int64{-1, 0, 1, 15, 16, 17, 1023, 1024, 1025, 2000}
		offsets := []int64{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33,
			63, 64, 65, 511, 512, 513, 1023, 1024, 1025}
		limits := []int64{-1, 0, 1, 31, 32, 33, 1023, 1024, 1025}
		cl := int64(len(content))
		bl := 32 // read buffer size
		buf := make([]byte, bl)

		o := mockobject.New("test.bin").WithContent(content, mode)
		for ics, cs := range chunkSizes {
			for icsMax, csMax := range chunkSizes {
				// skip tests where chunkSize is much bigger than maxChunkSize
				if ics > icsMax+1 {
					continue
				}

				t.Run(fmt.Sprintf("Chunksize_%d_%d", cs, csMax), func(t *testing.T) {
					cr := New(o, cs, csMax)

					for _, offset := range offsets {
						for _, limit := range limits {
							what := fmt.Sprintf("offset %d, limit %d", offset, limit)

							// Seeking at or past EOF must fail.
							p, err := cr.RangeSeek(offset, io.SeekStart, limit)
							if offset >= cl {
								require.Error(t, err, what)
								// NOTE(review): this return ends the whole subtest at
								// the first out-of-range offset, so any later offsets
								// (e.g. 1025 after 1024) are never tested - confirm
								// that is intended.
								return
							}
							require.NoError(t, err, what)
							require.Equal(t, offset, p, what)

							// Read up to bl bytes and check length, EOF and data.
							n, err := cr.Read(buf)
							end := offset + int64(bl)
							if end > cl {
								end = cl
							}
							l := int(end - offset)
							if l < bl {
								// short read at the end of the object must report EOF
								require.Equal(t, io.EOF, err, what)
							} else {
								require.NoError(t, err, what)
							}
							require.Equal(t, l, n, what)
							require.Equal(t, content[offset:end], buf[:n], what)
						}
					}
				})
			}
		}
	}
}
|
||||
|
||||
// TestErrorAfterClose checks that every ChunkedReader method returns an
// error once the reader has been closed.
func TestErrorAfterClose(t *testing.T) {
	content := makeContent(t, 1024)
	o := mockobject.New("test.bin").WithContent(content, mockobject.SeekModeNone)

	// Close - a second Close must fail
	cr := New(o, 0, 0)
	require.NoError(t, cr.Close())
	require.Error(t, cr.Close())

	// Read after Close
	cr = New(o, 0, 0)
	require.NoError(t, cr.Close())
	var buf [1]byte
	_, err := cr.Read(buf[:])
	require.Error(t, err)

	// Seek after Close
	cr = New(o, 0, 0)
	require.NoError(t, cr.Close())
	_, err = cr.Seek(1, io.SeekCurrent)
	require.Error(t, err)

	// RangeSeek after Close
	cr = New(o, 0, 0)
	require.NoError(t, cr.Close())
	_, err = cr.RangeSeek(1, io.SeekCurrent, 0)
	require.Error(t, err)
}
|
||||
|
||||
func makeContent(t *testing.T, size int) []byte {
|
||||
content := make([]byte, size)
|
||||
r := rand.New(rand.NewSource(42))
|
||||
_, err := io.ReadFull(r, content)
|
||||
assert.NoError(t, err)
|
||||
return content
|
||||
}
|
||||
131
.rclone_repo/fs/config.go
Executable file
131
.rclone_repo/fs/config.go
Executable file
@@ -0,0 +1,131 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Global
//
// The function-pointer defaults below are no-ops; the real config
// implementation replaces them at startup (decoupling fs from config).
var (
	// Config is the global config
	Config = NewConfig()

	// Read a value from the config file
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	ConfigFileGet = func(section, key string) (string, bool) { return "", false }

	// Set a value into the config file
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	ConfigFileSet = func(section, key, value string) {
		Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
	}

	// CountError counts an error. If any errors have been
	// counted then it will exit with a non zero error code.
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	CountError = func(err error) {}

	// ConfigProvider is the config key used for provider options
	ConfigProvider = "provider"
)
|
||||
|
||||
// ConfigInfo is filesystem config options
type ConfigInfo struct {
	LogLevel              LogLevel      // Log level for general output
	StatsLogLevel         LogLevel      // Log level used for --stats output
	DryRun                bool          // Do a trial run with no permanent changes
	CheckSum              bool          // Skip based on checksum & size, not mod-time & size
	SizeOnly              bool          // Skip based on size only
	IgnoreTimes           bool          // Transfer all files even if size and time match
	IgnoreExisting        bool          // Skip files that exist on destination
	IgnoreErrors          bool          // Delete even if there are I/O errors
	ModifyWindow          time.Duration // Max time diff to be considered the same
	Checkers              int           // Number of checkers to run in parallel
	Transfers             int           // Number of file transfers to run in parallel
	ConnectTimeout        time.Duration // Connect timeout
	Timeout               time.Duration // Data channel timeout
	Dump                  DumpFlags     // What debug info to dump
	InsecureSkipVerify    bool          // Skip server certificate verification
	DeleteMode            DeleteMode    // When to delete during sync (before/during/after)
	MaxDelete             int64         // Limit on number of deletes when syncing
	TrackRenames          bool          // Track file renames.
	LowLevelRetries       int           // Number of low level retries
	UpdateOlder           bool          // Skip files that are newer on the destination
	NoGzip                bool          // Disable compression
	MaxDepth              int           // Limits recursion depth if set
	IgnoreSize            bool          // Ignore size when deciding whether to skip
	IgnoreChecksum        bool          // Skip post copy check of checksums
	NoUpdateModTime       bool          // Don't update destination mod-time if files identical
	DataRateUnit          string        // Unit used when displaying rates
	BackupDir             string        // Make backups into hierarchy based in this dir
	Suffix                string        // Suffix for use with BackupDir
	UseListR              bool          // Use recursive list if available
	BufferSize            SizeSuffix    // In memory buffer size per transfer
	BwLimit               BwTimetable   // Bandwidth limit (possibly a timetable)
	TPSLimit              float64       // Limit HTTP transactions per second
	TPSLimitBurst         int           // Max burst of transactions for TPSLimit
	BindAddr              net.IP        // Local address to bind outgoing connections to
	DisableFeatures       []string      // Backend features to disable
	UserAgent             string        // User-agent string sent to servers
	Immutable             bool          // Do not modify files; fail if existing files modified
	AutoConfirm           bool          // Do not request console confirmation
	StreamingUploadCutoff SizeSuffix    // Cutoff for switching to chunked upload
	StatsFileNameLength   int           // Max file name length in stats, 0 for no limit
	AskPassword           bool          // Allow prompting for config password
	UseServerModTime      bool          // Use server modified time instead of object metadata
	MaxTransfer           SizeSuffix    // Maximum size of data to transfer
	MaxBacklog            int           // Maximum number of objects in sync or check backlog
	StatsOneLine          bool          // Make the stats fit on one line
	Progress              bool          // Show progress during transfer
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
// value. These are the ultimate defaults and are overridden by the
// config module.
func NewConfig() *ConfigInfo {
	c := new(ConfigInfo)

	// Set any values which aren't the zero for the type
	c.LogLevel = LogLevelNotice
	c.StatsLogLevel = LogLevelInfo
	c.ModifyWindow = time.Nanosecond
	c.Checkers = 8
	c.Transfers = 4
	c.ConnectTimeout = 60 * time.Second
	c.Timeout = 5 * 60 * time.Second // 5 minutes
	c.DeleteMode = DeleteModeDefault
	c.MaxDelete = -1
	c.LowLevelRetries = 10
	c.MaxDepth = -1
	c.DataRateUnit = "bytes"
	c.BufferSize = SizeSuffix(16 << 20) // 16 MiB
	c.UserAgent = "rclone/" + Version
	c.StreamingUploadCutoff = SizeSuffix(100 * 1024) // 100 KiB
	c.StatsFileNameLength = 40
	c.AskPassword = true
	c.TPSLimitBurst = 1
	c.MaxTransfer = -1
	c.MaxBacklog = 10000

	return c
}
|
||||
|
||||
// ConfigToEnv converts a config section and name, eg ("myremote",
// "ignore-size") into an environment name
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
func ConfigToEnv(section, name string) string {
	key := strings.Replace(section+"_"+name, "-", "_", -1)
	return "RCLONE_CONFIG_" + strings.ToUpper(key)
}
|
||||
|
||||
// OptionToEnv converts an option name, eg "ignore-size" into an
// environment name "RCLONE_IGNORE_SIZE"
func OptionToEnv(name string) string {
	key := strings.Replace(name, "-", "_", -1)
	return "RCLONE_" + strings.ToUpper(key)
}
|
||||
1346
.rclone_repo/fs/config/config.go
Executable file
1346
.rclone_repo/fs/config/config.go
Executable file
File diff suppressed because it is too large
Load Diff
10
.rclone_repo/fs/config/config_other.go
Executable file
10
.rclone_repo/fs/config/config_other.go
Executable file
@@ -0,0 +1,10 @@
|
||||
// Read, write and edit the config file
|
||||
// Non-unix specific functions.
|
||||
|
||||
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
||||
|
||||
package config
|
||||
|
||||
// attemptCopyGroup tries to keep the group the same, which only makes sense
// for systems with a user-group-world permission model. On platforms
// without that model this is a no-op.
func attemptCopyGroup(fromPath, toPath string) {}
|
||||
29
.rclone_repo/fs/config/config_read_password.go
Executable file
29
.rclone_repo/fs/config/config_read_password.go
Executable file
@@ -0,0 +1,29 @@
|
||||
// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
|
||||
// See https://github.com/golang/go/issues/14441 - plan9
|
||||
// https://github.com/golang/go/issues/13085 - solaris
|
||||
|
||||
// +build !solaris,!plan9
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
// ReadPassword reads a password without echoing it to the terminal.
func ReadPassword() string {
	stdin := int(os.Stdin.Fd())
	if !terminal.IsTerminal(stdin) {
		// stdin is piped/redirected - fall back to a plain line read
		return ReadLine()
	}
	line, err := terminal.ReadPassword(stdin)
	// terminal.ReadPassword suppresses the newline the user typed,
	// so emit one ourselves (to stderr so it doesn't pollute stdout)
	_, _ = fmt.Fprintln(os.Stderr)
	if err != nil {
		log.Fatalf("Failed to read password: %v", err)
	}
	return string(line)
}
|
||||
12
.rclone_repo/fs/config/config_read_password_unsupported.go
Executable file
12
.rclone_repo/fs/config/config_read_password_unsupported.go
Executable file
@@ -0,0 +1,12 @@
|
||||
// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
|
||||
// See https://github.com/golang/go/issues/14441 - plan9
|
||||
// https://github.com/golang/go/issues/13085 - solaris
|
||||
|
||||
// +build solaris plan9
|
||||
|
||||
package config
|
||||
|
||||
// ReadPassword reads a password, echoing it to the terminal (no-echo
// reads are unsupported on this platform).
func ReadPassword() string {
	return ReadLine()
}
|
||||
229
.rclone_repo/fs/config/config_test.go
Executable file
229
.rclone_repo/fs/config/config_test.go
Executable file
@@ -0,0 +1,229 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestCRUD exercises create, rename and delete of a remote in a
// temporary config file. All global config state (stdout, config path,
// global config, loaded file, ReadLine) is swapped out and restored.
func TestCRUD(t *testing.T) {
	configKey = nil // reset password
	// create temp config file
	tempFile, err := ioutil.TempFile("", "crud.conf")
	assert.NoError(t, err)
	path := tempFile.Name()
	defer func() {
		err := os.Remove(path)
		assert.NoError(t, err)
	}()
	assert.NoError(t, tempFile.Close())

	// temporarily adapt configuration
	oldOsStdout := os.Stdout
	oldConfigPath := ConfigPath
	oldConfig := fs.Config
	oldConfigFile := configFile
	oldReadLine := ReadLine
	os.Stdout = nil // silence interactive output
	ConfigPath = path
	fs.Config = &fs.ConfigInfo{}
	configFile = nil
	defer func() {
		os.Stdout = oldOsStdout
		ConfigPath = oldConfigPath
		ReadLine = oldReadLine
		fs.Config = oldConfig
		configFile = oldConfigFile
	}()

	LoadConfig()
	assert.Equal(t, []string{}, getConfigData().GetSectionList())

	// Fake a remote
	fs.Register(&fs.RegInfo{Name: "config_test_remote"})

	// add new remote - script the interactive prompts via ReadLine
	i := 0
	ReadLine = func() string {
		answers := []string{
			"config_test_remote", // type
			"y",                  // looks good, save
		}
		i = i + 1
		return answers[i-1]
	}

	NewRemote("test")
	assert.Equal(t, []string{"test"}, configFile.GetSectionList())

	// Reload the config file to workaround this bug
	// https://github.com/Unknwon/goconfig/issues/39
	configFile, err = loadConfigFile()
	require.NoError(t, err)

	// normal rename, test → asdf
	ReadLine = func() string { return "asdf" }
	RenameRemote("test")
	assert.Equal(t, []string{"asdf"}, configFile.GetSectionList())

	// no-op rename, asdf → asdf
	RenameRemote("asdf")
	assert.Equal(t, []string{"asdf"}, configFile.GetSectionList())

	// delete remote
	DeleteRemote("asdf")
	assert.Equal(t, []string{}, configFile.GetSectionList())
}
|
||||
|
||||
// TestReveal checks the exact error messages obscure.Reveal produces
// for malformed (non-obscured) input.
func TestReveal(t *testing.T) {
	for _, test := range []struct {
		in      string
		wantErr string
	}{
		{"YmJiYmJiYmJiYmJiYmJiYp*gcEWbAw", "base64 decode failed when revealing password - is it obscured?: illegal base64 data at input byte 22"},
		{"aGVsbG8", "input too short when revealing password - is it obscured?"},
		{"", "input too short when revealing password - is it obscured?"},
	} {
		gotString, gotErr := obscure.Reveal(test.in)
		assert.Equal(t, "", gotString)
		assert.Equal(t, test.wantErr, gotErr.Error())
	}
}
|
||||
|
||||
// TestConfigLoad loads an unencrypted config file and checks the
// sections and keys come back in the expected order.
func TestConfigLoad(t *testing.T) {
	oldConfigPath := ConfigPath
	ConfigPath = "./testdata/plain.conf"
	defer func() {
		ConfigPath = oldConfigPath
	}()
	configKey = nil // reset password
	c, err := loadConfigFile()
	if err != nil {
		t.Fatal(err)
	}
	sections := c.GetSectionList()
	var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"}
	assert.Equal(t, expect, sections)

	keys := c.GetKeyList("nounc")
	expect = []string{"type", "nounc"}
	assert.Equal(t, expect, keys)
}
|
||||
|
||||
// TestConfigLoadEncrypted loads an encrypted config file using the
// correct password and checks its sections and keys.
func TestConfigLoadEncrypted(t *testing.T) {
	var err error
	oldConfigPath := ConfigPath
	ConfigPath = "./testdata/encrypted.conf"
	defer func() {
		ConfigPath = oldConfigPath
		configKey = nil // reset password
	}()

	// Set correct password
	err = setConfigPassword("asdf")
	require.NoError(t, err)
	c, err := loadConfigFile()
	require.NoError(t, err)
	sections := c.GetSectionList()
	var expect = []string{"nounc", "unc"}
	assert.Equal(t, expect, sections)

	keys := c.GetKeyList("nounc")
	expect = []string{"type", "nounc"}
	assert.Equal(t, expect, keys)
}
|
||||
|
||||
// TestConfigLoadEncryptedFailures checks that malformed or missing
// encrypted config files produce errors rather than bad config.
func TestConfigLoadEncryptedFailures(t *testing.T) {
	var err error

	// This file should be too short to be decoded.
	oldConfigPath := ConfigPath
	ConfigPath = "./testdata/enc-short.conf"
	defer func() { ConfigPath = oldConfigPath }()
	_, err = loadConfigFile()
	require.Error(t, err)

	// This file contains invalid base64 characters.
	ConfigPath = "./testdata/enc-invalid.conf"
	_, err = loadConfigFile()
	require.Error(t, err)

	// This file is encrypted with a version that is too new
	// (judging by the file name - the original comment wrongly
	// repeated "invalid base64 characters").
	ConfigPath = "./testdata/enc-too-new.conf"
	_, err = loadConfigFile()
	require.Error(t, err)

	// This file does not exist.
	ConfigPath = "./testdata/filenotfound.conf"
	c, err := loadConfigFile()
	assert.Equal(t, errorConfigFileNotFound, err)
	assert.Nil(t, c)
}
|
||||
|
||||
// TestPassword checks config password validation and key derivation:
// empty/invalid passwords are rejected, unicode-normalized forms match,
// and case is significant.
func TestPassword(t *testing.T) {
	defer func() {
		configKey = nil // reset password
	}()
	var err error
	// Empty password should give error
	err = setConfigPassword("  \t  ")
	require.Error(t, err)

	// Test invalid utf8 sequence
	err = setConfigPassword(string([]byte{0xff, 0xfe, 0xfd}) + "abc")
	require.Error(t, err)

	// Simple check of wrong passwords
	hashedKeyCompare(t, "mis", "match", false)

	// Check that passwords match after unicode normalization
	hashedKeyCompare(t, "ff\u0041\u030A", "ffÅ", true)

	// Check that passwords preserves case
	hashedKeyCompare(t, "abcdef", "ABCDEF", false)

}
|
||||
|
||||
// hashedKeyCompare sets the config password to a then b and asserts
// whether the derived configKey values match.
func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
	err := setConfigPassword(a)
	require.NoError(t, err)
	k1 := configKey

	err = setConfigPassword(b)
	require.NoError(t, err)
	k2 := configKey

	if shouldMatch {
		assert.Equal(t, k1, k2)
	} else {
		assert.NotEqual(t, k1, k2)
	}
}
|
||||
|
||||
func TestMatchProvider(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
config string
|
||||
provider string
|
||||
want bool
|
||||
}{
|
||||
{"", "", true},
|
||||
{"one", "one", true},
|
||||
{"one,two", "two", true},
|
||||
{"one,two,three", "two", true},
|
||||
{"one", "on", false},
|
||||
{"one,two,three", "tw", false},
|
||||
{"!one,two,three", "two", false},
|
||||
{"!one,two,three", "four", true},
|
||||
} {
|
||||
what := fmt.Sprintf("%q,%q", test.config, test.provider)
|
||||
got := matchProvider(test.config, test.provider)
|
||||
assert.Equal(t, test.want, got, what)
|
||||
}
|
||||
}
|
||||
37
.rclone_repo/fs/config/config_unix.go
Executable file
37
.rclone_repo/fs/config/config_unix.go
Executable file
@@ -0,0 +1,37 @@
|
||||
// Read, write and edit the config file
|
||||
// Unix specific functions.
|
||||
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"syscall"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// attemptCopyGroups tries to keep the group the same. User will be the one
|
||||
// who is currently running this process.
|
||||
func attemptCopyGroup(fromPath, toPath string) {
|
||||
info, err := os.Stat(fromPath)
|
||||
if err != nil || info.Sys() == nil {
|
||||
return
|
||||
}
|
||||
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
|
||||
uid := int(stat.Uid)
|
||||
// prefer self over previous owner of file, because it has a higher chance
|
||||
// of success
|
||||
if user, err := user.Current(); err == nil {
|
||||
if tmpUID, err := strconv.Atoi(user.Uid); err == nil {
|
||||
uid = tmpUID
|
||||
}
|
||||
}
|
||||
if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
|
||||
fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
172
.rclone_repo/fs/config/configflags/configflags.go
Executable file
172
.rclone_repo/fs/config/configflags/configflags.go
Executable file
@@ -0,0 +1,172 @@
|
||||
// Package configflags defines the flags used by rclone. It is
|
||||
// decoupled into a separate package so it can be replaced.
|
||||
package configflags
|
||||
|
||||
// Options set by command line flags
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var (
|
||||
// these will get interpreted into fs.Config via SetFlags() below
|
||||
verbose int
|
||||
quiet bool
|
||||
dumpHeaders bool
|
||||
dumpBodies bool
|
||||
deleteBefore bool
|
||||
deleteDuring bool
|
||||
deleteAfter bool
|
||||
bindAddr string
|
||||
disableFeatures string
|
||||
noTraverse bool
|
||||
)
|
||||
|
||||
// AddFlags adds the non filing system specific flags to the command
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
|
||||
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
|
||||
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
|
||||
flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same")
|
||||
flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.")
|
||||
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
|
||||
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
|
||||
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum & size, not mod-time & size")
|
||||
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors")
|
||||
flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes")
|
||||
flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout")
|
||||
flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout")
|
||||
flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP bodies - may contain sensitive info")
|
||||
flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
|
||||
flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
|
||||
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transfering")
|
||||
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
|
||||
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transfering (default)")
|
||||
flags.IntVar64P(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
|
||||
flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible")
|
||||
flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata")
|
||||
flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.")
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
|
||||
flags.BoolVarP(flagSet, &noTraverse, "no-traverse", "", noTraverse, "Obsolete - does nothing.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
|
||||
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
|
||||
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
|
||||
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
|
||||
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
|
||||
flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
|
||||
flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
|
||||
flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.")
|
||||
flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
|
||||
flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
|
||||
flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
|
||||
flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
|
||||
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
|
||||
flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
|
||||
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
|
||||
flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
|
||||
}
|
||||
|
||||
// SetFlags converts any flags into config which weren't straight forward
func SetFlags() {
	// -v / -vv raise the log level; -q lowers it; mixing them is an error
	if verbose >= 2 {
		fs.Config.LogLevel = fs.LogLevelDebug
	} else if verbose >= 1 {
		fs.Config.LogLevel = fs.LogLevelInfo
	}
	if quiet {
		if verbose > 0 {
			log.Fatalf("Can't set -v and -q")
		}
		fs.Config.LogLevel = fs.LogLevelError
	}
	// --log-level is mutually exclusive with both -v and -q
	logLevelFlag := pflag.Lookup("log-level")
	if logLevelFlag != nil && logLevelFlag.Changed {
		if verbose > 0 {
			log.Fatalf("Can't set -v and --log-level")
		}
		if quiet {
			log.Fatalf("Can't set -q and --log-level")
		}
	}

	if noTraverse {
		fs.Logf(nil, "--no-traverse is obsolete and no longer needed - please remove")
	}

	// Map the obsolete --dump-headers/--dump-bodies flags onto --dump bits
	if dumpHeaders {
		fs.Config.Dump |= fs.DumpHeaders
		fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
	}
	if dumpBodies {
		fs.Config.Dump |= fs.DumpBodies
		fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
	}

	// At most one of the --delete-* flags may be set
	switch {
	case deleteBefore && (deleteDuring || deleteAfter),
		deleteDuring && deleteAfter:
		log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
	case deleteBefore:
		fs.Config.DeleteMode = fs.DeleteModeBefore
	case deleteDuring:
		fs.Config.DeleteMode = fs.DeleteModeDuring
	case deleteAfter:
		fs.Config.DeleteMode = fs.DeleteModeAfter
	default:
		fs.Config.DeleteMode = fs.DeleteModeDefault
	}

	if fs.Config.IgnoreSize && fs.Config.SizeOnly {
		log.Fatalf(`Can't use --size-only and --ignore-size together.`)
	}

	if fs.Config.Suffix != "" && fs.Config.BackupDir == "" {
		log.Fatalf(`Can only use --suffix with --backup-dir.`)
	}

	// Resolve --bind to exactly one IP address
	if bindAddr != "" {
		addrs, err := net.LookupIP(bindAddr)
		if err != nil {
			log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err)
		}
		if len(addrs) != 1 {
			log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
		}
		fs.Config.BindAddr = addrs[0]
	}

	// --disable help lists the features; otherwise split the list
	if disableFeatures != "" {
		if disableFeatures == "help" {
			log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
		}
		fs.Config.DisableFeatures = strings.Split(disableFeatures, ",")
	}

	// Make the config file absolute
	configPath, err := filepath.Abs(config.ConfigPath)
	if err == nil {
		config.ConfigPath = configPath
	}
}
|
||||
86
.rclone_repo/fs/config/configmap/configmap.go
Executable file
86
.rclone_repo/fs/config/configmap/configmap.go
Executable file
@@ -0,0 +1,86 @@
|
||||
// Package configmap provides an abstraction for reading and writing config
|
||||
package configmap
|
||||
|
||||
// Getter provides an interface to get config items
type Getter interface {
	// Get should get an item with the key passed in and return
	// the value. If the item is found then it should return true,
	// otherwise false.
	Get(key string) (value string, ok bool)
}

// Setter provides an interface to set config items
type Setter interface {
	// Set should set an item into persistent config store.
	Set(key, value string)
}

// Mapper provides an interface to read and write config
type Mapper interface {
	Getter
	Setter
}

// Map provides a wrapper around multiple Setter and
// Getter interfaces.
type Map struct {
	setters []Setter
	getters []Getter
}

// New returns an empty Map
func New() *Map {
	return new(Map)
}

// AddGetter appends a getter onto the end of the getters
func (m *Map) AddGetter(getter Getter) *Map {
	m.getters = append(m.getters, getter)
	return m
}

// AddGetters appends multiple getters onto the end of the getters
func (m *Map) AddGetters(getters ...Getter) *Map {
	m.getters = append(m.getters, getters...)
	return m
}

// AddSetter appends a setter onto the end of the setters
func (m *Map) AddSetter(setter Setter) *Map {
	m.setters = append(m.setters, setter)
	return m
}

// Get gets an item with the key passed in and return the value from
// the first getter. If the item is found then it returns true,
// otherwise false.
func (m *Map) Get(key string) (value string, ok bool) {
	// Earlier getters take priority over later ones.
	for _, getter := range m.getters {
		if v, found := getter.Get(key); found {
			return v, true
		}
	}
	return "", false
}

// Set sets an item into all the stored setters.
func (m *Map) Set(key, value string) {
	for _, setter := range m.setters {
		setter.Set(key, value)
	}
}

// Simple is a simple Mapper for testing
type Simple map[string]string

// Get the value
func (s Simple) Get(key string) (value string, ok bool) {
	v, found := s[key]
	return v, found
}

// Set the value
func (s Simple) Set(key, value string) {
	s[key] = value
}
|
||||
91
.rclone_repo/fs/config/configmap/configmap_test.go
Executable file
91
.rclone_repo/fs/config/configmap/configmap_test.go
Executable file
@@ -0,0 +1,91 @@
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Compile-time checks that Simple satisfies all three interfaces.
var (
	_ Mapper = Simple(nil)
	_ Getter = Simple(nil)
	_ Setter = Simple(nil)
)

// TestConfigMapGet checks that Map.Get consults getters in the order
// they were added, with earlier getters taking priority.
func TestConfigMapGet(t *testing.T) {
	m := New()

	// No getters registered yet - nothing should be found.
	value, found := m.Get("config1")
	assert.Equal(t, "", value)
	assert.Equal(t, false, found)

	value, found = m.Get("config2")
	assert.Equal(t, "", value)
	assert.Equal(t, false, found)

	m1 := Simple{
		"config1": "one",
	}

	m.AddGetter(m1)

	value, found = m.Get("config1")
	assert.Equal(t, "one", value)
	assert.Equal(t, true, found)

	value, found = m.Get("config2")
	assert.Equal(t, "", value)
	assert.Equal(t, false, found)

	m2 := Simple{
		"config1": "one2",
		"config2": "two2",
	}

	m.AddGetter(m2)

	// m1 was added first, so it still wins for config1 ...
	value, found = m.Get("config1")
	assert.Equal(t, "one", value)
	assert.Equal(t, true, found)

	// ... but config2 only exists in m2.
	value, found = m.Get("config2")
	assert.Equal(t, "two2", value)
	assert.Equal(t, true, found)

}

// TestConfigMapSet checks that Map.Set writes through to every
// registered setter.
func TestConfigMapSet(t *testing.T) {
	m := New()

	m1 := Simple{
		"config1": "one",
	}
	m2 := Simple{
		"config1": "one2",
		"config2": "two2",
	}

	m.AddSetter(m1).AddSetter(m2)

	m.Set("config2", "potato")

	// Both setters receive the new value.
	assert.Equal(t, Simple{
		"config1": "one",
		"config2": "potato",
	}, m1)
	assert.Equal(t, Simple{
		"config1": "one2",
		"config2": "potato",
	}, m2)

	m.Set("config1", "beetroot")

	assert.Equal(t, Simple{
		"config1": "beetroot",
		"config2": "potato",
	}, m1)
	assert.Equal(t, Simple{
		"config1": "beetroot",
		"config2": "potato",
	}, m2)
}
|
||||
127
.rclone_repo/fs/config/configstruct/configstruct.go
Executable file
127
.rclone_repo/fs/config/configstruct/configstruct.go
Executable file
@@ -0,0 +1,127 @@
|
||||
// Package configstruct parses unstructured maps into structures
|
||||
package configstruct
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// matchUpper matches one or more consecutive upper case letters.
var matchUpper = regexp.MustCompile("([A-Z]+)")

// camelToSnake converts CamelCase to snake_case
func camelToSnake(in string) string {
	// Prefix each run of upper case letters with an underscore,
	// lower case the result, then drop any leading/trailing "_".
	withUnderscores := matchUpper.ReplaceAllString(in, "_$1")
	return strings.Trim(strings.ToLower(withUnderscores), "_")
}
|
||||
|
||||
// StringToInterface turns in into an interface{} the same type as def
|
||||
func StringToInterface(def interface{}, in string) (newValue interface{}, err error) {
|
||||
typ := reflect.TypeOf(def)
|
||||
switch typ.Kind() {
|
||||
case reflect.String:
|
||||
// Pass strings unmodified
|
||||
return in, nil
|
||||
}
|
||||
// Otherwise parse with Sscanln
|
||||
//
|
||||
// This means any types we use here must implement fmt.Scanner
|
||||
o := reflect.New(typ)
|
||||
n, err := fmt.Sscanln(in, o.Interface())
|
||||
if err != nil {
|
||||
return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def)
|
||||
}
|
||||
if n != 1 {
|
||||
return newValue, errors.New("no items parsed")
|
||||
}
|
||||
return o.Elem().Interface(), nil
|
||||
}
|
||||
|
||||
// Item descripts a single entry in the options structure
|
||||
type Item struct {
|
||||
Name string // snake_case
|
||||
Field string // CamelCase
|
||||
Num int // number of the field in the struct
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Items parses the opt struct and returns a slice of Item objects.
|
||||
//
|
||||
// opt must be a pointer to a struct. The struct should have entirely
|
||||
// public fields.
|
||||
//
|
||||
// The config_name is looked up in a struct tag called "config" or if
|
||||
// not found is the field name converted from CamelCase to snake_case.
|
||||
func Items(opt interface{}) (items []Item, err error) {
|
||||
def := reflect.ValueOf(opt)
|
||||
if def.Kind() != reflect.Ptr {
|
||||
return nil, errors.New("argument must be a pointer")
|
||||
}
|
||||
def = def.Elem() // indirect the pointer
|
||||
if def.Kind() != reflect.Struct {
|
||||
return nil, errors.New("argument must be a pointer to a struct")
|
||||
}
|
||||
defType := def.Type()
|
||||
for i := 0; i < def.NumField(); i++ {
|
||||
field := defType.Field(i)
|
||||
fieldName := field.Name
|
||||
configName, ok := field.Tag.Lookup("config")
|
||||
if !ok {
|
||||
configName = camelToSnake(fieldName)
|
||||
}
|
||||
defaultItem := Item{
|
||||
Name: configName,
|
||||
Field: fieldName,
|
||||
Num: i,
|
||||
Value: def.Field(i).Interface(),
|
||||
}
|
||||
items = append(items, defaultItem)
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// Set interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields. The field names are converted from CamelCase to
// snake_case and looked up in the config supplied or a
// `config:"field_name"` is looked up.
//
// If items are found then they are converted from string to native
// types and set in opt.
//
// All the field types in the struct must implement fmt.Scanner.
func Set(config configmap.Getter, opt interface{}) (err error) {
	// Enumerate the fields (and their current values, which act as
	// defaults) of the struct pointed to by opt.
	defaultItems, err := Items(opt)
	if err != nil {
		return err
	}
	defStruct := reflect.ValueOf(opt).Elem()
	for _, defaultItem := range defaultItems {
		// Start from the field's current value; only replace it if
		// the config supplies a parseable override.
		newValue := defaultItem.Value
		if configValue, ok := config.Get(defaultItem.Name); ok {
			var newNewValue interface{}
			newNewValue, err = StringToInterface(newValue, configValue)
			if err != nil {
				// Mask errors if setting an empty string as
				// it isn't valid for all types. This makes
				// empty string be the equivalent of unset.
				if configValue != "" {
					return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value)
				}
			} else {
				newValue = newNewValue
			}
		}
		// Write the (possibly unchanged) value back into the struct
		// field by index.
		defStruct.Field(defaultItem.Num).Set(reflect.ValueOf(newValue))
	}
	return nil
}
|
||||
116
.rclone_repo/fs/config/configstruct/configstruct_test.go
Executable file
116
.rclone_repo/fs/config/configstruct/configstruct_test.go
Executable file
@@ -0,0 +1,116 @@
|
||||
package configstruct_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// conf is a minimal struct for exercising Set with plain strings.
type conf struct {
	A string
	B string
}

// conf2 exercises tag handling plus every supported field type.
type conf2 struct {
	PotatoPie      string `config:"spud_pie"`
	BeanStew       bool
	RaisinRoll     int
	SausageOnStick int64
	ForbiddenFruit uint
	CookingTime    fs.Duration
	TotalWeight    fs.SizeSuffix
}

// TestItemsError checks the argument validation in Items.
func TestItemsError(t *testing.T) {
	_, err := configstruct.Items(nil)
	assert.EqualError(t, err, "argument must be a pointer")
	_, err = configstruct.Items(new(int))
	assert.EqualError(t, err, "argument must be a pointer to a struct")
}

// TestItems checks name derivation (tag vs CamelCase), field numbering
// and default-value capture.
func TestItems(t *testing.T) {
	in := &conf2{
		PotatoPie:      "yum",
		BeanStew:       true,
		RaisinRoll:     42,
		SausageOnStick: 101,
		ForbiddenFruit: 6,
		CookingTime:    fs.Duration(42 * time.Second),
		TotalWeight:    fs.SizeSuffix(17 << 20),
	}
	got, err := configstruct.Items(in)
	require.NoError(t, err)
	want := []configstruct.Item{
		{Name: "spud_pie", Field: "PotatoPie", Num: 0, Value: string("yum")},
		{Name: "bean_stew", Field: "BeanStew", Num: 1, Value: true},
		{Name: "raisin_roll", Field: "RaisinRoll", Num: 2, Value: int(42)},
		{Name: "sausage_on_stick", Field: "SausageOnStick", Num: 3, Value: int64(101)},
		{Name: "forbidden_fruit", Field: "ForbiddenFruit", Num: 4, Value: uint(6)},
		{Name: "cooking_time", Field: "CookingTime", Num: 5, Value: fs.Duration(42 * time.Second)},
		{Name: "total_weight", Field: "TotalWeight", Num: 6, Value: fs.SizeSuffix(17 << 20)},
	}
	assert.Equal(t, want, got)
}

// TestSetBasics checks that an empty config leaves the struct alone.
func TestSetBasics(t *testing.T) {
	c := &conf{A: "one", B: "two"}
	err := configstruct.Set(configMap{}, c)
	require.NoError(t, err)
	assert.Equal(t, &conf{A: "one", B: "two"}, c)
}

// a simple configmap.Getter for testing
type configMap map[string]string

// Get the value
func (c configMap) Get(key string) (value string, ok bool) {
	value, ok = c[key]
	return value, ok
}

// TestSetMore checks that supplied keys override and missing keys keep
// their defaults.
func TestSetMore(t *testing.T) {
	c := &conf{A: "one", B: "two"}
	m := configMap{
		"a": "ONE",
	}
	err := configstruct.Set(m, c)
	require.NoError(t, err)
	assert.Equal(t, &conf{A: "ONE", B: "two"}, c)
}

// TestSetFull checks parsing into every supported field type,
// including whitespace and hex forms accepted by fmt.Sscanln.
func TestSetFull(t *testing.T) {
	in := &conf2{
		PotatoPie:      "yum",
		BeanStew:       true,
		RaisinRoll:     42,
		SausageOnStick: 101,
		ForbiddenFruit: 6,
		CookingTime:    fs.Duration(42 * time.Second),
		TotalWeight:    fs.SizeSuffix(17 << 20),
	}
	m := configMap{
		"spud_pie":         "YUM",
		"bean_stew":        "FALSE",
		"raisin_roll":      "43 ",
		"sausage_on_stick": " 102 ",
		"forbidden_fruit":  "0x7",
		"cooking_time":     "43s",
		"total_weight":     "18M",
	}
	want := &conf2{
		PotatoPie:      "YUM",
		BeanStew:       false,
		RaisinRoll:     43,
		SausageOnStick: 102,
		ForbiddenFruit: 7,
		CookingTime:    fs.Duration(43 * time.Second),
		TotalWeight:    fs.SizeSuffix(18 << 20),
	}
	err := configstruct.Set(m, in)
	require.NoError(t, err)
	assert.Equal(t, want, in)
}
|
||||
60
.rclone_repo/fs/config/configstruct/internal_test.go
Executable file
60
.rclone_repo/fs/config/configstruct/internal_test.go
Executable file
@@ -0,0 +1,60 @@
|
||||
package configstruct
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestCamelToSnake checks the CamelCase -> snake_case conversion,
// including runs of upper case letters ("ID") staying together.
func TestCamelToSnake(t *testing.T) {
	for _, test := range []struct {
		in   string
		want string
	}{
		{"", ""},
		{"Type", "type"},
		{"AuthVersion", "auth_version"},
		{"AccessKeyID", "access_key_id"},
	} {
		got := camelToSnake(test.in)
		assert.Equal(t, test.want, got, test.in)
	}
}

// TestStringToInterface checks conversion of strings into values of
// the same type as a default, covering success and error paths.
func TestStringToInterface(t *testing.T) {
	item := struct{ A int }{2}
	for _, test := range []struct {
		in   string
		def  interface{}
		want interface{}
		err  string
	}{
		{"", string(""), "", ""},
		{" string ", string(""), " string ", ""},
		{"123", int(0), int(123), ""},
		{"0x123", int(0), int(0x123), ""},
		{" 0x123 ", int(0), int(0x123), ""},
		{"-123", int(0), int(-123), ""},
		{"0", false, false, ""},
		{"1", false, true, ""},
		{"FALSE", false, false, ""},
		{"true", false, true, ""},
		{"123", uint(0), uint(123), ""},
		{"123", int64(0), int64(123), ""},
		{"123x", int64(0), nil, "parsing \"123x\" as int64 failed: expected newline"},
		{"truth", false, nil, "parsing \"truth\" as bool failed: syntax error scanning boolean"},
		{"struct", item, nil, "parsing \"struct\" as struct { A int } failed: can't scan type: *struct { A int }"},
	} {
		what := fmt.Sprintf("parse %q as %T", test.in, test.def)
		got, err := StringToInterface(test.def, test.in)
		if test.err == "" {
			require.NoError(t, err, what)
			assert.Equal(t, test.want, got, what)
		} else {
			assert.Nil(t, got)
			assert.EqualError(t, err, test.err, what)
		}
	}
}
|
||||
195
.rclone_repo/fs/config/flags/flags.go
Executable file
195
.rclone_repo/fs/config/flags/flags.go
Executable file
@@ -0,0 +1,195 @@
|
||||
// Package flags contains enhanced versions of spf13/pflag flag
|
||||
// routines which will read from the environment also.
|
||||
package flags
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// setDefaultFromEnv constructs a name from the flag passed in and
|
||||
// sets the default from the environment if possible.
|
||||
func setDefaultFromEnv(name string) {
|
||||
key := fs.OptionToEnv(name)
|
||||
newValue, found := os.LookupEnv(key)
|
||||
if found {
|
||||
flag := pflag.Lookup(name)
|
||||
if flag == nil {
|
||||
log.Fatalf("Couldn't find flag %q", name)
|
||||
}
|
||||
err := flag.Value.Set(newValue)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid value for environment variable %q: %v", key, err)
|
||||
}
|
||||
fs.Debugf(nil, "Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
|
||||
flag.DefValue = newValue
|
||||
}
|
||||
}
|
||||
|
||||
// StringP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.StringP
func StringP(name, shorthand string, value string, usage string) (out *string) {
	out = pflag.StringP(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// StringVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.StringVarP
func StringVarP(flags *pflag.FlagSet, p *string, name, shorthand string, value string, usage string) {
	flags.StringVarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// BoolP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.BoolP
func BoolP(name, shorthand string, value bool, usage string) (out *bool) {
	out = pflag.BoolP(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// BoolVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.BoolVarP
func BoolVarP(flags *pflag.FlagSet, p *bool, name, shorthand string, value bool, usage string) {
	flags.BoolVarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// IntP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.IntP
func IntP(name, shorthand string, value int, usage string) (out *int) {
	out = pflag.IntP(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// Int64P defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Int64P
func Int64P(name, shorthand string, value int64, usage string) (out *int64) {
	out = pflag.Int64P(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// IntVar64P defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Int64VarP
//
// NOTE(review): the conventional name would be Int64VarP to match
// pflag - kept as-is for backward compatibility with existing callers.
func IntVar64P(flags *pflag.FlagSet, p *int64, name, shorthand string, value int64, usage string) {
	flags.Int64VarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// IntVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.IntVarP
func IntVarP(flags *pflag.FlagSet, p *int, name, shorthand string, value int, usage string) {
	flags.IntVarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// Uint32VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Uint32VarP
func Uint32VarP(flags *pflag.FlagSet, p *uint32, name, shorthand string, value uint32, usage string) {
	flags.Uint32VarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// Float64P defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Float64P
func Float64P(name, shorthand string, value float64, usage string) (out *float64) {
	out = pflag.Float64P(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// Float64VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Float64VarP
func Float64VarP(flags *pflag.FlagSet, p *float64, name, shorthand string, value float64, usage string) {
	flags.Float64VarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// DurationP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.DurationP
func DurationP(name, shorthand string, value time.Duration, usage string) (out *time.Duration) {
	out = pflag.DurationP(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// DurationVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.DurationVarP
func DurationVarP(flags *pflag.FlagSet, p *time.Duration, name, shorthand string, value time.Duration, usage string) {
	flags.DurationVarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.VarP
func VarP(value pflag.Value, name, shorthand, usage string) {
	pflag.VarP(value, name, shorthand, usage)
	setDefaultFromEnv(name)
}

// FVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.VarP
func FVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) {
	flags.VarP(value, name, shorthand, usage)
	setDefaultFromEnv(name)
}

// StringArrayP defines a flag which can be overridden by an environment variable
//
// It sets one value only - command line flags can be used to set more.
//
// It is a thin wrapper around pflag.StringArrayP
func StringArrayP(name, shorthand string, value []string, usage string) (out *[]string) {
	out = pflag.StringArrayP(name, shorthand, value, usage)
	setDefaultFromEnv(name)
	return out
}

// StringArrayVarP defines a flag which can be overridden by an environment variable
//
// It sets one value only - command line flags can be used to set more.
//
// It is a thin wrapper around pflag.StringArrayVarP
func StringArrayVarP(flags *pflag.FlagSet, p *[]string, name, shorthand string, value []string, usage string) {
	flags.StringArrayVarP(p, name, shorthand, value, usage)
	setDefaultFromEnv(name)
}

// CountP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.CountP
func CountP(name, shorthand string, usage string) (out *int) {
	out = pflag.CountP(name, shorthand, usage)
	setDefaultFromEnv(name)
	return out
}

// CountVarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.CountVarP
func CountVarP(flags *pflag.FlagSet, p *int, name, shorthand string, usage string) {
	flags.CountVarP(p, name, shorthand, usage)
	setDefaultFromEnv(name)
}
|
||||
94
.rclone_repo/fs/config/obscure/obscure.go
Executable file
94
.rclone_repo/fs/config/obscure/obscure.go
Executable file
@@ -0,0 +1,94 @@
|
||||
// Package obscure contains the Obscure and Reveal commands
|
||||
package obscure
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// crypt internals
var (
	cryptKey = []byte{
		0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
		0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
		0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
		0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
	}
	cryptBlock cipher.Block
	cryptRand  = rand.Reader
)

// crypt transforms in to out using iv under AES-CTR.
//
// in and out may be the same buffer.
//
// Note encryption and decryption are the same operation
func crypt(out, in, iv []byte) error {
	// Lazily create the AES block cipher on first use.
	if cryptBlock == nil {
		block, err := aes.NewCipher(cryptKey)
		if err != nil {
			return err
		}
		cryptBlock = block
	}
	cipher.NewCTR(cryptBlock, iv).XORKeyStream(out, in)
	return nil
}
|
||||
|
||||
// Obscure a value
|
||||
//
|
||||
// This is done by encrypting with AES-CTR
|
||||
func Obscure(x string) (string, error) {
|
||||
plaintext := []byte(x)
|
||||
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
|
||||
iv := ciphertext[:aes.BlockSize]
|
||||
if _, err := io.ReadFull(cryptRand, iv); err != nil {
|
||||
return "", errors.Wrap(err, "failed to read iv")
|
||||
}
|
||||
if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
|
||||
return "", errors.Wrap(err, "encrypt failed")
|
||||
}
|
||||
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
|
||||
}
|
||||
|
||||
// MustObscure obscures a value, exiting with a fatal error if it failed
|
||||
func MustObscure(x string) string {
|
||||
out, err := Obscure(x)
|
||||
if err != nil {
|
||||
log.Fatalf("Obscure failed: %v", err)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Reveal an obscured value
|
||||
func Reveal(x string) (string, error) {
|
||||
ciphertext, err := base64.RawURLEncoding.DecodeString(x)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
|
||||
}
|
||||
if len(ciphertext) < aes.BlockSize {
|
||||
return "", errors.New("input too short when revealing password - is it obscured?")
|
||||
}
|
||||
buf := ciphertext[aes.BlockSize:]
|
||||
iv := ciphertext[:aes.BlockSize]
|
||||
if err := crypt(buf, buf, iv); err != nil {
|
||||
return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// MustReveal reveals an obscured value, exiting with a fatal error if it failed
|
||||
func MustReveal(x string) string {
|
||||
out, err := Reveal(x)
|
||||
if err != nil {
|
||||
log.Fatalf("Reveal failed: %v", err)
|
||||
}
|
||||
return out
|
||||
}
|
||||
60
.rclone_repo/fs/config/obscure/obscure_test.go
Executable file
60
.rclone_repo/fs/config/obscure/obscure_test.go
Executable file
@@ -0,0 +1,60 @@
|
||||
package obscure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestObscure checks Obscure/MustObscure against known outputs by
// replacing the random source with a fixed iv, and checks the result
// round-trips through Reveal.
func TestObscure(t *testing.T) {
	for _, test := range []struct {
		in   string
		want string
		iv   string
	}{
		{"", "YWFhYWFhYWFhYWFhYWFhYQ", "aaaaaaaaaaaaaaaa"},
		{"potato", "YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "aaaaaaaaaaaaaaaa"},
		{"potato", "YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "bbbbbbbbbbbbbbbb"},
	} {
		// Substitute a deterministic "random" source for the iv,
		// restoring the real one afterwards.
		cryptRand = bytes.NewBufferString(test.iv)
		got, err := Obscure(test.in)
		cryptRand = rand.Reader
		assert.NoError(t, err)
		assert.Equal(t, test.want, got)
		recoveredIn, err := Reveal(got)
		assert.NoError(t, err)
		assert.Equal(t, test.in, recoveredIn, "not bidirectional")
		// Now the Must variants
		cryptRand = bytes.NewBufferString(test.iv)
		got = MustObscure(test.in)
		cryptRand = rand.Reader
		assert.Equal(t, test.want, got)
		recoveredIn = MustReveal(got)
		assert.Equal(t, test.in, recoveredIn, "not bidirectional")

	}
}

// TestReveal checks Reveal/MustReveal against known ciphertexts.
func TestReveal(t *testing.T) {
	for _, test := range []struct {
		in   string
		want string
		iv   string
	}{
		{"YWFhYWFhYWFhYWFhYWFhYQ", "", "aaaaaaaaaaaaaaaa"},
		{"YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "potato", "aaaaaaaaaaaaaaaa"},
		{"YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "potato", "bbbbbbbbbbbbbbbb"},
	} {
		cryptRand = bytes.NewBufferString(test.iv)
		got, err := Reveal(test.in)
		assert.NoError(t, err)
		assert.Equal(t, test.want, got)
		// Now the Must variants
		cryptRand = bytes.NewBufferString(test.iv)
		got = MustReveal(test.in)
		assert.Equal(t, test.want, got)

	}
}
|
||||
4
.rclone_repo/fs/config/testdata/enc-invalid.conf
vendored
Executable file
4
.rclone_repo/fs/config/testdata/enc-invalid.conf
vendored
Executable file
@@ -0,0 +1,4 @@
|
||||
# Encrypted rclone configuration File
|
||||
|
||||
RCLONE_ENCRYPT_V0:
|
||||
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AOæøå+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
|
||||
4
.rclone_repo/fs/config/testdata/enc-short.conf
vendored
Executable file
4
.rclone_repo/fs/config/testdata/enc-short.conf
vendored
Executable file
@@ -0,0 +1,4 @@
|
||||
# Encrypted rclone configuration File
|
||||
|
||||
RCLONE_ENCRYPT_V0:
|
||||
b5Uk6mE3cUn5Wb8xi
|
||||
4
.rclone_repo/fs/config/testdata/enc-too-new.conf
vendored
Executable file
4
.rclone_repo/fs/config/testdata/enc-too-new.conf
vendored
Executable file
@@ -0,0 +1,4 @@
|
||||
# Encrypted rclone configuration File
|
||||
|
||||
RCLONE_ENCRYPT_V1:
|
||||
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
|
||||
4
.rclone_repo/fs/config/testdata/encrypted.conf
vendored
Executable file
4
.rclone_repo/fs/config/testdata/encrypted.conf
vendored
Executable file
@@ -0,0 +1,4 @@
|
||||
# Encrypted rclone configuration File
|
||||
|
||||
RCLONE_ENCRYPT_V0:
|
||||
b5Uk6mE3cUn5Wb8xiWYnVBAxXUirAaEG1PO/GIDiO9274AO+Yj790BwJA4d2y7lNkmHt4nJwIsoueFvUYmm7RDyzER8IA3XOCrjzl3OUcczZqcplk5JfBdhxMZpt1aGYWUdle1IgO/kAFne6sLD6IuxPySEb
|
||||
12
.rclone_repo/fs/config/testdata/plain.conf
vendored
Executable file
12
.rclone_repo/fs/config/testdata/plain.conf
vendored
Executable file
@@ -0,0 +1,12 @@
|
||||
[RCLONE_ENCRYPT_V0]
|
||||
type = local
|
||||
nounc = true
|
||||
|
||||
[nounc]
|
||||
type = local
|
||||
nounc = true
|
||||
|
||||
|
||||
[unc]
|
||||
type = local
|
||||
nounc = false
|
||||
94
.rclone_repo/fs/config_list.go
Executable file
94
.rclone_repo/fs/config_list.go
Executable file
@@ -0,0 +1,94 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CommaSepList is a comma separated config value
// It uses the encoding/csv rules for quoting and escaping
type CommaSepList []string

// SpaceSepList is a space separated config value
// It uses the encoding/csv rules for quoting and escaping
type SpaceSepList []string

// genericList holds the shared implementation for both list types.
type genericList []string

func (l CommaSepList) String() string {
	return genericList(l).string(',')
}

// Set the List entries
func (l *CommaSepList) Set(s string) error {
	return (*genericList)(l).set(',', []byte(s))
}

// Type of the value
func (CommaSepList) Type() string {
	return "[]string"
}

// Scan implements the fmt.Scanner interface
func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error {
	return (*genericList)(l).scan(',', s, ch)
}

func (l SpaceSepList) String() string {
	return genericList(l).string(' ')
}

// Set the List entries
func (l *SpaceSepList) Set(s string) error {
	return (*genericList)(l).set(' ', []byte(s))
}

// Type of the value
func (SpaceSepList) Type() string {
	return "[]string"
}

// Scan implements the fmt.Scanner interface
func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error {
	return (*genericList)(l).scan(' ', s, ch)
}

// string renders the list as a single csv record using sep.
func (list genericList) string(sep rune) string {
	var buf bytes.Buffer
	writer := csv.NewWriter(&buf)
	writer.Comma = sep
	if err := writer.Write(list); err != nil {
		// can only happen if writer.Comma is invalid
		panic(err)
	}
	writer.Flush()
	return string(bytes.TrimSpace(buf.Bytes()))
}

// set parses b as a single csv record using sep into the list.
func (list *genericList) set(sep rune, b []byte) error {
	if len(b) == 0 {
		*list = nil
		return nil
	}
	reader := csv.NewReader(bytes.NewReader(b))
	reader.Comma = sep

	record, err := reader.Read()
	if err == nil {
		*list = record
		return nil
	}
	if parseErr, ok := err.(*csv.ParseError); ok {
		// remove line numbers from the error message
		err = parseErr.Err
	}
	return err
}

// scan reads the whole remaining token and parses it with set.
func (list *genericList) scan(sep rune, s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, func(rune) bool { return true })
	if err != nil {
		return err
	}
	return list.set(sep, bytes.TrimSpace(token))
}
|
||||
87
.rclone_repo/fs/config_list_test.go
Executable file
87
.rclone_repo/fs/config_list_test.go
Executable file
@@ -0,0 +1,87 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// must panics on a non-nil error — used to keep the Example bodies terse.
func must(err error) {
	if err != nil {
		panic(err)
	}
}

// ExampleSpaceSepList shows space separated parsing, including
// csv-style quoting and embedded (doubled) quotes.
func ExampleSpaceSepList() {
	for _, s := range []string{
		`remotea:test/dir remoteb:`,
		`"remotea:test/space dir" remoteb:`,
		`"remotea:test/quote""dir" remoteb:`,
	} {
		var l SpaceSepList
		must(l.Set(s))
		fmt.Printf("%#v\n", l)
	}
	// Output:
	// fs.SpaceSepList{"remotea:test/dir", "remoteb:"}
	// fs.SpaceSepList{"remotea:test/space dir", "remoteb:"}
	// fs.SpaceSepList{"remotea:test/quote\"dir", "remoteb:"}
}

// ExampleCommaSepList shows comma separated parsing, including
// csv-style quoting and embedded (doubled) quotes.
func ExampleCommaSepList() {
	for _, s := range []string{
		`remotea:test/dir,remoteb:`,
		`"remotea:test/space dir",remoteb:`,
		`"remotea:test/quote""dir",remoteb:`,
	} {
		var l CommaSepList
		must(l.Set(s))
		fmt.Printf("%#v\n", l)
	}
	// Output:
	// fs.CommaSepList{"remotea:test/dir", "remoteb:"}
	// fs.CommaSepList{"remotea:test/space dir", "remoteb:"}
	// fs.CommaSepList{"remotea:test/quote\"dir", "remoteb:"}
}

// TestSpaceSepListSet is a table driven test of SpaceSepList.Set,
// covering quoting, escaping and the csv error cases.
func TestSpaceSepListSet(t *testing.T) {
	type tc struct {
		in  string
		out SpaceSepList
		err string // substring expected in the error, "" for success
	}
	tests := []tc{
		{``, nil, ""},
		{`\`, SpaceSepList{`\`}, ""},
		{`\\`, SpaceSepList{`\\`}, ""},
		{`potato`, SpaceSepList{`potato`}, ""},
		{`po\tato`, SpaceSepList{`po\tato`}, ""},
		{`potato\`, SpaceSepList{`potato\`}, ""},
		{`'potato`, SpaceSepList{`'potato`}, ""},
		{`pot'ato`, SpaceSepList{`pot'ato`}, ""},
		{`potato'`, SpaceSepList{`potato'`}, ""},
		{`"potato"`, SpaceSepList{`potato`}, ""},
		{`'potato'`, SpaceSepList{`'potato'`}, ""},
		{`potato apple`, SpaceSepList{`potato`, `apple`}, ""},
		{`potato\ apple`, SpaceSepList{`potato\`, `apple`}, ""},
		{`"potato apple"`, SpaceSepList{`potato apple`}, ""},
		{`"potato'apple"`, SpaceSepList{`potato'apple`}, ""},
		{`"potato''apple"`, SpaceSepList{`potato''apple`}, ""},
		{`"potato' 'apple"`, SpaceSepList{`potato' 'apple`}, ""},
		{`potato="apple"`, nil, `bare " in non-quoted-field`},
		{`apple "potato`, nil, "extraneous"},
		{`apple pot"ato`, nil, "bare \" in non-quoted-field"},
		{`potato"`, nil, "bare \" in non-quoted-field"},
	}
	for _, tc := range tests {
		var l SpaceSepList
		err := l.Set(tc.in)
		if tc.err == "" {
			require.NoErrorf(t, err, "input: %q", tc.in)
		} else {
			require.Containsf(t, err.Error(), tc.err, "input: %q", tc.in)
		}
		require.Equalf(t, tc.out, l, "input: %q", tc.in)
	}
}
|
||||
14
.rclone_repo/fs/deletemode.go
Executable file
14
.rclone_repo/fs/deletemode.go
Executable file
@@ -0,0 +1,14 @@
|
||||
package fs
|
||||
|
||||
// DeleteMode describes the possible delete modes in the config
type DeleteMode byte

// DeleteMode constants. The exact semantics of each mode are defined
// by the code that consumes the config value; DeleteModeDefault is an
// alias for DeleteModeAfter.
const (
	DeleteModeOff DeleteMode = iota
	DeleteModeBefore
	DeleteModeDuring
	DeleteModeAfter
	DeleteModeOnly
	DeleteModeDefault = DeleteModeAfter
)
|
||||
97
.rclone_repo/fs/dir.go
Executable file
97
.rclone_repo/fs/dir.go
Executable file
@@ -0,0 +1,97 @@
|
||||
package fs
|
||||
|
||||
import "time"
|
||||
|
||||
// Dir describes an unspecialized directory for directory/container/bucket lists
|
||||
type Dir struct {
|
||||
remote string // name of the directory
|
||||
modTime time.Time // modification or creation time - IsZero for unknown
|
||||
size int64 // size of directory and contents or -1 if unknown
|
||||
items int64 // number of objects or -1 for unknown
|
||||
id string // optional ID
|
||||
}
|
||||
|
||||
// NewDir creates an unspecialized Directory object
|
||||
func NewDir(remote string, modTime time.Time) *Dir {
|
||||
return &Dir{
|
||||
remote: remote,
|
||||
modTime: modTime,
|
||||
size: -1,
|
||||
items: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// NewDirCopy creates an unspecialized copy of the Directory object passed in
|
||||
func NewDirCopy(d Directory) *Dir {
|
||||
return &Dir{
|
||||
remote: d.Remote(),
|
||||
modTime: d.ModTime(),
|
||||
size: d.Size(),
|
||||
items: d.Items(),
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the name
|
||||
func (d *Dir) String() string {
|
||||
return d.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (d *Dir) Remote() string {
|
||||
return d.remote
|
||||
}
|
||||
|
||||
// SetRemote sets the remote
|
||||
func (d *Dir) SetRemote(remote string) *Dir {
|
||||
d.remote = remote
|
||||
return d
|
||||
}
|
||||
|
||||
// ID gets the optional ID
|
||||
func (d *Dir) ID() string {
|
||||
return d.id
|
||||
}
|
||||
|
||||
// SetID sets the optional ID
|
||||
func (d *Dir) SetID(id string) *Dir {
|
||||
d.id = id
|
||||
return d
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (d *Dir) ModTime() time.Time {
|
||||
if !d.modTime.IsZero() {
|
||||
return d.modTime
|
||||
}
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (d *Dir) Size() int64 {
|
||||
return d.size
|
||||
}
|
||||
|
||||
// SetSize sets the size of the directory
|
||||
func (d *Dir) SetSize(size int64) *Dir {
|
||||
d.size = size
|
||||
return d
|
||||
}
|
||||
|
||||
// Items returns the count of items in this directory or this
|
||||
// directory and subdirectories if known, -1 for unknown
|
||||
func (d *Dir) Items() int64 {
|
||||
return d.items
|
||||
}
|
||||
|
||||
// SetItems sets the number of items in the directory
|
||||
func (d *Dir) SetItems(items int64) *Dir {
|
||||
d.items = items
|
||||
return d
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var (
|
||||
_ DirEntry = (*Dir)(nil)
|
||||
_ Directory = (*Dir)(nil)
|
||||
)
|
||||
81
.rclone_repo/fs/direntries.go
Executable file
81
.rclone_repo/fs/direntries.go
Executable file
@@ -0,0 +1,81 @@
|
||||
package fs
|
||||
|
||||
import "fmt"
|
||||
|
||||
// DirEntries is a slice of Object or *Dir
|
||||
type DirEntries []DirEntry
|
||||
|
||||
// Len is part of sort.Interface.
|
||||
func (ds DirEntries) Len() int {
|
||||
return len(ds)
|
||||
}
|
||||
|
||||
// Swap is part of sort.Interface.
|
||||
func (ds DirEntries) Swap(i, j int) {
|
||||
ds[i], ds[j] = ds[j], ds[i]
|
||||
}
|
||||
|
||||
// Less is part of sort.Interface.
|
||||
func (ds DirEntries) Less(i, j int) bool {
|
||||
return ds[i].Remote() < ds[j].Remote()
|
||||
}
|
||||
|
||||
// ForObject runs the function supplied on every object in the entries
|
||||
func (ds DirEntries) ForObject(fn func(o Object)) {
|
||||
for _, entry := range ds {
|
||||
o, ok := entry.(Object)
|
||||
if ok {
|
||||
fn(o)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ForObjectError runs the function supplied on every object in the entries
|
||||
func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
|
||||
for _, entry := range ds {
|
||||
o, ok := entry.(Object)
|
||||
if ok {
|
||||
err := fn(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForDir runs the function supplied on every Directory in the entries
|
||||
func (ds DirEntries) ForDir(fn func(dir Directory)) {
|
||||
for _, entry := range ds {
|
||||
dir, ok := entry.(Directory)
|
||||
if ok {
|
||||
fn(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ForDirError runs the function supplied on every Directory in the entries
|
||||
func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
|
||||
for _, entry := range ds {
|
||||
dir, ok := entry.(Directory)
|
||||
if ok {
|
||||
err := fn(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirEntryType returns a string description of the DirEntry, either
|
||||
// "object", "directory" or "unknown type XXX"
|
||||
func DirEntryType(d DirEntry) string {
|
||||
switch d.(type) {
|
||||
case Object:
|
||||
return "object"
|
||||
case Directory:
|
||||
return "directory"
|
||||
}
|
||||
return fmt.Sprintf("unknown type %T", d)
|
||||
}
|
||||
14
.rclone_repo/fs/driveletter/driveletter.go
Executable file
14
.rclone_repo/fs/driveletter/driveletter.go
Executable file
@@ -0,0 +1,14 @@
|
||||
// Package driveletter returns whether a name is a valid drive letter

// +build !windows

package driveletter

// IsDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false. The Windows implementation lives in
// driveletter_windows.go.
func IsDriveLetter(name string) bool {
	return false
}
|
||||
13
.rclone_repo/fs/driveletter/driveletter_windows.go
Executable file
13
.rclone_repo/fs/driveletter/driveletter_windows.go
Executable file
@@ -0,0 +1,13 @@
|
||||
// +build windows
|
||||
|
||||
package driveletter
|
||||
|
||||
// IsDriveLetter returns a bool indicating whether name is a valid
|
||||
// Windows drive letter
|
||||
func IsDriveLetter(name string) bool {
|
||||
if len(name) != 1 {
|
||||
return false
|
||||
}
|
||||
c := name[0]
|
||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
||||
93
.rclone_repo/fs/dump.go
Executable file
93
.rclone_repo/fs/dump.go
Executable file
@@ -0,0 +1,93 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DumpFlags describes the Dump options in force
|
||||
type DumpFlags int
|
||||
|
||||
// DumpFlags definitions
|
||||
const (
|
||||
DumpHeaders DumpFlags = 1 << iota
|
||||
DumpBodies
|
||||
DumpRequests
|
||||
DumpResponses
|
||||
DumpAuth
|
||||
DumpFilters
|
||||
DumpGoRoutines
|
||||
DumpOpenFiles
|
||||
)
|
||||
|
||||
var dumpFlags = []struct {
|
||||
flag DumpFlags
|
||||
name string
|
||||
}{
|
||||
{DumpHeaders, "headers"},
|
||||
{DumpBodies, "bodies"},
|
||||
{DumpRequests, "requests"},
|
||||
{DumpResponses, "responses"},
|
||||
{DumpAuth, "auth"},
|
||||
{DumpFilters, "filters"},
|
||||
{DumpGoRoutines, "goroutines"},
|
||||
{DumpOpenFiles, "openfiles"},
|
||||
}
|
||||
|
||||
// DumpFlagsList is a list of dump flags used in the help
|
||||
var DumpFlagsList string
|
||||
|
||||
func init() {
|
||||
// calculate the dump flags list
|
||||
var out []string
|
||||
for _, info := range dumpFlags {
|
||||
out = append(out, info.name)
|
||||
}
|
||||
DumpFlagsList = strings.Join(out, ",")
|
||||
}
|
||||
|
||||
// String turns a DumpFlags into a string
|
||||
func (f DumpFlags) String() string {
|
||||
var out []string
|
||||
for _, info := range dumpFlags {
|
||||
if f&info.flag != 0 {
|
||||
out = append(out, info.name)
|
||||
f &^= info.flag
|
||||
}
|
||||
}
|
||||
if f != 0 {
|
||||
out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
|
||||
}
|
||||
return strings.Join(out, ",")
|
||||
}
|
||||
|
||||
// Set a DumpFlags as a comma separated list of flags
|
||||
func (f *DumpFlags) Set(s string) error {
|
||||
var flags DumpFlags
|
||||
parts := strings.Split(s, ",")
|
||||
for _, part := range parts {
|
||||
found := false
|
||||
part = strings.ToLower(strings.TrimSpace(part))
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
for _, info := range dumpFlags {
|
||||
if part == info.name {
|
||||
found = true
|
||||
flags |= info.flag
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return errors.Errorf("Unknown dump flag %q", part)
|
||||
}
|
||||
}
|
||||
*f = flags
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (f *DumpFlags) Type() string {
|
||||
return "string"
|
||||
}
|
||||
58
.rclone_repo/fs/dump_test.go
Executable file
58
.rclone_repo/fs/dump_test.go
Executable file
@@ -0,0 +1,58 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Check it satisfies the interface
var _ pflag.Value = (*DumpFlags)(nil)

// TestDumpFlagsString checks rendering, including the
// "Unknown-0x…" fallback for unrecognised bits.
func TestDumpFlagsString(t *testing.T) {
	assert.Equal(t, "", DumpFlags(0).String())
	assert.Equal(t, "headers", (DumpHeaders).String())
	assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
	assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
	assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
}

// TestDumpFlagsSet checks parsing: success cases replace the previous
// value, error cases must leave it untouched.
func TestDumpFlagsSet(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    DumpFlags
		wantErr string
	}{
		{"", DumpFlags(0), ""},
		{"bodies", DumpBodies, ""},
		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
		// NOTE(review): exact duplicate of the previous case — probably
		// meant to vary order or spacing; confirm intent.
		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
		{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
		{"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""},
	} {
		// start from a sentinel so we can tell Set really overwrote it
		f := DumpFlags(-1)
		initial := f
		err := f.Set(test.in)
		if err != nil {
			if test.wantErr == "" {
				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
			} else {
				assert.Contains(t, err.Error(), test.wantErr)
			}
			// on error the value must be unchanged
			assert.Equal(t, initial, f, test.want)
		} else {
			if test.wantErr != "" {
				t.Errorf("Got no error when expecting one on %q", test.in)
			} else {
				assert.Equal(t, test.want, f)
			}
		}

	}
}

// TestDumpFlagsType checks the pflag.Value Type string.
func TestDumpFlagsType(t *testing.T) {
	f := DumpFlags(0)
	assert.Equal(t, "string", f.Type())
}
|
||||
498
.rclone_repo/fs/filter/filter.go
Executable file
498
.rclone_repo/fs/filter/filter.go
Executable file
@@ -0,0 +1,498 @@
|
||||
// Package filter controls the filtering of files
|
||||
package filter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Active is the globally active filter
//
// NOTE(review): mutable package-level state — every caller shares this
// instance.
var Active = mustNewFilter(nil)
|
||||
|
||||
// rule is one filter rule
type rule struct {
	Include bool
	Regexp  *regexp.Regexp
}

// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
	return r.Regexp.MatchString(path)
}

// String renders the rule as "+ regexp" (include) or "- regexp"
// (exclude).
func (r *rule) String() string {
	sign := '-'
	if r.Include {
		sign = '+'
	}
	return fmt.Sprintf("%c %s", sign, r.Regexp.String())
}

// rules is a slice of rules, deduplicated on the rendered form.
type rules struct {
	rules    []rule
	existing map[string]struct{}
}

// add adds a rule if it doesn't exist already
func (rs *rules) add(include bool, re *regexp.Regexp) {
	newRule := rule{
		Include: include,
		Regexp:  re,
	}
	key := newRule.String()
	if rs.existing == nil {
		rs.existing = make(map[string]struct{})
	} else if _, dup := rs.existing[key]; dup {
		// rule already exists
		return
	}
	rs.rules = append(rs.rules, newRule)
	rs.existing[key] = struct{}{}
}

// clear clears all the rules
func (rs *rules) clear() {
	rs.rules = nil
	rs.existing = nil
}

// len returns the number of rules
func (rs *rules) len() int {
	return len(rs.rules)
}
|
||||
|
||||
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}

// Opt configures the filter
type Opt struct {
	DeleteExcluded bool
	FilterRule     []string // raw "+/-" rules from --filter
	FilterFrom     []string // files containing "+/-" rules
	ExcludeRule    []string
	ExcludeFrom    []string
	ExcludeFile    string
	IncludeRule    []string
	IncludeFrom    []string
	FilesFrom      []string
	MinAge         fs.Duration
	MaxAge         fs.Duration
	MinSize        fs.SizeSuffix
	MaxSize        fs.SizeSuffix
}

// DefaultOpt is the default config for the filter
// (ages off, sizes unlimited — -1 means "not set" for the sizes).
var DefaultOpt = Opt{
	MinAge:  fs.DurationOff,
	MaxAge:  fs.DurationOff,
	MinSize: fs.SizeSuffix(-1),
	MaxSize: fs.SizeSuffix(-1),
}

// Filter describes any filtering in operation
type Filter struct {
	Opt         Opt
	ModTimeFrom time.Time // derived from MaxAge - zero means unset
	ModTimeTo   time.Time // derived from MinAge - zero means unset
	fileRules   rules
	dirRules    rules
	files       FilesMap // files if filesFrom
	dirs        FilesMap // dirs from filesFrom
}
|
||||
|
||||
// NewFilter parses the command line options and creates a Filter
// object. If opt is nil, then DefaultOpt will be used
func NewFilter(opt *Opt) (f *Filter, err error) {
	f = &Filter{}

	// Make a copy of the options
	if opt != nil {
		f.Opt = *opt
	} else {
		f.Opt = DefaultOpt
	}

	// Filter flags: turn the relative ages into absolute mod time bounds
	if f.Opt.MinAge.IsSet() {
		f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
		fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
	}
	if f.Opt.MaxAge.IsSet() {
		f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
		if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
			// NOTE(review): log.Fatal terminates the whole process from
			// library code; returning an error (the function already
			// returns one) would let callers recover.
			log.Fatal("filter: --min-age can't be larger than --max-age")
		}
		fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
	}

	addImplicitExclude := false
	foundExcludeRule := false

	// include rules imply a trailing "exclude everything else" (added below)
	for _, rule := range f.Opt.IncludeRule {
		err = f.Add(true, rule)
		if err != nil {
			return nil, err
		}
		addImplicitExclude = true
	}
	for _, rule := range f.Opt.IncludeFrom {
		err := forEachLine(rule, func(line string) error {
			return f.Add(true, line)
		})
		if err != nil {
			return nil, err
		}
		addImplicitExclude = true
	}
	for _, rule := range f.Opt.ExcludeRule {
		err = f.Add(false, rule)
		if err != nil {
			return nil, err
		}
		foundExcludeRule = true
	}
	for _, rule := range f.Opt.ExcludeFrom {
		err := forEachLine(rule, func(line string) error {
			return f.Add(false, line)
		})
		if err != nil {
			return nil, err
		}
		foundExcludeRule = true
	}

	if addImplicitExclude && foundExcludeRule {
		fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
	}

	for _, rule := range f.Opt.FilterRule {
		err = f.AddRule(rule)
		if err != nil {
			return nil, err
		}
	}
	for _, rule := range f.Opt.FilterFrom {
		err := forEachLine(rule, f.AddRule)
		if err != nil {
			return nil, err
		}
	}
	for _, rule := range f.Opt.FilesFrom {
		f.initAddFile() // init to show --files-from set even if no files within
		err := forEachLine(rule, func(line string) error {
			return f.AddFile(line)
		})
		if err != nil {
			return nil, err
		}
	}
	if addImplicitExclude {
		// exclude everything the include rules didn't match
		err = f.Add(false, "/**")
		if err != nil {
			return nil, err
		}
	}
	if fs.Config.Dump&fs.DumpFilters != 0 {
		fmt.Println("--- start filters ---")
		fmt.Println(f.DumpFilters())
		fmt.Println("--- end filters ---")
	}
	return f, nil
}

// mustNewFilter is like NewFilter but panics on error — used only for
// the package-level Active filter.
func mustNewFilter(opt *Opt) *Filter {
	f, err := NewFilter(opt)
	if err != nil {
		panic(err)
	}
	return f
}
|
||||
|
||||
// addDirGlobs adds directory globs from the file glob passed in
func (f *Filter) addDirGlobs(Include bool, glob string) error {
	for _, dirGlob := range globToDirGlobs(glob) {
		// Don't add "/" as we always include the root
		if dirGlob == "/" {
			continue
		}
		dirRe, err := globToRegexp(dirGlob)
		if err != nil {
			return err
		}
		f.dirRules.add(Include, dirRe)
	}
	return nil
}

// Add adds a filter rule with include or exclude status indicated.
//
// A glob ending in "/" is a directory rule; one containing "**"
// counts as both a file and a directory rule.
func (f *Filter) Add(Include bool, glob string) error {
	isDirRule := strings.HasSuffix(glob, "/")
	isFileRule := !isDirRule
	if strings.Contains(glob, "**") {
		isDirRule, isFileRule = true, true
	}
	re, err := globToRegexp(glob)
	if err != nil {
		return err
	}
	if isFileRule {
		f.fileRules.add(Include, re)
		// If include rule work out what directories are needed to scan
		// if exclude rule, we can't rule anything out
		// Unless it is `*` which matches everything
		// NB ** and /** are DirRules
		if Include || glob == "*" {
			err = f.addDirGlobs(Include, glob)
			if err != nil {
				return err
			}
		}
	}
	if isDirRule {
		f.dirRules.add(Include, re)
	}
	return nil
}

// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//     + glob
//     - glob
//     !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
	switch {
	case rule == "!":
		f.Clear()
		return nil
	case strings.HasPrefix(rule, "- "):
		return f.Add(false, rule[2:])
	case strings.HasPrefix(rule, "+ "):
		return f.Add(true, rule[2:])
	}
	return errors.Errorf("malformed rule %q", rule)
}

// initAddFile creates f.files and f.dirs
func (f *Filter) initAddFile() {
	if f.files == nil {
		f.files = make(FilesMap)
		f.dirs = make(FilesMap)
	}
}

// AddFile adds a single file to the files from list.
//
// It always returns nil.
func (f *Filter) AddFile(file string) error {
	f.initAddFile()
	file = strings.Trim(file, "/")
	f.files[file] = struct{}{}
	// Put all the parent directories into f.dirs
	for {
		file = path.Dir(file)
		if file == "." {
			break
		}
		// stop early once a parent is already recorded
		if _, found := f.dirs[file]; found {
			break
		}
		f.dirs[file] = struct{}{}
	}
	return nil
}

// Files returns all the files from the `--files-from` list
//
// It may be nil if the list is empty
func (f *Filter) Files() FilesMap {
	return f.files
}

// Clear clears all the filter rules
func (f *Filter) Clear() {
	f.fileRules.clear()
	f.dirRules.clear()
}
|
||||
|
||||
// InActive returns false if any filters are active
func (f *Filter) InActive() bool {
	return (f.files == nil &&
		f.ModTimeFrom.IsZero() &&
		f.ModTimeTo.IsZero() &&
		f.Opt.MinSize < 0 &&
		f.Opt.MaxSize < 0 &&
		f.fileRules.len() == 0 &&
		f.dirRules.len() == 0 &&
		len(f.Opt.ExcludeFile) == 0)
}

// includeRemote returns whether this remote passes the filter rules.
// The first matching rule wins; no match means include.
func (f *Filter) includeRemote(remote string) bool {
	for _, rule := range f.fileRules.rules {
		if rule.Match(remote) {
			return rule.Include
		}
	}
	return true
}

// ListContainsExcludeFile checks if exclude file is present in the list.
func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
	if len(f.Opt.ExcludeFile) == 0 {
		return false
	}
	for _, entry := range entries {
		obj, ok := entry.(fs.Object)
		if ok {
			basename := path.Base(obj.Remote())
			if basename == f.Opt.ExcludeFile {
				return true
			}
		}
	}
	return false
}

// IncludeDirectory returns a function which checks whether this
// directory should be included in the sync or not.
func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
	return func(remote string) (bool, error) {
		remote = strings.Trim(remote, "/")
		// first check if we need to remove directory based on
		// the exclude file
		excl, err := f.DirContainsExcludeFile(fs, remote)
		if err != nil {
			return false, err
		}
		if excl {
			return false, nil
		}

		// filesFrom takes precedence
		if f.files != nil {
			_, include := f.dirs[remote]
			return include, nil
		}
		// dir rules are matched against the path with a trailing "/"
		remote += "/"
		for _, rule := range f.dirRules.rules {
			if rule.Match(remote) {
				return rule.Include, nil
			}
		}

		return true, nil
	}
}

// DirContainsExcludeFile checks if exclude file is present in a
// directory. If fs is nil, it works properly if ExcludeFile is an
// empty string (for testing).
func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
	if len(f.Opt.ExcludeFile) > 0 {
		exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
		if err != nil {
			return false, err
		}
		if exists {
			return true, nil
		}
	}
	return false, nil
}

// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
	// filesFrom takes precedence
	if f.files != nil {
		_, include := f.files[remote]
		return include
	}
	if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
		return false
	}
	if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
		return false
	}
	if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
		return false
	}
	if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
		return false
	}
	return f.includeRemote(remote)
}

// IncludeObject returns whether this object should be included into
// the sync or not. This is a convenience function to avoid calling
// o.ModTime(), which is an expensive operation.
func (f *Filter) IncludeObject(o fs.Object) bool {
	var modTime time.Time

	// only pay for ModTime() when a time bound is actually set
	if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
		modTime = o.ModTime()
	} else {
		modTime = time.Unix(0, 0)
	}

	return f.Include(o.Remote(), o.Size(), modTime)
}

// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func (f *Filter) DumpFilters() string { return "" } // placeholder — see below
|
||||
438
.rclone_repo/fs/filter/filter_test.go
Executable file
438
.rclone_repo/fs/filter/filter_test.go
Executable file
@@ -0,0 +1,438 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewFilterDefault checks the zero-configuration filter is inactive.
func TestNewFilterDefault(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	assert.False(t, f.Opt.DeleteExcluded)
	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MinSize)
	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MaxSize)
	assert.Len(t, f.fileRules.rules, 0)
	assert.Len(t, f.dirRules.rules, 0)
	assert.Nil(t, f.files)
	assert.True(t, f.InActive())
}

// testFile creates a temp file with the contents and returns its path.
// The caller is responsible for removing it.
func testFile(t *testing.T, contents string) string {
	out, err := ioutil.TempFile("", "filter_test")
	require.NoError(t, err)
	defer func() {
		err := out.Close()
		require.NoError(t, err)
	}()
	_, err = out.Write([]byte(contents))
	require.NoError(t, err)
	s := out.Name()
	return s
}

// TestNewFilterFull exercises every Opt field at once and checks the
// resulting rule dump and files-from map.
func TestNewFilterFull(t *testing.T) {
	Opt := DefaultOpt

	mins := fs.SizeSuffix(100 * 1024)
	maxs := fs.SizeSuffix(1000 * 1024)

	// Set up the input
	Opt.DeleteExcluded = true
	Opt.FilterRule = []string{"- filter1", "- filter1b"}
	Opt.FilterFrom = []string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
	Opt.ExcludeRule = []string{"exclude1"}
	Opt.ExcludeFrom = []string{testFile(t, "#comment\nexclude2\nexclude3\n")}
	Opt.IncludeRule = []string{"include1"}
	Opt.IncludeFrom = []string{testFile(t, "#comment\ninclude2\ninclude3\n")}
	Opt.FilesFrom = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}
	Opt.MinSize = mins
	Opt.MaxSize = maxs

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilterFrom[0])
		rm(Opt.ExcludeFrom[0])
		rm(Opt.IncludeFrom[0])
		rm(Opt.FilesFrom[0])
	}()

	f, err := NewFilter(&Opt)
	require.NoError(t, err)
	assert.True(t, f.Opt.DeleteExcluded)
	assert.Equal(t, f.Opt.MinSize, mins)
	assert.Equal(t, f.Opt.MaxSize, maxs)
	got := f.DumpFilters()
	want := `--- File filter rules ---
+ (^|/)include1$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
- (^|/)filter1b$
+ (^|/)filter2$
- (^|/)filter3$
- ^.*$
--- Directory filter rules ---
+ ^.*$
- ^.*$`
	assert.Equal(t, want, got)
	assert.Len(t, f.files, 2)
	for _, name := range []string{"files1", "files2"} {
		_, ok := f.files[name]
		if !ok {
			t.Errorf("Didn't find file %q in f.files", name)
		}
	}
	assert.False(t, f.InActive())
}

// includeTest is one row for the Include table tests below.
type includeTest struct {
	in      string
	size    int64
	modTime int64
	want    bool
}
|
||||
|
||||
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
|
||||
for _, test := range tests {
|
||||
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, time.Unix(test.modTime, 0)))
|
||||
}
|
||||
}
|
||||
|
||||
type includeDirTest struct {
|
||||
in string
|
||||
want bool
|
||||
}
|
||||
|
||||
func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
|
||||
for _, test := range tests {
|
||||
got, err := f.IncludeDirectory(nil)(test.in)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFilterIncludeFiles(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
err = f.AddFile("file1.jpg")
|
||||
require.NoError(t, err)
|
||||
err = f.AddFile("/file2.jpg")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, FilesMap{
|
||||
"file1.jpg": {},
|
||||
"file2.jpg": {},
|
||||
}, f.files)
|
||||
assert.Equal(t, FilesMap{}, f.dirs)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 0, 0, true},
|
||||
{"file2.jpg", 1, 0, true},
|
||||
{"potato/file2.jpg", 2, 0, false},
|
||||
{"file3.jpg", 3, 0, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterIncludeFilesDirs(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
for _, path := range []string{
|
||||
"path/to/dir/file1.png",
|
||||
"/path/to/dir/file2.png",
|
||||
"/path/to/file3.png",
|
||||
"/path/to/dir2/file4.png",
|
||||
} {
|
||||
err = f.AddFile(path)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, FilesMap{
|
||||
"path": {},
|
||||
"path/to": {},
|
||||
"path/to/dir": {},
|
||||
"path/to/dir2": {},
|
||||
}, f.dirs)
|
||||
testDirInclude(t, f, []includeDirTest{
|
||||
{"path", true},
|
||||
{"path/to", true},
|
||||
{"path/to/", true},
|
||||
{"/path/to", true},
|
||||
{"/path/to/", true},
|
||||
{"path/to/dir", true},
|
||||
{"path/to/dir2", true},
|
||||
{"path/too", false},
|
||||
{"path/three", false},
|
||||
{"four", false},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewFilterMinSize(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
f.Opt.MinSize = 100
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 0, true},
|
||||
{"file2.jpg", 101, 0, true},
|
||||
{"potato/file2.jpg", 99, 0, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMaxSize(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
f.Opt.MaxSize = 100
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 0, true},
|
||||
{"file2.jpg", 101, 0, false},
|
||||
{"potato/file2.jpg", 99, 0, true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMinAndMaxAge(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
f.ModTimeFrom = time.Unix(1440000002, 0)
|
||||
f.ModTimeTo = time.Unix(1440000003, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, false},
|
||||
{"file2.jpg", 101, 1440000001, false},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, true},
|
||||
{"potato/file2.jpg", 99, 1440000004, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMinAge(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
f.ModTimeTo = time.Unix(1440000002, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, true},
|
||||
{"file2.jpg", 101, 1440000001, true},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, false},
|
||||
{"potato/file2.jpg", 99, 1440000004, false},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMaxAge(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
f.ModTimeFrom = time.Unix(1440000002, 0)
|
||||
testInclude(t, f, []includeTest{
|
||||
{"file1.jpg", 100, 1440000000, false},
|
||||
{"file2.jpg", 101, 1440000001, false},
|
||||
{"file3.jpg", 102, 1440000002, true},
|
||||
{"potato/file1.jpg", 98, 1440000003, true},
|
||||
{"potato/file2.jpg", 99, 1440000004, true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestNewFilterMatches(t *testing.T) {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
add := func(s string) {
|
||||
err := f.AddRule(s)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
add("+ cleared")
|
||||
add("!")
|
||||
add("- /file1.jpg")
|
||||
add("+ /file2.png")
|
||||
add("+ /*.jpg")
|
||||
add("- /*.png")
|
||||
add("- /potato")
|
||||
add("+ /sausage1")
|
||||
add("+ /sausage2*")
|
||||
add("+ /sausage3**")
|
||||
add("+ /a/*.jpg")
|
||||
add("- *")
|
||||
testInclude(t, f, []includeTest{
|
||||
{"cleared", 100, 0, false},
|
||||
{"file1.jpg", 100, 0, false},
|
||||
{"file2.png", 100, 0, true},
|
||||
{"afile2.png", 100, 0, false},
|
||||
{"file3.jpg", 101, 0, true},
|
||||
{"file4.png", 101, 0, false},
|
||||
{"potato", 101, 0, false},
|
||||
{"sausage1", 101, 0, true},
|
||||
{"sausage1/potato", 101, 0, false},
|
||||
{"sausage2potato", 101, 0, true},
|
||||
{"sausage2/potato", 101, 0, false},
|
||||
{"sausage3/potato", 101, 0, true},
|
||||
{"a/one.jpg", 101, 0, true},
|
||||
{"a/one.png", 101, 0, false},
|
||||
{"unicorn", 99, 0, false},
|
||||
})
|
||||
testDirInclude(t, f, []includeDirTest{
|
||||
{"sausage1", false},
|
||||
{"sausage2", false},
|
||||
{"sausage2/sub", false},
|
||||
{"sausage2/sub/dir", false},
|
||||
{"sausage3", true},
|
||||
{"sausage3/sub", true},
|
||||
{"sausage3/sub/dir", true},
|
||||
{"sausage4", false},
|
||||
{"a", true},
|
||||
})
|
||||
assert.False(t, f.InActive())
|
||||
}
|
||||
|
||||
func TestFilterAddDirRuleOrFileRule(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
included bool
|
||||
glob string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
false,
|
||||
"potato",
|
||||
`--- File filter rules ---
|
||||
- (^|/)potato$
|
||||
--- Directory filter rules ---`,
|
||||
},
|
||||
{
|
||||
true,
|
||||
"potato",
|
||||
`--- File filter rules ---
|
||||
+ (^|/)potato$
|
||||
--- Directory filter rules ---
|
||||
+ ^.*$`,
|
||||
},
|
||||
{
|
||||
false,
|
||||
"*",
|
||||
`--- File filter rules ---
|
||||
- (^|/)[^/]*$
|
||||
--- Directory filter rules ---
|
||||
- ^.*$`,
|
||||
},
|
||||
{
|
||||
true,
|
||||
"*",
|
||||
`--- File filter rules ---
|
||||
+ (^|/)[^/]*$
|
||||
--- Directory filter rules ---
|
||||
+ ^.*$`,
|
||||
},
|
||||
{
|
||||
false,
|
||||
".*{,/**}",
|
||||
`--- File filter rules ---
|
||||
- (^|/)\.[^/]*(|/.*)$
|
||||
--- Directory filter rules ---
|
||||
- (^|/)\.[^/]*(|/.*)$`,
|
||||
},
|
||||
{
|
||||
true,
|
||||
"a/b/c/d",
|
||||
`--- File filter rules ---
|
||||
+ (^|/)a/b/c/d$
|
||||
--- Directory filter rules ---
|
||||
+ (^|/)a/b/c/$
|
||||
+ (^|/)a/b/$
|
||||
+ (^|/)a/$`,
|
||||
},
|
||||
} {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
err = f.Add(test.included, test.glob)
|
||||
require.NoError(t, err)
|
||||
got := f.DumpFilters()
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("Add(%v, %q)", test.included, test.glob))
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterForEachLine(t *testing.T) {
|
||||
file := testFile(t, `; comment
|
||||
one
|
||||
# another comment
|
||||
|
||||
|
||||
two
|
||||
# indented comment
|
||||
three
|
||||
four
|
||||
five
|
||||
six `)
|
||||
defer func() {
|
||||
err := os.Remove(file)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
lines := []string{}
|
||||
err := forEachLine(file, func(s string) error {
|
||||
lines = append(lines, s)
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "one,two,three,four,five,six", strings.Join(lines, ","))
|
||||
}
|
||||
|
||||
func TestFilterMatchesFromDocs(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
glob string
|
||||
included bool
|
||||
file string
|
||||
}{
|
||||
{"file.jpg", true, "file.jpg"},
|
||||
{"file.jpg", true, "directory/file.jpg"},
|
||||
{"file.jpg", false, "afile.jpg"},
|
||||
{"file.jpg", false, "directory/afile.jpg"},
|
||||
{"/file.jpg", true, "file.jpg"},
|
||||
{"/file.jpg", false, "afile.jpg"},
|
||||
{"/file.jpg", false, "directory/file.jpg"},
|
||||
{"*.jpg", true, "file.jpg"},
|
||||
{"*.jpg", true, "directory/file.jpg"},
|
||||
{"*.jpg", false, "file.jpg/anotherfile.png"},
|
||||
{"dir/**", true, "dir/file.jpg"},
|
||||
{"dir/**", true, "dir/dir1/dir2/file.jpg"},
|
||||
{"dir/**", false, "directory/file.jpg"},
|
||||
{"dir/**", false, "adir/file.jpg"},
|
||||
{"l?ss", true, "less"},
|
||||
{"l?ss", true, "lass"},
|
||||
{"l?ss", false, "floss"},
|
||||
{"h[ae]llo", true, "hello"},
|
||||
{"h[ae]llo", true, "hallo"},
|
||||
{"h[ae]llo", false, "hullo"},
|
||||
{"{one,two}_potato", true, "one_potato"},
|
||||
{"{one,two}_potato", true, "two_potato"},
|
||||
{"{one,two}_potato", false, "three_potato"},
|
||||
{"{one,two}_potato", false, "_potato"},
|
||||
{"\\*.jpg", true, "*.jpg"},
|
||||
{"\\\\.jpg", true, "\\.jpg"},
|
||||
{"\\[one\\].jpg", true, "[one].jpg"},
|
||||
} {
|
||||
f, err := NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
err = f.Add(true, test.glob)
|
||||
require.NoError(t, err)
|
||||
err = f.Add(false, "*")
|
||||
require.NoError(t, err)
|
||||
included := f.Include(test.file, 0, time.Unix(0, 0))
|
||||
if included != test.included {
|
||||
t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
|
||||
}
|
||||
}
|
||||
}
|
||||
31
.rclone_repo/fs/filter/filterflags/filterflags.go
Executable file
31
.rclone_repo/fs/filter/filterflags/filterflags.go
Executable file
@@ -0,0 +1,31 @@
|
||||
// Package filterflags implements command line flags to set up a filter
|
||||
package filterflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options set by command line flags
|
||||
var (
|
||||
Opt = filter.DefaultOpt
|
||||
)
|
||||
|
||||
// AddFlags adds the non filing system specific flags to the command
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
|
||||
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
|
||||
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")
|
||||
flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, "exclude", "", nil, "Exclude files matching pattern")
|
||||
flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, "exclude-from", "", nil, "Read exclude patterns from file")
|
||||
flags.StringVarP(flagSet, &Opt.ExcludeFile, "exclude-if-present", "", "", "Exclude directories if filename is present")
|
||||
flags.StringArrayVarP(flagSet, &Opt.IncludeRule, "include", "", nil, "Include files matching pattern")
|
||||
flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, "include-from", "", nil, "Read include patterns from file")
|
||||
flags.StringArrayVarP(flagSet, &Opt.FilesFrom, "files-from", "", nil, "Read list of source-file names from file")
|
||||
flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||
flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||
flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Only transfer files bigger than this in k or suffix b|k|M|G")
|
||||
flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Only transfer files smaller than this in k or suffix b|k|M|G")
|
||||
//cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
|
||||
}
|
||||
166
.rclone_repo/fs/filter/glob.go
Executable file
166
.rclone_repo/fs/filter/glob.go
Executable file
@@ -0,0 +1,166 @@
|
||||
// rsync style glob parser
|
||||
|
||||
package filter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// globToRegexp converts an rsync style glob to a regexp
|
||||
//
|
||||
// documented in filtering.md
|
||||
func globToRegexp(glob string) (*regexp.Regexp, error) {
|
||||
var re bytes.Buffer
|
||||
if strings.HasPrefix(glob, "/") {
|
||||
glob = glob[1:]
|
||||
_, _ = re.WriteRune('^')
|
||||
} else {
|
||||
_, _ = re.WriteString("(^|/)")
|
||||
}
|
||||
consecutiveStars := 0
|
||||
insertStars := func() error {
|
||||
if consecutiveStars > 0 {
|
||||
switch consecutiveStars {
|
||||
case 1:
|
||||
_, _ = re.WriteString(`[^/]*`)
|
||||
case 2:
|
||||
_, _ = re.WriteString(`.*`)
|
||||
default:
|
||||
return errors.Errorf("too many stars in %q", glob)
|
||||
}
|
||||
}
|
||||
consecutiveStars = 0
|
||||
return nil
|
||||
}
|
||||
inBraces := false
|
||||
inBrackets := 0
|
||||
slashed := false
|
||||
for _, c := range glob {
|
||||
if slashed {
|
||||
_, _ = re.WriteRune(c)
|
||||
slashed = false
|
||||
continue
|
||||
}
|
||||
if c != '*' {
|
||||
err := insertStars()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if inBrackets > 0 {
|
||||
_, _ = re.WriteRune(c)
|
||||
if c == '[' {
|
||||
inBrackets++
|
||||
}
|
||||
if c == ']' {
|
||||
inBrackets--
|
||||
}
|
||||
continue
|
||||
}
|
||||
switch c {
|
||||
case '\\':
|
||||
_, _ = re.WriteRune(c)
|
||||
slashed = true
|
||||
case '*':
|
||||
consecutiveStars++
|
||||
case '?':
|
||||
_, _ = re.WriteString(`[^/]`)
|
||||
case '[':
|
||||
_, _ = re.WriteRune(c)
|
||||
inBrackets++
|
||||
case ']':
|
||||
return nil, errors.Errorf("mismatched ']' in glob %q", glob)
|
||||
case '{':
|
||||
if inBraces {
|
||||
return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
|
||||
}
|
||||
inBraces = true
|
||||
_, _ = re.WriteRune('(')
|
||||
case '}':
|
||||
if !inBraces {
|
||||
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
}
|
||||
_, _ = re.WriteRune(')')
|
||||
inBraces = false
|
||||
case ',':
|
||||
if inBraces {
|
||||
_, _ = re.WriteRune('|')
|
||||
} else {
|
||||
_, _ = re.WriteRune(c)
|
||||
}
|
||||
case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
|
||||
_, _ = re.WriteRune('\\')
|
||||
_, _ = re.WriteRune(c)
|
||||
default:
|
||||
_, _ = re.WriteRune(c)
|
||||
}
|
||||
}
|
||||
err := insertStars()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if inBrackets > 0 {
|
||||
return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
|
||||
}
|
||||
if inBraces {
|
||||
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
|
||||
}
|
||||
_, _ = re.WriteRune('$')
|
||||
result, err := regexp.Compile(re.String())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var (
|
||||
// Can't deal with / or ** in {}
|
||||
tooHardRe = regexp.MustCompile(`{[^{}]*(\*\*|/)[^{}]*}`)
|
||||
|
||||
// Squash all /
|
||||
squashSlash = regexp.MustCompile(`/{2,}`)
|
||||
)
|
||||
|
||||
// globToDirGlobs takes a file glob and turns it into a series of
|
||||
// directory globs. When matched with a directory (with a trailing /)
|
||||
// this should answer the question as to whether this glob could be in
|
||||
// this directory.
|
||||
func globToDirGlobs(glob string) (out []string) {
|
||||
if tooHardRe.MatchString(glob) {
|
||||
// Can't figure this one out so return any directory might match
|
||||
out = append(out, "/**")
|
||||
return out
|
||||
}
|
||||
|
||||
// Get rid of multiple /s
|
||||
glob = squashSlash.ReplaceAllString(glob, "/")
|
||||
|
||||
// Split on / or **
|
||||
// (** can contain /)
|
||||
for {
|
||||
i := strings.LastIndex(glob, "/")
|
||||
j := strings.LastIndex(glob, "**")
|
||||
what := ""
|
||||
if j > i {
|
||||
i = j
|
||||
what = "**"
|
||||
}
|
||||
if i < 0 {
|
||||
if len(out) == 0 {
|
||||
out = append(out, "/**")
|
||||
}
|
||||
break
|
||||
}
|
||||
glob = glob[:i]
|
||||
newGlob := glob + what + "/"
|
||||
if len(out) == 0 || out[len(out)-1] != newGlob {
|
||||
out = append(out, newGlob)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
104
.rclone_repo/fs/filter/glob_test.go
Executable file
104
.rclone_repo/fs/filter/glob_test.go
Executable file
@@ -0,0 +1,104 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGlobToRegexp(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want string
|
||||
error string
|
||||
}{
|
||||
{``, `(^|/)$`, ``},
|
||||
{`potato`, `(^|/)potato$`, ``},
|
||||
{`potato,sausage`, `(^|/)potato,sausage$`, ``},
|
||||
{`/potato`, `^potato$`, ``},
|
||||
{`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
|
||||
{`potat[oa]`, `(^|/)potat[oa]$`, ``},
|
||||
{`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
|
||||
{`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
|
||||
{`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
|
||||
{`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
|
||||
{`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
|
||||
{`potato**`, `(^|/)potato.*$`, ``},
|
||||
{`potato**sausage`, `(^|/)potato.*sausage$`, ``},
|
||||
{`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
|
||||
{`[\[\]]`, `(^|/)[\[\]]$`, ``},
|
||||
{`***potato`, `(^|/)`, `too many stars`},
|
||||
{`***`, `(^|/)`, `too many stars`},
|
||||
{`ab]c`, `(^|/)`, `mismatched ']'`},
|
||||
{`ab[c`, `(^|/)`, `mismatched '[' and ']'`},
|
||||
{`ab{{cd`, `(^|/)`, `can't nest`},
|
||||
{`ab{}}cd`, `(^|/)`, `mismatched '{' and '}'`},
|
||||
{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
|
||||
{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
|
||||
{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
|
||||
{`[a--b]`, `(^|/)`, `bad glob pattern`},
|
||||
{`a\*b`, `(^|/)a\*b$`, ``},
|
||||
{`a\\b`, `(^|/)a\\b$`, ``},
|
||||
} {
|
||||
gotRe, err := globToRegexp(test.in)
|
||||
if test.error == "" {
|
||||
got := gotRe.String()
|
||||
require.NoError(t, err, test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
} else {
|
||||
require.Error(t, err, test.in)
|
||||
assert.Contains(t, err.Error(), test.error, test.in)
|
||||
assert.Nil(t, gotRe)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGlobToDirGlobs(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want []string
|
||||
}{
|
||||
{`*`, []string{"/**"}},
|
||||
{`/*`, []string{"/"}},
|
||||
{`*.jpg`, []string{"/**"}},
|
||||
{`/*.jpg`, []string{"/"}},
|
||||
{`//*.jpg`, []string{"/"}},
|
||||
{`///*.jpg`, []string{"/"}},
|
||||
{`/a/*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a//*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a///*.jpg`, []string{"/a/", "/"}},
|
||||
{`/a/b/*.jpg`, []string{"/a/b/", "/a/", "/"}},
|
||||
{`a/*.jpg`, []string{"a/"}},
|
||||
{`a/b/*.jpg`, []string{"a/b/", "a/"}},
|
||||
{`*/*/*.jpg`, []string{"*/*/", "*/"}},
|
||||
{`a/b/`, []string{"a/b/", "a/"}},
|
||||
{`a/b`, []string{"a/"}},
|
||||
{`a/b/*.{jpg,png,gif}`, []string{"a/b/", "a/"}},
|
||||
{`/a/{jpg,png,gif}/*.{jpg,png,gif}`, []string{"/a/{jpg,png,gif}/", "/a/", "/"}},
|
||||
{`a/{a,a*b,a**c}/d/`, []string{"/**"}},
|
||||
{`/a/{a,a*b,a/c,d}/d/`, []string{"/**"}},
|
||||
{`**`, []string{"**/"}},
|
||||
{`a**`, []string{"a**/"}},
|
||||
{`a**b`, []string{"a**/"}},
|
||||
{`a**b**c**d`, []string{"a**b**c**/", "a**b**/", "a**/"}},
|
||||
{`a**b/c**d`, []string{"a**b/c**/", "a**b/", "a**/"}},
|
||||
{`/A/a**b/B/c**d/C/`, []string{"/A/a**b/B/c**d/C/", "/A/a**b/B/c**d/", "/A/a**b/B/c**/", "/A/a**b/B/", "/A/a**b/", "/A/a**/", "/A/", "/"}},
|
||||
{`/var/spool/**/ncw`, []string{"/var/spool/**/", "/var/spool/", "/var/", "/"}},
|
||||
{`var/spool/**/ncw/`, []string{"var/spool/**/ncw/", "var/spool/**/", "var/spool/", "var/"}},
|
||||
{"/file1.jpg", []string{`/`}},
|
||||
{"/file2.png", []string{`/`}},
|
||||
{"/*.jpg", []string{`/`}},
|
||||
{"/*.png", []string{`/`}},
|
||||
{"/potato", []string{`/`}},
|
||||
{"/sausage1", []string{`/`}},
|
||||
{"/sausage2*", []string{`/`}},
|
||||
{"/sausage3**", []string{`/sausage3**/`, "/"}},
|
||||
{"/a/*.jpg", []string{`/a/`, "/"}},
|
||||
} {
|
||||
_, err := globToRegexp(test.in)
|
||||
assert.NoError(t, err)
|
||||
got := globToDirGlobs(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
1063
.rclone_repo/fs/fs.go
Executable file
1063
.rclone_repo/fs/fs.go
Executable file
File diff suppressed because it is too large
Load Diff
72
.rclone_repo/fs/fs_test.go
Executable file
72
.rclone_repo/fs/fs_test.go
Executable file
@@ -0,0 +1,72 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFeaturesDisable(t *testing.T) {
|
||||
ft := new(Features)
|
||||
ft.Copy = func(src Object, remote string) (Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
ft.CaseInsensitive = true
|
||||
|
||||
assert.NotNil(t, ft.Copy)
|
||||
assert.Nil(t, ft.Purge)
|
||||
ft.Disable("copy")
|
||||
assert.Nil(t, ft.Copy)
|
||||
assert.Nil(t, ft.Purge)
|
||||
|
||||
assert.True(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
ft.Disable("caseinsensitive")
|
||||
assert.False(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
}
|
||||
|
||||
func TestFeaturesList(t *testing.T) {
|
||||
ft := new(Features)
|
||||
names := strings.Join(ft.List(), ",")
|
||||
assert.True(t, strings.Contains(names, ",Copy,"))
|
||||
}
|
||||
|
||||
func TestFeaturesDisableList(t *testing.T) {
|
||||
ft := new(Features)
|
||||
ft.Copy = func(src Object, remote string) (Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
ft.CaseInsensitive = true
|
||||
|
||||
assert.NotNil(t, ft.Copy)
|
||||
assert.Nil(t, ft.Purge)
|
||||
assert.True(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
|
||||
ft.DisableList([]string{"copy", "caseinsensitive"})
|
||||
|
||||
assert.Nil(t, ft.Copy)
|
||||
assert.Nil(t, ft.Purge)
|
||||
assert.False(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ pflag.Value = (*Option)(nil)
|
||||
|
||||
func TestOption(t *testing.T) {
|
||||
d := &Option{
|
||||
Name: "potato",
|
||||
Value: SizeSuffix(17 << 20),
|
||||
}
|
||||
assert.Equal(t, "17M", d.String())
|
||||
assert.Equal(t, "SizeSuffix", d.Type())
|
||||
err := d.Set("18M")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, SizeSuffix(18<<20), d.Value)
|
||||
err = d.Set("sdfsdf")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
292
.rclone_repo/fs/fserrors/error.go
Executable file
292
.rclone_repo/fs/fserrors/error.go
Executable file
@@ -0,0 +1,292 @@
|
||||
// Package fserrors provides errors and error handling
|
||||
package fserrors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Retrier is an optional interface for error as to whether the
|
||||
// operation should be retried at a high level.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type Retrier interface {
|
||||
error
|
||||
Retry() bool
|
||||
}
|
||||
|
||||
// retryError is a type of error
|
||||
type retryError string
|
||||
|
||||
// Error interface
|
||||
func (r retryError) Error() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (r retryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retrier = retryError("")
|
||||
|
||||
// RetryErrorf makes an error which indicates it would like to be retried
|
||||
func RetryErrorf(format string, a ...interface{}) error {
|
||||
return retryError(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// wrappedRetryError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedRetryError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// Retry interface
|
||||
func (err wrappedRetryError) Retry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retrier = wrappedRetryError{error(nil)}
|
||||
|
||||
// RetryError makes an error which indicates it would like to be retried
|
||||
func RetryError(err error) error {
|
||||
if err == nil {
|
||||
err = errors.New("needs retry")
|
||||
}
|
||||
return wrappedRetryError{err}
|
||||
}
|
||||
|
||||
// IsRetryError returns true if err conforms to the Retry interface
|
||||
// and calling the Retry method returns true.
|
||||
func IsRetryError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
_, err = Cause(err)
|
||||
if r, ok := err.(Retrier); ok {
|
||||
return r.Retry()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Fataler is an optional interface for error as to whether the
|
||||
// operation should cause the entire operation to finish immediately.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type Fataler interface {
|
||||
error
|
||||
Fatal() bool
|
||||
}
|
||||
|
||||
// wrappedFatalError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedFatalError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// Fatal interface
|
||||
func (err wrappedFatalError) Fatal() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Fataler = wrappedFatalError{error(nil)}
|
||||
|
||||
// FatalError makes an error which indicates it is a fatal error and
|
||||
// the sync should stop.
|
||||
func FatalError(err error) error {
|
||||
if err == nil {
|
||||
err = errors.New("fatal error")
|
||||
}
|
||||
return wrappedFatalError{err}
|
||||
}
|
||||
|
||||
// IsFatalError returns true if err conforms to the Fatal interface
|
||||
// and calling the Fatal method returns true.
|
||||
func IsFatalError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
_, err = Cause(err)
|
||||
if r, ok := err.(Fataler); ok {
|
||||
return r.Fatal()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NoRetrier is an optional interface for error as to whether the
|
||||
// operation should not be retried at a high level.
|
||||
//
|
||||
// If only NoRetry errors are returned in a sync then the sync won't
|
||||
// be retried.
|
||||
//
|
||||
// This should be returned from Update or Put methods as required
|
||||
type NoRetrier interface {
|
||||
error
|
||||
NoRetry() bool
|
||||
}
|
||||
|
||||
// wrappedNoRetryError is an error wrapped so it will satisfy the
|
||||
// Retrier interface and return true
|
||||
type wrappedNoRetryError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// NoRetry interface
|
||||
func (err wrappedNoRetryError) NoRetry() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ NoRetrier = wrappedNoRetryError{error(nil)}
|
||||
|
||||
// NoRetryError makes an error which indicates the sync shouldn't be
|
||||
// retried.
|
||||
func NoRetryError(err error) error {
|
||||
return wrappedNoRetryError{err}
|
||||
}
|
||||
|
||||
// IsNoRetryError returns true if err conforms to the NoRetry
|
||||
// interface and calling the NoRetry method returns true.
|
||||
func IsNoRetryError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
_, err = Cause(err)
|
||||
if r, ok := err.(NoRetrier); ok {
|
||||
return r.NoRetry()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Cause is a souped up errors.Cause which can unwrap some standard
|
||||
// library errors too. It returns true if any of the intermediate
|
||||
// errors had a Timeout() or Temporary() method which returned true.
|
||||
func Cause(cause error) (retriable bool, err error) {
|
||||
err = cause
|
||||
for prev := err; err != nil; prev = err {
|
||||
// Check for net error Timeout()
|
||||
if x, ok := err.(interface {
|
||||
Timeout() bool
|
||||
}); ok && x.Timeout() {
|
||||
retriable = true
|
||||
}
|
||||
|
||||
// Check for net error Temporary()
|
||||
if x, ok := err.(interface {
|
||||
Temporary() bool
|
||||
}); ok && x.Temporary() {
|
||||
retriable = true
|
||||
}
|
||||
|
||||
// Unwrap 1 level if possible
|
||||
err = errors.Cause(err)
|
||||
if err == nil {
|
||||
// errors.Cause can return nil which isn't
|
||||
// desirable so pick the previous error in
|
||||
// this case.
|
||||
err = prev
|
||||
}
|
||||
if err == prev {
|
||||
// Unpack any struct or *struct with a field
|
||||
// of name Err which satisfies the error
|
||||
// interface. This includes *url.Error,
|
||||
// *net.OpError, *os.SyscallError and many
|
||||
// others in the stdlib
|
||||
errType := reflect.TypeOf(err)
|
||||
errValue := reflect.ValueOf(err)
|
||||
if errValue.IsValid() && errType.Kind() == reflect.Ptr {
|
||||
errType = errType.Elem()
|
||||
errValue = errValue.Elem()
|
||||
}
|
||||
if errValue.IsValid() && errType.Kind() == reflect.Struct {
|
||||
if errField := errValue.FieldByName("Err"); errField.IsValid() {
|
||||
errFieldValue := errField.Interface()
|
||||
if newErr, ok := errFieldValue.(error); ok {
|
||||
err = newErr
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == prev {
|
||||
break
|
||||
}
|
||||
}
|
||||
return retriable, err
|
||||
}
|
||||
|
||||
// retriableErrorStrings is a list of phrases which when we find it
// in an error, we know it is a networking error which should be
// retried.
//
// This is incredibly ugly - if only errors.Cause worked for all
// errors and all errors were exported from the stdlib.
var retriableErrorStrings = []string{
	"use of closed network connection", // internal/poll/fd.go
	"unexpected EOF reading trailer",   // net/http/transfer.go
	"transport connection broken",      // net/http/transport.go
	"http: ContentLength=",             // net/http/transfer.go
}
|
||||
|
||||
// retriableErrors lists sentinel errors which indicate networking
// errors which should be retried.
//
// These are added to in retriable_errors*.go (platform specific
// syscall errors are appended in init functions there).
var retriableErrors = []error{
	io.EOF,
	io.ErrUnexpectedEOF,
}
|
||||
|
||||
// ShouldRetry looks at an error and tries to work out if retrying the
|
||||
// operation that caused it would be a good idea. It returns true if
|
||||
// the error implements Timeout() or Temporary() or if the error
|
||||
// indicates a premature closing of the connection.
|
||||
func ShouldRetry(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Find root cause if available
|
||||
retriable, err := Cause(err)
|
||||
if retriable {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if it is a retriable error
|
||||
for _, retriableErr := range retriableErrors {
|
||||
if err == retriableErr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check error strings (yuch!) too
|
||||
errString := err.Error()
|
||||
for _, phrase := range retriableErrorStrings {
|
||||
if strings.Contains(errString, phrase) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ShouldRetryHTTP returns true if this resp deserves a retry, ie if
// its HTTP status code is in the slice retryErrorCodes.  A nil
// response never deserves a retry.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
	if resp == nil {
		return false
	}
	for _, code := range retryErrorCodes {
		if code == resp.StatusCode {
			return true
		}
	}
	return false
}
|
||||
145
.rclone_repo/fs/fserrors/error_test.go
Executable file
145
.rclone_repo/fs/fserrors/error_test.go
Executable file
@@ -0,0 +1,145 @@
|
||||
package fserrors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var errUseOfClosedNetworkConnection = errors.New("use of closed network connection")

// makeNetErr makes a plausible network error with the underlying errno
func makeNetErr(errno syscall.Errno) error {
	return &net.OpError{
		Op:     "write",
		Net:    "tcp",
		Source: &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 123},
		Addr:   &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080},
		Err: &os.SyscallError{
			Syscall: "write",
			Err:     errno,
		},
	}
}

// myError1 has an Err field satisfying error, with a value receiver
type myError1 struct {
	Err error
}

func (e myError1) Error() string { return e.Err.Error() }

// myError2 has an Err field satisfying error, with a pointer receiver
type myError2 struct {
	Err error
}

func (e *myError2) Error() string {
	if e == nil {
		return "myError2(nil)"
	}
	if e.Err == nil {
		return "myError2{Err: nil}"
	}
	return e.Err.Error()
}

// myError3 has an Err field which does NOT satisfy the error interface
type myError3 struct {
	Err int
}

func (e *myError3) Error() string { return "hello" }

// myError4 wraps an error in an unexported field, so Cause can't see it
type myError4 struct {
	e error
}

func (e *myError4) Error() string { return e.e.Error() }

// errorCause implements the Cause() method used by errors.Cause
type errorCause struct {
	e error
}

func (e *errorCause) Error() string { return fmt.Sprintf("%#v", e) }

func (e *errorCause) Cause() error { return e.e }
|
||||
|
||||
// TestCause checks that Cause unwraps nested errors to the root cause
// and reports whether any intermediate error was retriable.
func TestCause(t *testing.T) {
	e3 := &myError3{3}
	e4 := &myError4{io.EOF}
	eNil1 := &myError2{nil}
	eNil2 := &myError2{Err: (*myError2)(nil)}
	errPotato := errors.New("potato")
	nilCause1 := &errorCause{nil}
	nilCause2 := &errorCause{(*myError2)(nil)}

	for i, test := range []struct {
		err           error
		wantRetriable bool
		wantErr       error
	}{
		{nil, false, nil},
		{errPotato, false, errPotato},
		{errors.Wrap(errPotato, "potato"), false, errPotato},
		{errors.Wrap(errors.Wrap(errPotato, "potato2"), "potato"), false, errPotato},
		{errUseOfClosedNetworkConnection, false, errUseOfClosedNetworkConnection},
		{makeNetErr(syscall.EAGAIN), true, syscall.EAGAIN},
		{makeNetErr(syscall.Errno(123123123)), false, syscall.Errno(123123123)},
		{eNil1, false, eNil1},
		{eNil2, false, eNil2.Err},
		{myError1{io.EOF}, false, io.EOF},
		{&myError2{io.EOF}, false, io.EOF},
		{e3, false, e3},
		{e4, false, e4},
		{&errorCause{errPotato}, false, errPotato},
		{nilCause1, false, nilCause1},
		{nilCause2, false, nilCause2.e},
	} {
		gotRetriable, gotErr := Cause(test.err)
		what := fmt.Sprintf("test #%d: %v", i, test.err)
		assert.Equal(t, test.wantErr, gotErr, what)
		assert.Equal(t, test.wantRetriable, gotRetriable, what)
	}
}
|
||||
|
||||
// TestShouldRetry checks the retry decision for plain, wrapped and
// nested network errors.
func TestShouldRetry(t *testing.T) {
	for i, test := range []struct {
		err  error
		want bool
	}{
		{nil, false},
		{errors.New("potato"), false},
		{errors.Wrap(errUseOfClosedNetworkConnection, "connection"), true},
		{io.EOF, true},
		{io.ErrUnexpectedEOF, true},
		{makeNetErr(syscall.EAGAIN), true},
		{makeNetErr(syscall.Errno(123123123)), false},
		{&url.Error{Op: "post", URL: "/", Err: io.EOF}, true},
		{&url.Error{Op: "post", URL: "/", Err: errUseOfClosedNetworkConnection}, true},
		{&url.Error{Op: "post", URL: "/", Err: fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", fmt.Errorf("http: ContentLength=%d with Body length %d", 100663336, 99590598))}, true},
		{
			errors.Wrap(&url.Error{
				Op:  "post",
				URL: "http://localhost/",
				Err: makeNetErr(syscall.EPIPE),
			}, "potato error"),
			true,
		},
		{
			errors.Wrap(&url.Error{
				Op:  "post",
				URL: "http://localhost/",
				Err: makeNetErr(syscall.Errno(123123123)),
			}, "listing error"),
			false,
		},
	} {
		got := ShouldRetry(test.err)
		assert.Equal(t, test.want, got, fmt.Sprintf("test #%d: %v", i, test.err))
	}
}
|
||||
21
.rclone_repo/fs/fserrors/retriable_errors.go
Executable file
21
.rclone_repo/fs/fserrors/retriable_errors.go
Executable file
@@ -0,0 +1,21 @@
|
||||
// +build !plan9

package fserrors

import (
	"syscall"
)

func init() {
	// Append the syscall errors which indicate a transient
	// networking problem worth retrying
	retriableErrors = append(retriableErrors,
		syscall.EPIPE,
		syscall.ETIMEDOUT,
		syscall.ECONNREFUSED,
		syscall.EHOSTDOWN,
		syscall.EHOSTUNREACH,
		syscall.ECONNABORTED,
		syscall.EAGAIN,
		syscall.EWOULDBLOCK,
		syscall.ECONNRESET,
	)
}
|
||||
31
.rclone_repo/fs/fserrors/retriable_errors_windows.go
Executable file
31
.rclone_repo/fs/fserrors/retriable_errors_windows.go
Executable file
@@ -0,0 +1,31 @@
|
||||
// +build windows

package fserrors

import (
	"syscall"
)

// Winsock error numbers which aren't defined in the syscall package
const (
	// WSAECONNABORTED is the Winsock "connection aborted" error
	WSAECONNABORTED syscall.Errno = 10053
	// WSAHOST_NOT_FOUND is the Winsock "host not found" error
	WSAHOST_NOT_FOUND syscall.Errno = 11001
	// WSATRY_AGAIN is the Winsock "try again" error
	WSATRY_AGAIN syscall.Errno = 11002
	// WSAENETRESET is the Winsock "network reset" error
	WSAENETRESET syscall.Errno = 10052
	// WSAETIMEDOUT is the Winsock "timed out" error
	WSAETIMEDOUT syscall.Errno = 10060
)

func init() {
	// append some lower level errors since the standardized ones
	// don't seem to happen
	retriableErrors = append(retriableErrors,
		syscall.WSAECONNRESET,
		WSAECONNABORTED,
		WSAHOST_NOT_FOUND,
		WSATRY_AGAIN,
		WSAENETRESET,
		WSAETIMEDOUT,
		syscall.ERROR_HANDLE_EOF,
		syscall.ERROR_NETNAME_DELETED,
		syscall.ERROR_BROKEN_PIPE,
	)
}
|
||||
312
.rclone_repo/fs/fshttp/http.go
Executable file
312
.rclone_repo/fs/fshttp/http.go
Executable file
@@ -0,0 +1,312 @@
|
||||
// Package fshttp contains the common http parts of the config, Transport and Client
|
||||
package fshttp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
const (
|
||||
separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
|
||||
separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
|
||||
)
|
||||
|
||||
var (
|
||||
transport http.RoundTripper
|
||||
noTransport sync.Once
|
||||
tpsBucket *rate.Limiter // for limiting number of http transactions per second
|
||||
)
|
||||
|
||||
// StartHTTPTokenBucket starts the token bucket if necessary.
//
// The bucket limits the rate of HTTP transactions to
// fs.Config.TPSLimit per second with a burst of TPSLimitBurst
// (forced to at least 1).  If TPSLimit is unset the bucket stays nil
// and no limiting happens.
func StartHTTPTokenBucket() {
	if fs.Config.TPSLimit > 0 {
		tpsBurst := fs.Config.TPSLimitBurst
		if tpsBurst < 1 {
			tpsBurst = 1
		}
		tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
		fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
	}
}
|
||||
|
||||
// A net.Conn that sets a deadline for every Read or Write operation,
// implementing an idle timeout: the deadline is pushed forward after
// each successful transfer of data.
type timeoutConn struct {
	net.Conn
	timeout time.Duration
}

// create a timeoutConn using the timeout, setting the initial deadline
func newTimeoutConn(conn net.Conn, timeout time.Duration) (c *timeoutConn, err error) {
	c = &timeoutConn{
		Conn:    conn,
		timeout: timeout,
	}
	err = c.nudgeDeadline()
	return
}

// Nudge the deadline for an idle timeout on by c.timeout if non-zero
func (c *timeoutConn) nudgeDeadline() (err error) {
	if c.timeout == 0 {
		return nil
	}
	when := time.Now().Add(c.timeout)
	return c.Conn.SetDeadline(when)
}

// readOrWrite runs f (a Read or Write function) on b, then pushes the
// deadline forward if any bytes were transferred without error.
func (c *timeoutConn) readOrWrite(f func([]byte) (int, error), b []byte) (n int, err error) {
	n, err = f(b)
	// Don't nudge if no bytes or an error
	if n == 0 || err != nil {
		return
	}
	// Nudge the deadline on successful Read or Write
	err = c.nudgeDeadline()
	return
}

// Read bytes doing idle timeouts
func (c *timeoutConn) Read(b []byte) (n int, err error) {
	return c.readOrWrite(c.Conn.Read, b)
}

// Write bytes doing idle timeouts
func (c *timeoutConn) Write(b []byte) (n int, err error) {
	return c.readOrWrite(c.Conn.Write, b)
}
|
||||
|
||||
// setDefaults copies the exported fields of struct *b into struct *a.
//
// A plain struct assignment can't be used because http.Transport
// contains a private mutex, so only the settable (exported) fields
// are copied one by one via reflection.
func setDefaults(a, b interface{}) {
	dst := reflect.ValueOf(a).Elem()
	src := reflect.ValueOf(b).Elem()
	numFields := reflect.TypeOf(a).Elem().NumField()
	for i := 0; i < numFields; i++ {
		// Only exported fields are settable
		if field := dst.Field(i); field.CanSet() {
			field.Set(src.Field(i))
		}
	}
}
|
||||
|
||||
// dialContextTimeout dials with context and timeouts, wrapping the
// resulting connection in a timeoutConn so each Read/Write carries an
// idle deadline of ci.Timeout.
func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
	dialer := NewDialer(ci)
	c, err := dialer.DialContext(ctx, network, address)
	if err != nil {
		return c, err
	}
	return newTimeoutConn(c, ci.Timeout)
}
|
||||
|
||||
// NewTransport returns an http.RoundTripper with the correct timeouts.
//
// The transport is built only once (guarded by noTransport) and the
// same instance is returned to every caller after that.
func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
	noTransport.Do(func() {
		// Start with a sensible set of defaults then override.
		// This also means we get new stuff when it gets added to go
		t := new(http.Transport)
		setDefaults(t, http.DefaultTransport.(*http.Transport))
		t.Proxy = http.ProxyFromEnvironment
		// Allow enough idle connections for all checkers and transfers
		t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
		t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
		t.TLSHandshakeTimeout = ci.ConnectTimeout
		t.ResponseHeaderTimeout = ci.Timeout
		t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
		t.DisableCompression = ci.NoGzip
		// Dial through our timeout-aware dialer
		t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
			return dialContextTimeout(ctx, network, addr, ci)
		}
		t.IdleConnTimeout = 60 * time.Second
		t.ExpectContinueTimeout = ci.ConnectTimeout
		// Wrap that http.Transport in our own transport
		transport = newTransport(ci, t)
	})
	return transport
}
|
||||
|
||||
// NewClient returns an http.Client with the correct timeouts
|
||||
func NewClient(ci *fs.ConfigInfo) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: NewTransport(ci),
|
||||
}
|
||||
}
|
||||
|
||||
// Transport is our http Transport which wraps an http.Transport
// * Sets the User Agent
// * Does logging
type Transport struct {
	*http.Transport
	dump          fs.DumpFlags            // which dump flags are active
	filterRequest func(req *http.Request) // optional per-request mutator
	userAgent     string                  // forced User-Agent header
}

// newTransport wraps the http.Transport passed in and logs all
// roundtrips including the body if logBody is set.
func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
	return &Transport{
		Transport: transport,
		dump:      ci.Dump,
		userAgent: ci.UserAgent,
	}
}

// SetRequestFilter sets a filter to be used on each request
func (t *Transport) SetRequestFilter(f func(req *http.Request)) {
	t.filterRequest = f
}
|
||||
|
||||
// A mutex to protect this map
var checkedHostMu sync.RWMutex

// A map of servers we have checked for time
var checkedHost = make(map[string]struct{}, 1)

// checkServerTime checks the server time is the same as ours, once
// for each server.  It parses the response's Date header and logs a
// warning if it differs from local time by more than 5 minutes.
func checkServerTime(req *http.Request, resp *http.Response) {
	host := req.URL.Host
	if req.Host != "" {
		host = req.Host
	}
	// Skip hosts we have already checked
	checkedHostMu.RLock()
	_, ok := checkedHost[host]
	checkedHostMu.RUnlock()
	if ok {
		return
	}
	dateString := resp.Header.Get("Date")
	if dateString == "" {
		return
	}
	date, err := http.ParseTime(dateString)
	if err != nil {
		fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
		return
	}
	dt := time.Since(date)
	const window = 5 * 60 * time.Second
	if dt > window || dt < -window {
		fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
	}
	// Remember that this host has been checked
	checkedHostMu.Lock()
	checkedHost[host] = struct{}{}
	checkedHostMu.Unlock()
}
|
||||
|
||||
// cleanAuth redacts the value of one auth header (identified by the
// "Header-Name: " prefix in authBuf) within the first 4k of buf.
//
// The first 4 characters of the value are overwritten with 'X' and
// the rest of the value, up to the terminating '\n', is snipped out.
// buf is modified in place and the (possibly shortened) buffer is
// returned.
func cleanAuth(buf, authBuf []byte) []byte {
	// Only search the start of the buffer for the header
	window := len(buf)
	if window > 4096 {
		window = 4096
	}
	start := bytes.Index(buf[:window], authBuf)
	if start < 0 {
		// No auth header present
		return buf
	}
	// pos points at the first byte of the header's value
	pos := start + len(authBuf)
	// Mask up to 4 characters of the value with 'X'
	for masked := 0; pos < len(buf) && masked < 4; masked++ {
		if buf[pos] == '\n' {
			break
		}
		buf[pos] = 'X'
		pos++
	}
	// Snip out the rest of the value up to the next '\n'
	rest := bytes.IndexByte(buf[pos:], '\n')
	if rest < 0 {
		return buf[:pos]
	}
	kept := copy(buf[pos:], buf[pos+rest:])
	return buf[:pos+kept]
}

// authBufs lists the auth header prefixes which cleanAuths redacts
var authBufs = [][]byte{
	[]byte("Authorization: "),
	[]byte("X-Auth-Token: "),
}

// cleanAuths redacts all the possible Auth headers in buf
func cleanAuths(buf []byte) []byte {
	for _, header := range authBufs {
		buf = cleanAuth(buf, header)
	}
	return buf
}
|
||||
|
||||
// RoundTrip implements the RoundTripper interface.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
// Get transactions per second token first if limiting
|
||||
if tpsBucket != nil {
|
||||
tbErr := tpsBucket.Wait(req.Context())
|
||||
if tbErr != nil {
|
||||
fs.Errorf(nil, "HTTP token bucket error: %v", err)
|
||||
}
|
||||
}
|
||||
// Force user agent
|
||||
req.Header.Set("User-Agent", t.userAgent)
|
||||
// Filter the request if required
|
||||
if t.filterRequest != nil {
|
||||
t.filterRequest(req)
|
||||
}
|
||||
// Logf request
|
||||
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||
buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
|
||||
if t.dump&fs.DumpAuth == 0 {
|
||||
buf = cleanAuths(buf)
|
||||
}
|
||||
fs.Debugf(nil, "%s", separatorReq)
|
||||
fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
|
||||
fs.Debugf(nil, "%s", string(buf))
|
||||
fs.Debugf(nil, "%s", separatorReq)
|
||||
}
|
||||
// Do round trip
|
||||
resp, err = t.Transport.RoundTrip(req)
|
||||
// Logf response
|
||||
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||
fs.Debugf(nil, "%s", separatorResp)
|
||||
fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Error: %v", err)
|
||||
} else {
|
||||
buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
|
||||
fs.Debugf(nil, "%s", string(buf))
|
||||
}
|
||||
fs.Debugf(nil, "%s", separatorResp)
|
||||
}
|
||||
if err == nil {
|
||||
checkServerTime(req, resp)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// NewDialer creates a net.Dialer structure with Timeout, Keepalive
// and LocalAddr set from rclone flags.
func NewDialer(ci *fs.ConfigInfo) *net.Dialer {
	dialer := &net.Dialer{
		Timeout:   ci.ConnectTimeout,
		KeepAlive: 30 * time.Second,
	}
	// Bind the dialer to the requested local address if given
	if ci.BindAddr != nil {
		dialer.LocalAddr = &net.TCPAddr{IP: ci.BindAddr}
	}
	return dialer
}
|
||||
78
.rclone_repo/fs/fshttp/http_test.go
Executable file
78
.rclone_repo/fs/fshttp/http_test.go
Executable file
@@ -0,0 +1,78 @@
|
||||
package fshttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// ptr returns the "%p" representation of the thing passed in
func ptr(p interface{}) string {
	return fmt.Sprintf("%p", p)
}
|
||||
|
||||
// TestSetDefaults checks that the public fields of http.Transport are
// copied by setDefaults.
func TestSetDefaults(t *testing.T) {
	old := http.DefaultTransport.(*http.Transport)
	newT := new(http.Transport)
	setDefaults(newT, old)
	// Can't use assert.Equal or reflect.DeepEqual for this as it has functions in
	// Check functions by comparing the "%p" representations of them
	assert.Equal(t, ptr(old.Proxy), ptr(newT.Proxy), "when checking .Proxy")
	assert.Equal(t, ptr(old.DialContext), ptr(newT.DialContext), "when checking .DialContext")
	// Check the other public fields
	assert.Equal(t, ptr(old.Dial), ptr(newT.Dial), "when checking .Dial")
	assert.Equal(t, ptr(old.DialTLS), ptr(newT.DialTLS), "when checking .DialTLS")
	assert.Equal(t, old.TLSClientConfig, newT.TLSClientConfig, "when checking .TLSClientConfig")
	assert.Equal(t, old.TLSHandshakeTimeout, newT.TLSHandshakeTimeout, "when checking .TLSHandshakeTimeout")
	assert.Equal(t, old.DisableKeepAlives, newT.DisableKeepAlives, "when checking .DisableKeepAlives")
	assert.Equal(t, old.DisableCompression, newT.DisableCompression, "when checking .DisableCompression")
	assert.Equal(t, old.MaxIdleConns, newT.MaxIdleConns, "when checking .MaxIdleConns")
	assert.Equal(t, old.MaxIdleConnsPerHost, newT.MaxIdleConnsPerHost, "when checking .MaxIdleConnsPerHost")
	assert.Equal(t, old.IdleConnTimeout, newT.IdleConnTimeout, "when checking .IdleConnTimeout")
	assert.Equal(t, old.ResponseHeaderTimeout, newT.ResponseHeaderTimeout, "when checking .ResponseHeaderTimeout")
	assert.Equal(t, old.ExpectContinueTimeout, newT.ExpectContinueTimeout, "when checking .ExpectContinueTimeout")
	assert.Equal(t, old.TLSNextProto, newT.TLSNextProto, "when checking .TLSNextProto")
	assert.Equal(t, old.MaxResponseHeaderBytes, newT.MaxResponseHeaderBytes, "when checking .MaxResponseHeaderBytes")
}
|
||||
|
||||
// TestCleanAuth checks redaction of a single auth header, including
// masking of the first 4 value characters and snipping of the rest.
func TestCleanAuth(t *testing.T) {
	for _, test := range []struct {
		in   string
		want string
	}{
		{"", ""},
		{"floo", "floo"},
		{"Authorization: ", "Authorization: "},
		{"Authorization: \n", "Authorization: \n"},
		{"Authorization: A", "Authorization: X"},
		{"Authorization: A\n", "Authorization: X\n"},
		{"Authorization: AAAA", "Authorization: XXXX"},
		{"Authorization: AAAA\n", "Authorization: XXXX\n"},
		{"Authorization: AAAAA", "Authorization: XXXX"},
		{"Authorization: AAAAA\n", "Authorization: XXXX\n"},
		{"Authorization: AAAA\n", "Authorization: XXXX\n"},
		{"Authorization: AAAAAAAAA\nPotato: Help\n", "Authorization: XXXX\nPotato: Help\n"},
		{"Sausage: 1\nAuthorization: AAAAAAAAA\nPotato: Help\n", "Sausage: 1\nAuthorization: XXXX\nPotato: Help\n"},
	} {
		got := string(cleanAuth([]byte(test.in), authBufs[0]))
		assert.Equal(t, test.want, got, test.in)
	}
}
|
||||
|
||||
// TestCleanAuths checks that all known auth header types are redacted.
func TestCleanAuths(t *testing.T) {
	for _, test := range []struct {
		in   string
		want string
	}{
		{"", ""},
		{"floo", "floo"},
		{"Authorization: AAAAAAAAA\nPotato: Help\n", "Authorization: XXXX\nPotato: Help\n"},
		{"X-Auth-Token: AAAAAAAAA\nPotato: Help\n", "X-Auth-Token: XXXX\nPotato: Help\n"},
		{"X-Auth-Token: AAAAAAAAA\nAuthorization: AAAAAAAAA\nPotato: Help\n", "X-Auth-Token: XXXX\nAuthorization: XXXX\nPotato: Help\n"},
	} {
		got := string(cleanAuths([]byte(test.in)))
		assert.Equal(t, test.want, got, test.in)
	}
}
|
||||
50
.rclone_repo/fs/fspath/path.go
Executable file
50
.rclone_repo/fs/fspath/path.go
Executable file
@@ -0,0 +1,50 @@
|
||||
// Package fspath contains routines for fspath manipulation
|
||||
package fspath
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
)
|
||||
|
||||
// Matcher is a pattern to match an rclone URL
var Matcher = regexp.MustCompile(`^(:?[\w_ -]+):(.*)$`)

// Parse deconstructs a remote path into configName and fsPath
//
// If the path is a local path then configName will be returned as "".
//
// So "remote:path/to/dir" will return "remote", "path/to/dir"
// and "/path/to/local" will return ("", "/path/to/local")
//
// Note that this will turn \ into / in the fsPath on Windows
func Parse(path string) (configName, fsPath string) {
	parts := Matcher.FindStringSubmatch(path)
	configName, fsPath = "", path
	// A bare drive letter (eg "C:") is treated as a local path, not a
	// remote name
	if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
		configName, fsPath = parts[1], parts[2]
	}
	// change native directory separators to / if there are any
	fsPath = filepath.ToSlash(fsPath)
	return configName, fsPath
}
|
||||
|
||||
// Split splits a remote into a parent and a leaf
|
||||
//
|
||||
// if it returns leaf as an empty string then remote is a directory
|
||||
//
|
||||
// if it returns parent as an empty string then that means the current directory
|
||||
//
|
||||
// The returned values have the property that parent + leaf == remote
|
||||
// (except under Windows where \ will be translated into /)
|
||||
func Split(remote string) (parent string, leaf string) {
|
||||
remoteName, remotePath := Parse(remote)
|
||||
if remoteName != "" {
|
||||
remoteName += ":"
|
||||
}
|
||||
// Construct new remote name without last segment
|
||||
parent, leaf = path.Split(remotePath)
|
||||
return remoteName + parent, leaf
|
||||
}
|
||||
60
.rclone_repo/fs/fspath/path_test.go
Executable file
60
.rclone_repo/fs/fspath/path_test.go
Executable file
@@ -0,0 +1,60 @@
|
||||
package fspath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestParse checks splitting of remote specs into config name and path.
func TestParse(t *testing.T) {
	for _, test := range []struct {
		in, wantConfigName, wantFsPath string
	}{
		{"", "", ""},
		{"/path/to/file", "", "/path/to/file"},
		{"path/to/file", "", "path/to/file"},
		{"remote:path/to/file", "remote", "path/to/file"},
		{"remote:/path/to/file", "remote", "/path/to/file"},
		{":backend:/path/to/file", ":backend", "/path/to/file"},
	} {
		gotConfigName, gotFsPath := Parse(test.in)
		assert.Equal(t, test.wantConfigName, gotConfigName)
		assert.Equal(t, test.wantFsPath, gotFsPath)
	}
}
|
||||
|
||||
// TestSplit checks the parent/leaf split, including the invariant
// that parent + leaf reassembles to the original remote.
func TestSplit(t *testing.T) {
	for _, test := range []struct {
		remote, wantParent, wantLeaf string
	}{
		{"", "", ""},

		{"remote:", "remote:", ""},
		{"remote:potato", "remote:", "potato"},
		{"remote:/", "remote:/", ""},
		{"remote:/potato", "remote:/", "potato"},
		{"remote:/potato/potato", "remote:/potato/", "potato"},
		{"remote:potato/sausage", "remote:potato/", "sausage"},

		{":remote:", ":remote:", ""},
		{":remote:potato", ":remote:", "potato"},
		{":remote:/", ":remote:/", ""},
		{":remote:/potato", ":remote:/", "potato"},
		{":remote:/potato/potato", ":remote:/potato/", "potato"},
		{":remote:potato/sausage", ":remote:potato/", "sausage"},

		{"/", "/", ""},
		{"/root", "/", "root"},
		{"/a/b", "/a/", "b"},
		{"root", "", "root"},
		{"a/b", "a/", "b"},
		{"root/", "root/", ""},
		{"a/b/", "a/b/", ""},
	} {
		gotParent, gotLeaf := Split(test.remote)
		assert.Equal(t, test.wantParent, gotParent, test.remote)
		assert.Equal(t, test.wantLeaf, gotLeaf, test.remote)
		assert.Equal(t, test.remote, gotParent+gotLeaf, fmt.Sprintf("%s: %q + %q != %q", test.remote, gotParent, gotLeaf, test.remote))
	}
}
|
||||
308
.rclone_repo/fs/hash/hash.go
Executable file
308
.rclone_repo/fs/hash/hash.go
Executable file
@@ -0,0 +1,308 @@
|
||||
package hash
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/backend/dropbox/dbhash"
|
||||
"github.com/ncw/rclone/backend/onedrive/quickxorhash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Type indicates a standard hashing algorithm.  It is a bit flag so
// several types can be combined into a Set.
type Type int

// ErrUnsupported should be returned by filesystem,
// if it is requested to deliver an unsupported hash type.
var ErrUnsupported = errors.New("hash type not supported")

const (
	// MD5 indicates MD5 support
	MD5 Type = 1 << iota

	// SHA1 indicates SHA-1 support
	SHA1

	// Dropbox indicates Dropbox special hash
	// https://www.dropbox.com/developers/reference/content-hash
	Dropbox

	// QuickXorHash indicates Microsoft onedrive hash
	// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
	QuickXorHash

	// None indicates no hashes are supported
	None Type = 0
)
|
||||
|
||||
// Supported returns a set of all the supported hashes by
// HashStream and MultiHasher.
var Supported = NewHashSet(MD5, SHA1, Dropbox, QuickXorHash)

// Width returns the width in characters (of the hex encoding) for any
// HashType
var Width = map[Type]int{
	MD5:          32,
	SHA1:         40,
	Dropbox:      64,
	QuickXorHash: 40,
}
|
||||
|
||||
// Stream will calculate hashes of all supported hash types by reading
// r to EOF.
func Stream(r io.Reader) (map[Type]string, error) {
	return StreamTypes(r, Supported)
}
|
||||
|
||||
// StreamTypes will calculate hashes of the requested hash types.
|
||||
func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
|
||||
hashers, err := fromTypes(set)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = io.Copy(toMultiWriter(hashers), r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ret = make(map[Type]string)
|
||||
for k, v := range hashers {
|
||||
ret[k] = hex.EncodeToString(v.Sum(nil))
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// String returns a string representation of the hash type.
// The function will panic if the hash type is unknown.
func (h Type) String() string {
	switch h {
	case None:
		return "None"
	case MD5:
		return "MD5"
	case SHA1:
		return "SHA-1"
	case Dropbox:
		return "DropboxHash"
	case QuickXorHash:
		return "QuickXorHash"
	default:
		err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
		panic(err)
	}
}

// Set a Type from a flag - the inverse of String.  Returns an error
// for unknown names.
func (h *Type) Set(s string) error {
	switch s {
	case "None":
		*h = None
	case "MD5":
		*h = MD5
	case "SHA-1":
		*h = SHA1
	case "DropboxHash":
		*h = Dropbox
	case "QuickXorHash":
		*h = QuickXorHash
	default:
		return errors.Errorf("Unknown hash type %q", s)
	}
	return nil
}

// Type of the value - part of the pflag.Value interface
func (h Type) Type() string {
	return "string"
}
|
||||
|
||||
// fromTypes will return hashers for all the requested types.
// The types must be a subset of SupportedHashes,
// and this function must support all types.
func fromTypes(set Set) (map[Type]hash.Hash, error) {
	if !set.SubsetOf(Supported) {
		return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
	}
	var hashers = make(map[Type]hash.Hash)
	types := set.Array()
	for _, t := range types {
		switch t {
		case MD5:
			hashers[t] = md5.New()
		case SHA1:
			hashers[t] = sha1.New()
		case Dropbox:
			hashers[t] = dbhash.New()
		case QuickXorHash:
			hashers[t] = quickxorhash.New()
		default:
			// Unreachable while Supported and this switch agree
			err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
			panic(err)
		}
	}
	return hashers, nil
}
|
||||
|
||||
// toMultiWriter will return a set of hashers into a
|
||||
// single multiwriter, where one write will update all
|
||||
// the hashers.
|
||||
func toMultiWriter(h map[Type]hash.Hash) io.Writer {
|
||||
// Convert to to slice
|
||||
var w = make([]io.Writer, 0, len(h))
|
||||
for _, v := range h {
|
||||
w = append(w, v)
|
||||
}
|
||||
return io.MultiWriter(w...)
|
||||
}
|
||||
|
||||
// A MultiHasher will construct various hashes on
// all incoming writes.
type MultiHasher struct {
	w    io.Writer          // multiwriter fanning out to the hashers
	size int64              // number of bytes written so far
	h    map[Type]hash.Hash // Hashes
}

// NewMultiHasher will return a hash writer that will write all
// supported hash types.
func NewMultiHasher() *MultiHasher {
	h, err := NewMultiHasherTypes(Supported)
	if err != nil {
		// Supported is a valid set, so this cannot happen
		panic("internal error: could not create multihasher")
	}
	return h
}

// NewMultiHasherTypes will return a hash writer that will write
// the requested hash types.
func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
	hashers, err := fromTypes(set)
	if err != nil {
		return nil, err
	}
	m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
	return &m, nil
}

// Write updates all the hashes with p and accumulates the byte count.
func (m *MultiHasher) Write(p []byte) (n int, err error) {
	n, err = m.w.Write(p)
	m.size += int64(n)
	return n, err
}

// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[Type]string {
	dst := make(map[Type]string)
	for k, v := range m.h {
		dst[k] = hex.EncodeToString(v.Sum(nil))
	}
	return dst
}

// Size returns the number of bytes written
func (m *MultiHasher) Size() int64 {
	return m.size
}
|
||||
|
||||
// A Set indicates one or more hash types, stored as a bitmask of Type
// values.
type Set int
|
||||
|
||||
// NewHashSet will create a new hash set with the hash types supplied
|
||||
func NewHashSet(t ...Type) Set {
|
||||
h := Set(None)
|
||||
return h.Add(t...)
|
||||
}
|
||||
|
||||
// Add one or more hash types to the set.
|
||||
// Returns the modified hash set.
|
||||
func (h *Set) Add(t ...Type) Set {
|
||||
for _, v := range t {
|
||||
*h |= Set(v)
|
||||
}
|
||||
return *h
|
||||
}
|
||||
|
||||
// Contains returns true if the set contains the given hash type.
func (h Set) Contains(t Type) bool {
	return int(h)&int(t) != 0
}
|
||||
|
||||
// Overlap returns the overlapping hash types
|
||||
func (h Set) Overlap(t Set) Set {
|
||||
return Set(int(h) & int(t))
|
||||
}
|
||||
|
||||
// SubsetOf will return true if all types of h
|
||||
// is present in the set c
|
||||
func (h Set) SubsetOf(c Set) bool {
|
||||
return int(h)|int(c) == int(c)
|
||||
}
|
||||
|
||||
// GetOne will return a hash type.
|
||||
// Currently the first is returned, but it could be
|
||||
// improved to return the strongest.
|
||||
func (h Set) GetOne() Type {
|
||||
v := int(h)
|
||||
i := uint(0)
|
||||
for v != 0 {
|
||||
if v&1 != 0 {
|
||||
return Type(1 << i)
|
||||
}
|
||||
i++
|
||||
v >>= 1
|
||||
}
|
||||
return Type(None)
|
||||
}
|
||||
|
||||
// Array returns an array of all hash types in the set
|
||||
func (h Set) Array() (ht []Type) {
|
||||
v := int(h)
|
||||
i := uint(0)
|
||||
for v != 0 {
|
||||
if v&1 != 0 {
|
||||
ht = append(ht, Type(1<<i))
|
||||
}
|
||||
i++
|
||||
v >>= 1
|
||||
}
|
||||
return ht
|
||||
}
|
||||
|
||||
// Count returns the number of hash types in the set
|
||||
func (h Set) Count() int {
|
||||
if int(h) == 0 {
|
||||
return 0
|
||||
}
|
||||
// credit: https://code.google.com/u/arnehormann/
|
||||
x := uint64(h)
|
||||
x -= (x >> 1) & 0x5555555555555555
|
||||
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
|
||||
x += x >> 4
|
||||
x &= 0x0f0f0f0f0f0f0f0f
|
||||
x *= 0x0101010101010101
|
||||
return int(x >> 56)
|
||||
}
|
||||
|
||||
// String returns a string representation of the hash set.
|
||||
// The function will panic if it contains an unknown type.
|
||||
func (h Set) String() string {
|
||||
a := h.Array()
|
||||
var r []string
|
||||
for _, v := range a {
|
||||
r = append(r, v.String())
|
||||
}
|
||||
return "[" + strings.Join(r, ", ") + "]"
|
||||
}
|
||||
|
||||
// Equals reports whether src and dst match, treating an empty string
// on either side as a wildcard that matches anything.
func Equals(src, dst string) bool {
	switch {
	case src == "", dst == "":
		return true
	default:
		return src == dst
	}
}
|
||||
169
.rclone_repo/fs/hash/hash_test.go
Executable file
169
.rclone_repo/fs/hash/hash_test.go
Executable file
@@ -0,0 +1,169 @@
|
||||
package hash_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the pflag.Value interface at compile time.
var _ pflag.Value = (*hash.Type)(nil)
|
||||
|
||||
// TestHashSet exercises the Set bitmask operations: Count, Array,
// Add, GetOne, Overlap, SubsetOf and Contains.
func TestHashSet(t *testing.T) {
	var h hash.Set

	// The zero value is the empty set.
	assert.Equal(t, 0, h.Count())

	a := h.Array()
	assert.Len(t, a, 0)

	// A single-element set returns that element from GetOne/Array.
	h = h.Add(hash.MD5)
	assert.Equal(t, 1, h.Count())
	assert.Equal(t, hash.MD5, h.GetOne())
	a = h.Array()
	assert.Len(t, a, 1)
	assert.Equal(t, a[0], hash.MD5)

	// Test overlap, with all hashes
	h = h.Overlap(hash.Supported)
	assert.Equal(t, 1, h.Count())
	assert.Equal(t, hash.MD5, h.GetOne())
	assert.True(t, h.SubsetOf(hash.Supported))
	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))

	// With two elements GetOne may return either one.
	h = h.Add(hash.SHA1)
	assert.Equal(t, 2, h.Count())
	one := h.GetOne()
	if !(one == hash.MD5 || one == hash.SHA1) {
		t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
	}
	assert.True(t, h.SubsetOf(hash.Supported))
	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))
	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.SHA1)))
	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5, hash.SHA1)))
	a = h.Array()
	assert.Len(t, a, 2)

	// Overlap with a single type narrows the set to that type.
	ol := h.Overlap(hash.NewHashSet(hash.MD5))
	assert.Equal(t, 1, ol.Count())
	assert.True(t, ol.Contains(hash.MD5))
	assert.False(t, ol.Contains(hash.SHA1))

	// Overlap with a superset keeps both types.
	ol = h.Overlap(hash.NewHashSet(hash.MD5, hash.SHA1))
	assert.Equal(t, 2, ol.Count())
	assert.True(t, ol.Contains(hash.MD5))
	assert.True(t, ol.Contains(hash.SHA1))
}
|
||||
|
||||
// hashTest is a table entry pairing an input byte sequence with its
// expected hex digest for each hash type.
type hashTest struct {
	input  []byte               // bytes fed to the hasher
	output map[hash.Type]string // expected hex digest per hash type
}
|
||||
|
||||
// hashTestSet holds shared fixtures: one non-trivial input and the
// empty input, each with known digests for all supported hash types.
var hashTestSet = []hashTest{
	{
		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
		output: map[hash.Type]string{
			hash.MD5:          "bf13fc19e5151ac57d4252e0e0f87abe",
			hash.SHA1:         "3ab6543c08a75f292a5ecedac87ec41642d12166",
			hash.Dropbox:      "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
			hash.QuickXorHash: "0110c000085000031c0001095ec00218d0000700",
		},
	},
	// Empty data set
	{
		input: []byte{},
		output: map[hash.Type]string{
			hash.MD5:          "d41d8cd98f00b204e9800998ecf8427e",
			hash.SHA1:         "da39a3ee5e6b4b0d3255bfef95601890afd80709",
			hash.Dropbox:      "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			hash.QuickXorHash: "0000000000000000000000000000000000000000",
		},
	},
}
|
||||
|
||||
// TestMultiHasher checks that a MultiHasher computing all supported
// types produces exactly the expected digest for each type.
func TestMultiHasher(t *testing.T) {
	for _, test := range hashTestSet {
		mh := hash.NewMultiHasher()
		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
		require.NoError(t, err)
		assert.Len(t, test.input, int(n))
		sums := mh.Sums()
		// Every computed sum must match the fixture.
		for k, v := range sums {
			expect, ok := test.output[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
		// Test that all are present
		for k, v := range test.output {
			expect, ok := sums[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
	}
}
|
||||
|
||||
// TestMultiHasherTypes checks that a MultiHasher restricted to a
// single type (SHA1) computes only that digest.
func TestMultiHasherTypes(t *testing.T) {
	h := hash.SHA1
	for _, test := range hashTestSet {
		mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(h))
		if err != nil {
			t.Fatal(err)
		}
		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
		require.NoError(t, err)
		assert.Len(t, test.input, int(n))
		sums := mh.Sums()
		// Only the one requested digest should be present.
		assert.Len(t, sums, 1)
		assert.Equal(t, sums[h], test.output[h])
	}
}
|
||||
|
||||
// TestHashStream checks the one-shot hash.Stream helper against the
// same fixtures as the incremental MultiHasher tests.
func TestHashStream(t *testing.T) {
	for _, test := range hashTestSet {
		sums, err := hash.Stream(bytes.NewBuffer(test.input))
		require.NoError(t, err)
		// Every computed sum must match the fixture.
		for k, v := range sums {
			expect, ok := test.output[k]
			require.True(t, ok)
			assert.Equal(t, v, expect)
		}
		// Test that all are present
		for k, v := range test.output {
			expect, ok := sums[k]
			require.True(t, ok)
			assert.Equal(t, v, expect)
		}
	}
}
|
||||
|
||||
// TestHashStreamTypes checks hash.StreamTypes restricted to a single
// hash type (SHA1).
func TestHashStreamTypes(t *testing.T) {
	h := hash.SHA1
	for _, test := range hashTestSet {
		sums, err := hash.StreamTypes(bytes.NewBuffer(test.input), hash.NewHashSet(h))
		require.NoError(t, err)
		assert.Len(t, sums, 1)
		assert.Equal(t, sums[h], test.output[h])
	}
}
|
||||
|
||||
// TestHashSetStringer checks Set.String for full, single and empty
// sets.
func TestHashSetStringer(t *testing.T) {
	h := hash.NewHashSet(hash.SHA1, hash.MD5, hash.Dropbox, hash.QuickXorHash)
	assert.Equal(t, h.String(), "[MD5, SHA-1, DropboxHash, QuickXorHash]")
	h = hash.NewHashSet(hash.SHA1)
	assert.Equal(t, h.String(), "[SHA-1]")
	h = hash.NewHashSet()
	assert.Equal(t, h.String(), "[]")
}
|
||||
|
||||
// TestHashStringer checks Type.String for a known type and None.
func TestHashStringer(t *testing.T) {
	h := hash.MD5
	assert.Equal(t, h.String(), "MD5")
	h = hash.None
	assert.Equal(t, h.String(), "None")
}
|
||||
102
.rclone_repo/fs/list/list.go
Executable file
102
.rclone_repo/fs/list/list.go
Executable file
@@ -0,0 +1,102 @@
|
||||
// Package list contains list functions
|
||||
package list
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DirSorted reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned in sorted order
func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
	// Get unfiltered entries from the fs
	entries, err = f.List(dir)
	if err != nil {
		return nil, err
	}
	// This should happen only if exclude files lives in the
	// starting directory, otherwise ListDirSorted should not be
	// called.
	// Returns nil, nil here: the whole directory is excluded.
	if !includeAll && filter.Active.ListContainsExcludeFile(entries) {
		fs.Debugf(dir, "Excluded from sync (and deletion)")
		return nil, nil
	}
	return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f))
}
|
||||
|
||||
// filterAndSortDir filters (if required) and sanity-checks the
// entries, then sorts them by Remote.
//
// Entries are dropped when they fail the include predicates, when
// they do not live directly inside dir (wrong prefix, equal to the
// directory itself, or in a subdirectory), and an error is returned
// for entries of unknown type. The filter is done in place, reusing
// the backing array of entries.
func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string,
	IncludeObject func(o fs.Object) bool,
	IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
	newEntries = entries[:0] // in place filter
	prefix := ""
	if dir != "" {
		prefix = dir + "/"
	}
	for _, entry := range entries {
		ok := true
		// check includes and types
		switch x := entry.(type) {
		case fs.Object:
			// Make sure we don't delete excluded files if not required
			if !includeAll && !IncludeObject(x) {
				ok = false
				fs.Debugf(x, "Excluded from sync (and deletion)")
			}
		case fs.Directory:
			if !includeAll {
				include, err := IncludeDirectory(x.Remote())
				if err != nil {
					return nil, err
				}
				if !include {
					ok = false
					fs.Debugf(x, "Excluded from sync (and deletion)")
				}
			}
		default:
			return nil, errors.Errorf("unknown object type %T", entry)
		}
		// check remote name belongs in this directory
		remote := entry.Remote()
		switch {
		case !ok:
			// ignore
		case !strings.HasPrefix(remote, prefix):
			ok = false
			fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
		case remote == prefix:
			ok = false
			fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
		case strings.ContainsRune(remote[len(prefix):], '/'):
			ok = false
			fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
		default:
			// ok
		}
		if ok {
			newEntries = append(newEntries, entry)
		}
	}
	entries = newEntries

	// Sort the directory entries by Remote
	//
	// We use a stable sort here just in case there are
	// duplicates. Assuming the remote delivers the entries in a
	// consistent order, this will give the best user experience
	// in syncing as it will use the first entry for the sync
	// comparison.
	sort.Stable(entries)
	return entries, nil
}
|
||||
104
.rclone_repo/fs/list/list_test.go
Executable file
104
.rclone_repo/fs/list/list_test.go
Executable file
@@ -0,0 +1,104 @@
|
||||
package list
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/mockdir"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// NB integration tests for DirSorted are in
|
||||
// fs/operations/listdirsorted_test.go
|
||||
|
||||
// TestFilterAndSortIncludeAll checks filterAndSortDir with and
// without filtering: objects sort before directories, and the
// predicates exclude oB (object) and dc (directory) when filtering is
// enabled.
func TestFilterAndSortIncludeAll(t *testing.T) {
	da := mockdir.New("a")
	oA := mockobject.Object("A")
	db := mockdir.New("b")
	oB := mockobject.Object("B")
	dc := mockdir.New("c")
	oC := mockobject.Object("C")
	dd := mockdir.New("d")
	oD := mockobject.Object("D")
	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
	includeObject := func(o fs.Object) bool {
		return o != oB
	}
	includeDirectory := func(remote string) (bool, error) {
		return remote != "c", nil
	}
	// no filter
	newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory)
	require.NoError(t, err)
	assert.Equal(t,
		newEntries,
		fs.DirEntries{oA, oB, oC, oD, da, db, dc, dd},
	)
	// filter
	newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory)
	require.NoError(t, err)
	assert.Equal(t,
		newEntries,
		fs.DirEntries{oA, oC, oD, da, db, dd},
	)
}
|
||||
|
||||
// TestFilterAndSortCheckDir checks the different kinds of
// wrong-directory entries when listing "dir": wrong-case prefix,
// equal-to-directory and subdirectory entries are all dropped.
func TestFilterAndSortCheckDir(t *testing.T) {
	// Check the different kinds of error when listing "dir"
	da := mockdir.New("dir/")           // same as the directory itself - dropped
	oA := mockobject.Object("diR/a")    // wrong (case-sensitive) prefix - dropped
	db := mockdir.New("dir/b")
	oB := mockobject.Object("dir/B/sub") // lives in a subdirectory - dropped
	dc := mockdir.New("dir/c")
	oC := mockobject.Object("dir/C")
	dd := mockdir.New("dir/d")
	oD := mockobject.Object("dir/D")
	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
	newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil)
	require.NoError(t, err)
	assert.Equal(t,
		newEntries,
		fs.DirEntries{oC, oD, db, dc, dd},
	)
}
|
||||
|
||||
// TestFilterAndSortCheckDirRoot checks the wrong-directory entry
// handling when listing the root "": the root itself and entries in
// subdirectories are dropped.
func TestFilterAndSortCheckDirRoot(t *testing.T) {
	// Check the different kinds of error when listing the root ""
	da := mockdir.New("")               // same as the directory itself - dropped
	oA := mockobject.Object("A")
	db := mockdir.New("b")
	oB := mockobject.Object("B/sub")    // lives in a subdirectory - dropped
	dc := mockdir.New("c")
	oC := mockobject.Object("C")
	dd := mockdir.New("d")
	oD := mockobject.Object("D")
	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
	require.NoError(t, err)
	assert.Equal(t,
		newEntries,
		fs.DirEntries{oA, oC, oD, db, dc, dd},
	)
}
|
||||
|
||||
// unknownDirEntry satisfies fs.DirEntry but is neither an fs.Object
// nor an fs.Directory, to exercise the unknown-type error path.
type unknownDirEntry string

func (o unknownDirEntry) String() string         { return string(o) }
func (o unknownDirEntry) Remote() string         { return string(o) }
func (o unknownDirEntry) ModTime() (t time.Time) { return t }
func (o unknownDirEntry) Size() int64            { return 0 }
|
||||
|
||||
// TestFilterAndSortUnknown checks that an entry of unknown type makes
// filterAndSortDir return an error and nil entries.
func TestFilterAndSortUnknown(t *testing.T) {
	// Check that an unknown entry produces an error
	da := mockdir.New("")
	oA := mockobject.Object("A")
	ub := unknownDirEntry("b")
	oB := mockobject.Object("B/sub")
	entries := fs.DirEntries{da, oA, ub, oB}
	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
	assert.Error(t, err, "error")
	assert.Nil(t, newEntries)
}
|
||||
135
.rclone_repo/fs/log.go
Executable file
135
.rclone_repo/fs/log.go
Executable file
@@ -0,0 +1,135 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
type LogLevel byte
|
||||
|
||||
// Log levels. These are the syslog levels of which we only use a
// subset.
//
//	LOG_EMERG      system is unusable
//	LOG_ALERT      action must be taken immediately
//	LOG_CRIT       critical conditions
//	LOG_ERR        error conditions
//	LOG_WARNING    warning conditions
//	LOG_NOTICE     normal, but significant, condition
//	LOG_INFO       informational message
//	LOG_DEBUG      debug-level message
//
// The iota values must stay aligned with logLevelToString below.
const (
	LogLevelEmergency LogLevel = iota
	LogLevelAlert
	LogLevelCritical
	LogLevelError // Error - can't be suppressed
	LogLevelWarning
	LogLevelNotice // Normal logging, -q suppresses
	LogLevelInfo   // Transfers, needs -v
	LogLevelDebug  // Debug level, needs -vv
)
|
||||
|
||||
// logLevelToString maps each LogLevel to its display name; indexed by
// the LogLevel value, so it must stay aligned with the const block.
var logLevelToString = []string{
	LogLevelEmergency: "EMERGENCY",
	LogLevelAlert:     "ALERT",
	LogLevelCritical:  "CRITICAL",
	LogLevelError:     "ERROR",
	LogLevelWarning:   "WARNING",
	LogLevelNotice:    "NOTICE",
	LogLevelInfo:      "INFO",
	LogLevelDebug:     "DEBUG",
}
|
||||
|
||||
// String turns a LogLevel into a string
|
||||
func (l LogLevel) String() string {
|
||||
if l >= LogLevel(len(logLevelToString)) {
|
||||
return fmt.Sprintf("LogLevel(%d)", l)
|
||||
}
|
||||
return logLevelToString[l]
|
||||
}
|
||||
|
||||
// Set a LogLevel
|
||||
func (l *LogLevel) Set(s string) error {
|
||||
for n, name := range logLevelToString {
|
||||
if s != "" && name == s {
|
||||
*l = LogLevel(n)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Errorf("Unknown log level %q", s)
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (l *LogLevel) Type() string {
|
||||
return "string"
|
||||
}
|
||||
|
||||
// LogPrint sends the text to the logger of level
|
||||
var LogPrint = func(level LogLevel, text string) {
|
||||
text = fmt.Sprintf("%-6s: %s", level, text)
|
||||
log.Print(text)
|
||||
}
|
||||
|
||||
// LogPrintf produces a log string from the arguments passed in
|
||||
func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
|
||||
out := fmt.Sprintf(text, args...)
|
||||
if o != nil {
|
||||
out = fmt.Sprintf("%v: %s", o, out)
|
||||
}
|
||||
LogPrint(level, out)
|
||||
}
|
||||
|
||||
// LogLevelPrintf writes logs at the given level
|
||||
func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
|
||||
if Config.LogLevel >= level {
|
||||
LogPrintf(level, o, text, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf writes error log output for this Object or Fs. It
|
||||
// should always be seen by the user.
|
||||
func Errorf(o interface{}, text string, args ...interface{}) {
|
||||
if Config.LogLevel >= LogLevelError {
|
||||
LogPrintf(LogLevelError, o, text, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Logf writes log output for this Object or Fs. This should be
|
||||
// considered to be Info level logging. It is the default level. By
|
||||
// default rclone should not log very much so only use this for
|
||||
// important things the user should see. The user can filter these
|
||||
// out with the -q flag.
|
||||
func Logf(o interface{}, text string, args ...interface{}) {
|
||||
if Config.LogLevel >= LogLevelNotice {
|
||||
LogPrintf(LogLevelNotice, o, text, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infof writes info on transfers for this Object or Fs. Use this
|
||||
// level for logging transfers, deletions and things which should
|
||||
// appear with the -v flag.
|
||||
func Infof(o interface{}, text string, args ...interface{}) {
|
||||
if Config.LogLevel >= LogLevelInfo {
|
||||
LogPrintf(LogLevelInfo, o, text, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Debugf writes debugging output for this Object or Fs. Use this for
|
||||
// debug only. The user must have to specify -vv to see this.
|
||||
func Debugf(o interface{}, text string, args ...interface{}) {
|
||||
if Config.LogLevel >= LogLevelDebug {
|
||||
LogPrintf(LogLevelDebug, o, text, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// LogDirName returns an object for the logger, logging a root
|
||||
// directory which would normally be "" as the Fs
|
||||
func LogDirName(f Fs, dir string) interface{} {
|
||||
if dir != "" {
|
||||
return dir
|
||||
}
|
||||
return f
|
||||
}
|
||||
95
.rclone_repo/fs/log/log.go
Executable file
95
.rclone_repo/fs/log/log.go
Executable file
@@ -0,0 +1,95 @@
|
||||
// Package log provides logging for rclone
|
||||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
)
|
||||
|
||||
// Flags controlling log destination.
var (
	logFile        = flags.StringP("log-file", "", "", "Log everything to this file")
	useSyslog      = flags.BoolP("syslog", "", false, "Use Syslog for logging")
	syslogFacility = flags.StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
)
|
||||
|
||||
// fnName returns the bare (package- and receiver-stripped) name of
// the function two stack frames above, or "*Unknown*" if the caller
// cannot be determined.
func fnName() string {
	pc, _, _, ok := runtime.Caller(2)
	if !ok {
		return "*Unknown*"
	}
	name := runtime.FuncForPC(pc).Name()
	if dot := strings.LastIndex(name, "."); dot >= 0 {
		name = name[dot+1:]
	}
	return name
}
|
||||
|
||||
// Trace debugs the entry and exit of the calling function
//
// It is designed to be used in a defer statement so it returns a
// function that logs the exit parameters.
//
// Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
	// Cheap no-op closure unless debug logging is enabled.
	if fs.Config.LogLevel < fs.LogLevelDebug {
		return func(format string, a ...interface{}) {}
	}
	name := fnName()
	fs.LogPrintf(fs.LogLevelDebug, o, name+": "+format, a...)
	return func(format string, a ...interface{}) {
		for i := range a {
			// read the values of the pointed to items so the exit log
			// shows results rather than addresses
			typ := reflect.TypeOf(a[i])
			if typ.Kind() == reflect.Ptr {
				value := reflect.ValueOf(a[i])
				if value.IsNil() {
					a[i] = nil
				} else {
					pointedToValue := reflect.Indirect(value)
					a[i] = pointedToValue.Interface()
				}
			}
		}
		// ">" marks the exit line for the same function name
		fs.LogPrintf(fs.LogLevelDebug, o, ">"+name+": "+format, a...)
	}
}
|
||||
|
||||
// InitLogging starts the logging as per the command line flags:
// optionally redirecting all log output (and stderr, for panics) to
// --log-file, or to syslog with --syslog. The two flags are mutually
// exclusive and misconfiguration is fatal.
func InitLogging() {
	// Log file output
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		// Seek to the end so appends continue after existing content;
		// failure here is logged but not fatal.
		_, err = f.Seek(0, io.SeekEnd)
		if err != nil {
			fs.Errorf(nil, "Failed to seek log file to end: %v", err)
		}
		log.SetOutput(f)
		// Also capture panics written to stderr.
		redirectStderr(f)
	}

	// Syslog output
	if *useSyslog {
		if *logFile != "" {
			log.Fatalf("Can't use --syslog and --log-file together")
		}
		startSysLog()
	}
}
|
||||
|
||||
// Redirected returns true if the log has been redirected from stdout
|
||||
func Redirected() bool {
|
||||
return *useSyslog || *logFile != ""
|
||||
}
|
||||
16
.rclone_repo/fs/log/redirect_stderr.go
Executable file
16
.rclone_repo/fs/log/redirect_stderr.go
Executable file
@@ -0,0 +1,16 @@
|
||||
// Log the panic to the log file - for oses which can't do this
|
||||
|
||||
// +build !windows,!darwin,!dragonfly,!freebsd,!linux,!nacl,!netbsd,!openbsd
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// redirectStderr to the file passed in. On these platforms stderr
// redirection is not supported, so this just logs an error.
func redirectStderr(f *os.File) {
	fs.Errorf(nil, "Can't redirect stderr to file")
}
|
||||
27
.rclone_repo/fs/log/redirect_stderr_unix.go
Executable file
27
.rclone_repo/fs/log/redirect_stderr_unix.go
Executable file
@@ -0,0 +1,27 @@
|
||||
// Log the panic under unix to the log file
|
||||
|
||||
// +build !windows,!solaris,!plan9
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// redirectStderr to the file passed in.
//
// The original stderr fd is duplicated first and kept as
// config.PasswordPromptOutput so password prompts still reach the
// terminal; then stderr itself is replaced by the log file fd so
// panics end up in the log. Both failures are fatal.
func redirectStderr(f *os.File) {
	passPromptFd, err := unix.Dup(int(os.Stderr.Fd()))
	if err != nil {
		log.Fatalf("Failed to duplicate stderr: %v", err)
	}
	config.PasswordPromptOutput = os.NewFile(uintptr(passPromptFd), "passPrompt")
	err = unix.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
	if err != nil {
		log.Fatalf("Failed to redirect stderr to file: %v", err)
	}
}
|
||||
39
.rclone_repo/fs/log/redirect_stderr_windows.go
Executable file
39
.rclone_repo/fs/log/redirect_stderr_windows.go
Executable file
@@ -0,0 +1,39 @@
|
||||
// Log the panic under windows to the log file
|
||||
//
|
||||
// Code from minix, via
|
||||
//
|
||||
// https://play.golang.org/p/kLtct7lSUg
|
||||
|
||||
// +build windows
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32 = syscall.MustLoadDLL("kernel32.dll")
|
||||
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
|
||||
)
|
||||
|
||||
// setStdHandle calls the Windows SetStdHandle API to point the given
// standard handle (e.g. STD_ERROR_HANDLE) at handle. A zero return
// from the syscall indicates failure; the Windows error is returned
// when available, otherwise EINVAL.
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
	r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
	if r0 == 0 {
		if e1 != 0 {
			return error(e1)
		}
		return syscall.EINVAL
	}
	return nil
}
|
||||
|
||||
// redirectStderr to the file passed in, by pointing the Windows
// STD_ERROR_HANDLE at the file so panics end up in the log. Failure
// is fatal.
func redirectStderr(f *os.File) {
	err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
	if err != nil {
		log.Fatalf("Failed to redirect stderr to file: %v", err)
	}
}
|
||||
16
.rclone_repo/fs/log/syslog.go
Executable file
16
.rclone_repo/fs/log/syslog.go
Executable file
@@ -0,0 +1,16 @@
|
||||
// Syslog interface for non-Unix variants only
|
||||
|
||||
// +build windows nacl plan9
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// startSysLog starts syslog if configured, returning true if it was
// started. Syslog is unsupported on this platform, so this is always
// fatal; the return exists only to match the Unix implementation.
func startSysLog() bool {
	log.Fatalf("--syslog not supported on %s platform", runtime.GOOS)
	return false
}
|
||||
67
.rclone_repo/fs/log/syslog_unix.go
Executable file
67
.rclone_repo/fs/log/syslog_unix.go
Executable file
@@ -0,0 +1,67 @@
|
||||
// Syslog interface for Unix variants only
|
||||
|
||||
// +build !windows,!nacl,!plan9
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/syslog"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
var (
	// syslogFacilityMap maps the --syslog-facility flag value to the
	// corresponding syslog priority facility.
	syslogFacilityMap = map[string]syslog.Priority{
		"KERN":     syslog.LOG_KERN,
		"USER":     syslog.LOG_USER,
		"MAIL":     syslog.LOG_MAIL,
		"DAEMON":   syslog.LOG_DAEMON,
		"AUTH":     syslog.LOG_AUTH,
		"SYSLOG":   syslog.LOG_SYSLOG,
		"LPR":      syslog.LOG_LPR,
		"NEWS":     syslog.LOG_NEWS,
		"UUCP":     syslog.LOG_UUCP,
		"CRON":     syslog.LOG_CRON,
		"AUTHPRIV": syslog.LOG_AUTHPRIV,
		"FTP":      syslog.LOG_FTP,
	}
)
|
||||
|
||||
// startSysLog starts syslog logging: it routes the standard logger to
// the syslog writer and replaces fs.LogPrint so each rclone log level
// maps to the matching syslog severity. An unknown facility or a
// failure to connect is fatal. Returns true when started.
func startSysLog() bool {
	facility, ok := syslogFacilityMap[*syslogFacility]
	if !ok {
		log.Fatalf("Unknown syslog facility %q - man syslog for list", *syslogFacility)
	}
	Me := path.Base(os.Args[0])
	w, err := syslog.New(syslog.LOG_NOTICE|facility, Me)
	if err != nil {
		log.Fatalf("Failed to start syslog: %v", err)
	}
	// syslog adds its own timestamps, so drop the log package's.
	log.SetFlags(0)
	log.SetOutput(w)
	fs.LogPrint = func(level fs.LogLevel, text string) {
		// Write errors are deliberately ignored - nowhere to report them.
		switch level {
		case fs.LogLevelEmergency:
			_ = w.Emerg(text)
		case fs.LogLevelAlert:
			_ = w.Alert(text)
		case fs.LogLevelCritical:
			_ = w.Crit(text)
		case fs.LogLevelError:
			_ = w.Err(text)
		case fs.LogLevelWarning:
			_ = w.Warning(text)
		case fs.LogLevelNotice:
			_ = w.Notice(text)
		case fs.LogLevelInfo:
			_ = w.Info(text)
		case fs.LogLevelDebug:
			_ = w.Debug(text)
		}
	}
	return true
}
|
||||
6
.rclone_repo/fs/log_test.go
Executable file
6
.rclone_repo/fs/log_test.go
Executable file
@@ -0,0 +1,6 @@
|
||||
package fs
|
||||
|
||||
import "github.com/spf13/pflag"
|
||||
|
||||
// Check it satisfies the pflag.Value interface at compile time.
var _ pflag.Value = (*LogLevel)(nil)
|
||||
418
.rclone_repo/fs/march/march.go
Executable file
418
.rclone_repo/fs/march/march.go
Executable file
@@ -0,0 +1,418 @@
|
||||
// Package march traverses two directories in lock step
|
||||
package march
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// March holds the data used to traverse two Fs simultaneously,
// calling callback for each match
type March struct {
	// parameters
	ctx      context.Context // context for cancellation
	fdst     fs.Fs           // destination Fs
	fsrc     fs.Fs           // source Fs
	dir      string          // directory to start the traversal from
	callback Marcher         // object to call on each match
	// internal state
	srcListDir listDirFn // function to call to list a directory in the src
	dstListDir listDirFn // function to call to list a directory in the dst
	transforms []matchTransformFn // name transforms applied before matching
}
|
||||
|
||||
// Marcher is called on each match. Each method returns whether the
// march should recurse into the matched directory entry.
type Marcher interface {
	// SrcOnly is called for a DirEntry found only in the source
	SrcOnly(src fs.DirEntry) (recurse bool)
	// DstOnly is called for a DirEntry found only in the destination
	DstOnly(dst fs.DirEntry) (recurse bool)
	// Match is called for a DirEntry found both in the source and destination
	Match(dst, src fs.DirEntry) (recurse bool)
}
|
||||
|
||||
// New sets up a march over fsrc, and fdst calling back callback for each match
func New(ctx context.Context, fdst, fsrc fs.Fs, dir string, callback Marcher) *March {
	m := &March{
		ctx:      ctx,
		fdst:     fdst,
		fsrc:     fsrc,
		dir:      dir,
		callback: callback,
	}
	// The destination listing includes excluded files when
	// --delete-excluded is set so they can be deleted.
	m.srcListDir = m.makeListDir(fsrc, false)
	m.dstListDir = m.makeListDir(fdst, filter.Active.Opt.DeleteExcluded)
	// Now create the matching transform
	// ..normalise the UTF8 first
	m.transforms = append(m.transforms, norm.NFC.String)
	// ..if destination is caseInsensitive then make it lower case
	// case Insensitive | src | dst | lower case compare |
	//                  | No  | No  | No                 |
	//                  | Yes | No  | No                 |
	//                  | No  | Yes | Yes                |
	//                  | Yes | Yes | Yes                |
	if fdst.Features().CaseInsensitive {
		m.transforms = append(m.transforms, strings.ToLower)
	}
	return m
}
|
||||
|
||||
// listDirFn lists a single directory into entries, err
type listDirFn func(dir string) (entries fs.DirEntries, err error)
|
||||
|
||||
// makeListDir makes a listing function for the given fs and includeAll flags
|
||||
func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
|
||||
if !fs.Config.UseListR || f.Features().ListR == nil {
|
||||
return func(dir string) (entries fs.DirEntries, err error) {
|
||||
return list.DirSorted(f, includeAll, dir)
|
||||
}
|
||||
}
|
||||
var (
|
||||
mu sync.Mutex
|
||||
started bool
|
||||
dirs walk.DirTree
|
||||
dirsErr error
|
||||
)
|
||||
return func(dir string) (entries fs.DirEntries, err error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if !started {
|
||||
dirs, dirsErr = walk.NewDirTree(f, m.dir, includeAll, fs.Config.MaxDepth)
|
||||
started = true
|
||||
}
|
||||
if dirsErr != nil {
|
||||
return nil, dirsErr
|
||||
}
|
||||
entries, ok := dirs[dir]
|
||||
if !ok {
|
||||
err = fs.ErrorDirNotFound
|
||||
} else {
|
||||
delete(dirs, dir)
|
||||
}
|
||||
return entries, err
|
||||
}
|
||||
}
|
||||
|
||||
// listDirJob describe a directory listing that needs to be done
type listDirJob struct {
	srcRemote string // source directory to list
	dstRemote string // destination directory to list
	srcDepth  int    // remaining recursion depth on the source side
	dstDepth  int    // remaining recursion depth on the destination side
	noSrc     bool   // if set, don't list the source (dst-only job)
	noDst     bool   // if set, don't list the destination (src-only job)
}
|
||||
|
||||
// Run starts the matching process off
//
// It starts fs.Config.Checkers worker goroutines consuming listDirJobs
// from a channel, seeds the channel with the root job, and waits until
// all traversals have finished before closing the channel and waiting
// for the workers to exit.
func (m *March) Run() {
	// Work out the maximum depth to recurse to on each side
	srcDepth := fs.Config.MaxDepth
	if srcDepth < 0 {
		srcDepth = fs.MaxLevel
	}
	dstDepth := srcDepth
	if filter.Active.Opt.DeleteExcluded {
		// need to see excluded entries on the dst so they can be deleted
		dstDepth = fs.MaxLevel
	}

	// Start some directory listing go routines
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	in := make(chan listDirJob, fs.Config.Checkers)
	for i := 0; i < fs.Config.Checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-m.ctx.Done():
					return
				case job, ok := <-in:
					if !ok {
						// channel closed - all jobs done
						return
					}
					jobs := m.processJob(job)
					if len(jobs) > 0 {
						traversing.Add(len(jobs))
						go func() {
							// Now we have traversed this directory, send these
							// jobs off for traversal in the background
							for _, newJob := range jobs {
								select {
								case <-m.ctx.Done():
									// discard job if finishing
									traversing.Done()
								case in <- newJob:
								}
							}
						}()
					}
					traversing.Done()
				}
			}
		}()
	}

	// Start the process with the root directory on both sides
	traversing.Add(1)
	in <- listDirJob{
		srcRemote: m.dir,
		srcDepth:  srcDepth - 1,
		dstRemote: m.dir,
		dstDepth:  dstDepth - 1,
	}
	go func() {
		// when the context is cancelled discard the remaining jobs
		//
		// NOTE(review): if the context is never cancelled this goroutine
		// blocks on ctx.Done() for the life of the process - possible
		// goroutine leak; confirm whether callers always cancel.
		<-m.ctx.Done()
		for range in {
			traversing.Done()
		}
	}()
	traversing.Wait()
	close(in)
	wg.Wait()
}
|
||||
|
||||
// Check to see if the context has been cancelled
|
||||
func (m *March) aborting() bool {
|
||||
select {
|
||||
case <-m.ctx.Done():
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// matchEntry is an entry plus transformed name
type matchEntry struct {
	entry fs.DirEntry // the directory entry itself
	leaf  string      // untransformed leaf name of the entry
	name  string      // leaf after applying the transforms (used for comparison)
}
|
||||
|
||||
// matchEntries contains many matchEntry~s
type matchEntries []matchEntry

// Len is part of sort.Interface.
func (es matchEntries) Len() int { return len(es) }

// Swap is part of sort.Interface.
func (es matchEntries) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
|
||||
|
||||
// Less is part of sort.Interface.
|
||||
//
|
||||
// Compare in order (name, leaf, remote)
|
||||
func (es matchEntries) Less(i, j int) bool {
|
||||
ei, ej := &es[i], &es[j]
|
||||
if ei.name == ej.name {
|
||||
if ei.leaf == ej.leaf {
|
||||
return ei.entry.Remote() < ej.entry.Remote()
|
||||
}
|
||||
return ei.leaf < ej.leaf
|
||||
}
|
||||
return ei.name < ej.name
|
||||
}
|
||||
|
||||
// sort sorts the directory entries by (name, leaf, remote)
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
func (es matchEntries) sort() {
	sort.Stable(es)
}
|
||||
|
||||
// make a matchEntries from a newMatch entries
|
||||
func newMatchEntries(entries fs.DirEntries, transforms []matchTransformFn) matchEntries {
|
||||
es := make(matchEntries, len(entries))
|
||||
for i := range es {
|
||||
es[i].entry = entries[i]
|
||||
name := path.Base(entries[i].Remote())
|
||||
es[i].leaf = name
|
||||
for _, transform := range transforms {
|
||||
name = transform(name)
|
||||
}
|
||||
es[i].name = name
|
||||
}
|
||||
es.sort()
|
||||
return es
|
||||
}
|
||||
|
||||
// matchPair is a matched pair of direntries returned by matchListings
type matchPair struct {
	src, dst fs.DirEntry // entries with the same (transformed) name on each side
}

// matchTransformFn converts a name into a form which is used for
// comparison in matchListings.
type matchTransformFn func(name string) string
|
||||
|
||||
// matchListings processes the two listings, matching up the items in
// the two slices using the transform function on each name first.
//
// Into srcOnly go Entries which only exist in the srcList
// Into dstOnly go Entries which only exist in the dstList
// Into matches go matchPair's of src and dst which have the same name
//
// This checks for duplicates and checks the list is sorted.
func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []matchTransformFn) (srcOnly fs.DirEntries, dstOnly fs.DirEntries, matches []matchPair) {
	srcList := newMatchEntries(srcListEntries, transforms)
	dstList := newMatchEntries(dstListEntries, transforms)
	// Walk both sorted lists in parallel, merge-style
	for iSrc, iDst := 0, 0; ; iSrc, iDst = iSrc+1, iDst+1 {
		var src, dst fs.DirEntry
		var srcName, dstName string
		if iSrc < len(srcList) {
			src = srcList[iSrc].entry
			srcName = srcList[iSrc].name
		}
		if iDst < len(dstList) {
			dst = dstList[iDst].entry
			dstName = dstList[iDst].name
		}
		if src == nil && dst == nil {
			// both lists exhausted
			break
		}
		// Skip duplicates in the source, sanity-checking the sort order
		if src != nil && iSrc > 0 {
			prev := srcList[iSrc-1].name
			if srcName == prev {
				fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src))
				iDst-- // ignore the src and retry the dst
				continue
			} else if srcName < prev {
				// this should never happen since we sort the listings
				panic("Out of order listing in source")
			}
		}
		// Skip duplicates in the destination, sanity-checking the sort order
		if dst != nil && iDst > 0 {
			prev := dstList[iDst-1].name
			if dstName == prev {
				fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst))
				iSrc-- // ignore the dst and retry the src
				continue
			} else if dstName < prev {
				// this should never happen since we sort the listings
				panic("Out of order listing in destination")
			}
		}
		// If the names differ, hold back whichever side is ahead
		if src != nil && dst != nil {
			if srcName < dstName {
				dst = nil
				iDst-- // retry the dst
			} else if srcName > dstName {
				src = nil
				iSrc-- // retry the src
			}
		}
		// Debugf(nil, "src = %v, dst = %v", src, dst)
		switch {
		case src == nil && dst == nil:
			// do nothing
		case src == nil:
			dstOnly = append(dstOnly, dst)
		case dst == nil:
			srcOnly = append(srcOnly, src)
		default:
			matches = append(matches, matchPair{src: src, dst: dst})
		}
	}
	return
}
|
||||
|
||||
// processJob processes a listDirJob listing the source and
|
||||
// destination directories, comparing them and returning a slice of
|
||||
// more jobs
|
||||
//
|
||||
// returns errors using processError
|
||||
func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
|
||||
var (
|
||||
srcList, dstList fs.DirEntries
|
||||
srcListErr, dstListErr error
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
// List the src and dst directories
|
||||
if !job.noSrc {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
srcList, srcListErr = m.srcListDir(job.srcRemote)
|
||||
}()
|
||||
}
|
||||
if !job.noDst {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
dstList, dstListErr = m.dstListDir(job.dstRemote)
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait for listings to complete and report errors
|
||||
wg.Wait()
|
||||
if srcListErr != nil {
|
||||
fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
|
||||
fs.CountError(srcListErr)
|
||||
return nil
|
||||
}
|
||||
if dstListErr == fs.ErrorDirNotFound {
|
||||
// Copy the stuff anyway
|
||||
} else if dstListErr != nil {
|
||||
fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
|
||||
fs.CountError(dstListErr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Work out what to do and do it
|
||||
srcOnly, dstOnly, matches := matchListings(srcList, dstList, m.transforms)
|
||||
for _, src := range srcOnly {
|
||||
if m.aborting() {
|
||||
return nil
|
||||
}
|
||||
recurse := m.callback.SrcOnly(src)
|
||||
if recurse && job.srcDepth > 0 {
|
||||
jobs = append(jobs, listDirJob{
|
||||
srcRemote: src.Remote(),
|
||||
srcDepth: job.srcDepth - 1,
|
||||
noDst: true,
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
for _, dst := range dstOnly {
|
||||
if m.aborting() {
|
||||
return nil
|
||||
}
|
||||
recurse := m.callback.DstOnly(dst)
|
||||
if recurse && job.dstDepth > 0 {
|
||||
jobs = append(jobs, listDirJob{
|
||||
dstRemote: dst.Remote(),
|
||||
dstDepth: job.dstDepth - 1,
|
||||
noSrc: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, match := range matches {
|
||||
if m.aborting() {
|
||||
return nil
|
||||
}
|
||||
recurse := m.callback.Match(match.dst, match.src)
|
||||
if recurse && job.srcDepth > 0 && job.dstDepth > 0 {
|
||||
jobs = append(jobs, listDirJob{
|
||||
srcRemote: match.src.Remote(),
|
||||
dstRemote: match.dst.Remote(),
|
||||
srcDepth: job.srcDepth - 1,
|
||||
dstDepth: job.dstDepth - 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
171
.rclone_repo/fs/march/march_test.go
Executable file
171
.rclone_repo/fs/march/march_test.go
Executable file
@@ -0,0 +1,171 @@
|
||||
// Internal tests for march
|
||||
|
||||
package march
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestNewMatchEntries checks that newMatchEntries sorts by the
// (transformed) name and records both the leaf and transformed name.
func TestNewMatchEntries(t *testing.T) {
	var (
		a = mockobject.Object("path/a")
		A = mockobject.Object("path/A")
		B = mockobject.Object("path/B")
		c = mockobject.Object("path/c")
	)

	// No transforms - sorted by the raw leaf name (upper case sorts first)
	es := newMatchEntries(fs.DirEntries{a, A, B, c}, nil)
	assert.Equal(t, es, matchEntries{
		{name: "A", leaf: "A", entry: A},
		{name: "B", leaf: "B", entry: B},
		{name: "a", leaf: "a", entry: a},
		{name: "c", leaf: "c", entry: c},
	})

	// Lower-casing transform - sorted by the lower-cased name, leaf unchanged
	es = newMatchEntries(fs.DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
	assert.Equal(t, es, matchEntries{
		{name: "a", leaf: "A", entry: A},
		{name: "a", leaf: "a", entry: a},
		{name: "b", leaf: "B", entry: B},
		{name: "c", leaf: "c", entry: c},
	})
}
|
||||
|
||||
// TestMatchListings exercises matchListings over a table of src/dst
// listing pairs, checking the srcOnly/dstOnly/matches split. Each test
// is also run with src and dst swapped to check symmetry.
func TestMatchListings(t *testing.T) {
	var (
		a = mockobject.Object("a")
		A = mockobject.Object("A")
		b = mockobject.Object("b")
		c = mockobject.Object("c")
		d = mockobject.Object("d")
	)

	for _, test := range []struct {
		what       string
		input      fs.DirEntries // pairs of input src, dst
		srcOnly    fs.DirEntries
		dstOnly    fs.DirEntries
		matches    []matchPair // pairs of output
		transforms []matchTransformFn
	}{
		{
			what: "only src or dst",
			input: fs.DirEntries{
				a, nil,
				b, nil,
				c, nil,
				d, nil,
			},
			srcOnly: fs.DirEntries{
				a, b, c, d,
			},
		},
		{
			what: "typical sync #1",
			input: fs.DirEntries{
				a, nil,
				b, b,
				nil, c,
				nil, d,
			},
			srcOnly: fs.DirEntries{
				a,
			},
			dstOnly: fs.DirEntries{
				c, d,
			},
			matches: []matchPair{
				{b, b},
			},
		},
		{
			what: "typical sync #2",
			input: fs.DirEntries{
				a, a,
				b, b,
				nil, c,
				d, d,
			},
			dstOnly: fs.DirEntries{
				c,
			},
			matches: []matchPair{
				{a, a},
				{b, b},
				{d, d},
			},
		},
		{
			what: "One duplicate",
			input: fs.DirEntries{
				A, A,
				a, a,
				a, nil,
				b, b,
			},
			matches: []matchPair{
				{A, A},
				{a, a},
				{b, b},
			},
		},
		{
			what: "Two duplicates",
			input: fs.DirEntries{
				a, a,
				a, a,
				a, nil,
			},
			matches: []matchPair{
				{a, a},
			},
		},
		{
			what: "Case insensitive duplicate - no transform",
			input: fs.DirEntries{
				a, a,
				A, A,
			},
			matches: []matchPair{
				{A, A},
				{a, a},
			},
		},
		{
			what: "Case insensitive duplicate - transform to lower case",
			input: fs.DirEntries{
				a, a,
				A, A,
			},
			matches: []matchPair{
				{A, A},
			},
			transforms: []matchTransformFn{strings.ToLower},
		},
	} {
		// Split the (src, dst) pairs into the two listings
		var srcList, dstList fs.DirEntries
		for i := 0; i < len(test.input); i += 2 {
			src, dst := test.input[i], test.input[i+1]
			if src != nil {
				srcList = append(srcList, src)
			}
			if dst != nil {
				dstList = append(dstList, dst)
			}
		}
		srcOnly, dstOnly, matches := matchListings(srcList, dstList, test.transforms)
		assert.Equal(t, test.srcOnly, srcOnly, test.what)
		assert.Equal(t, test.dstOnly, dstOnly, test.what)
		assert.Equal(t, test.matches, matches, test.what)
		// now swap src and dst - the result should be symmetric
		dstOnly, srcOnly, matches = matchListings(dstList, srcList, test.transforms)
		assert.Equal(t, test.srcOnly, srcOnly, test.what)
		assert.Equal(t, test.dstOnly, dstOnly, test.what)
		assert.Equal(t, test.matches, matches, test.what)
	}
}
|
||||
44
.rclone_repo/fs/mimetype.go
Executable file
44
.rclone_repo/fs/mimetype.go
Executable file
@@ -0,0 +1,44 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MimeTypeFromName returns a guess at the mime type from the name
func MimeTypeFromName(remote string) string {
	guess := mime.TypeByExtension(path.Ext(remote))
	if strings.ContainsRune(guess, '/') {
		return guess
	}
	// No valid guess (a mime type always contains a slash) - fall
	// back to the generic binary type.
	return "application/octet-stream"
}
|
||||
|
||||
// MimeType returns the MimeType from the object, either by calling
|
||||
// the MimeTyper interface or using MimeTypeFromName
|
||||
func MimeType(o ObjectInfo) (mimeType string) {
|
||||
// Read the MimeType from the optional interface if available
|
||||
if do, ok := o.(MimeTyper); ok {
|
||||
mimeType = do.MimeType()
|
||||
// Debugf(o, "Read MimeType as %q", mimeType)
|
||||
if mimeType != "" {
|
||||
return mimeType
|
||||
}
|
||||
}
|
||||
return MimeTypeFromName(o.Remote())
|
||||
}
|
||||
|
||||
// MimeTypeDirEntry returns the MimeType of a DirEntry
|
||||
//
|
||||
// It returns "inode/directory" for directories, or uses
|
||||
// MimeType(Object)
|
||||
func MimeTypeDirEntry(item DirEntry) string {
|
||||
switch x := item.(type) {
|
||||
case Object:
|
||||
return MimeType(x)
|
||||
case Directory:
|
||||
return "inode/directory"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
239
.rclone_repo/fs/object/object.go
Executable file
239
.rclone_repo/fs/object/object.go
Executable file
@@ -0,0 +1,239 @@
|
||||
// Package object defines some useful Objects
|
||||
package object
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// NewStaticObjectInfo returns a static ObjectInfo
|
||||
// If hashes is nil and fs is not nil, the hash map will be replaced with
|
||||
// empty hashes of the types supported by the fs.
|
||||
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo {
|
||||
info := &staticObjectInfo{
|
||||
remote: remote,
|
||||
modTime: modTime,
|
||||
size: size,
|
||||
storable: storable,
|
||||
hashes: hashes,
|
||||
fs: fs,
|
||||
}
|
||||
if fs != nil && hashes == nil {
|
||||
set := fs.Hashes().Array()
|
||||
info.hashes = make(map[hash.Type]string)
|
||||
for _, ht := range set {
|
||||
info.hashes[ht] = ""
|
||||
}
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
// staticObjectInfo is an fs.ObjectInfo with fixed, pre-supplied values.
type staticObjectInfo struct {
	remote   string               // remote path of the object
	modTime  time.Time            // modification time
	size     int64                // size in bytes
	storable bool                 // whether the object is storable
	hashes   map[hash.Type]string // known hashes (values may be empty strings)
	fs       fs.Info              // the Fs this object is part of (may be nil)
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
func (i *staticObjectInfo) Fs() fs.Info { return i.fs }

// Remote returns the remote path
func (i *staticObjectInfo) Remote() string { return i.remote }

// String returns a description of the Object
func (i *staticObjectInfo) String() string { return i.remote }

// ModTime returns the modification date of the file
func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }

// Size returns the size of the file
func (i *staticObjectInfo) Size() int64 { return i.size }

// Storable says whether this object can be stored
func (i *staticObjectInfo) Storable() bool { return i.storable }
|
||||
func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
|
||||
if len(i.hashes) == 0 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if hash, ok := i.hashes[h]; ok {
|
||||
return hash, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// MemoryFs is an in memory Fs, it only supports FsInfo and Put
var MemoryFs memoryFs

// memoryFs is an in memory fs
type memoryFs struct{}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (memoryFs) Name() string { return "memory" }

// Root of the remote (as passed into NewFs)
func (memoryFs) Root() string { return "" }

// String returns a description of the FS
func (memoryFs) String() string { return "memory" }

// Precision of the ModTimes in this Fs
func (memoryFs) Precision() time.Duration { return time.Nanosecond }

// Hashes returns the supported hash types of the filesystem
func (memoryFs) Hashes() hash.Set { return hash.Supported }

// Features returns the optional features of this Fs
func (memoryFs) Features() *fs.Features { return &fs.Features{} }
|
||||
|
||||
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (memoryFs) List(dir string) (entries fs.DirEntries, err error) {
	// memoryFs holds no persistent entries so every listing is empty
	return nil, nil
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (memoryFs) NewObject(remote string) (fs.Object, error) {
	// Nothing is ever stored in memoryFs so nothing can be found
	return nil, fs.ErrorObjectNotFound
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := NewMemoryObject(src.Remote(), src.ModTime(), nil)
|
||||
return o, o.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
//
// Not supported for memoryFs - always returns an error.
func (memoryFs) Mkdir(dir string) error {
	return errors.New("memoryFs: can't make directory")
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
//
// memoryFs has no directories so this always fails.
func (memoryFs) Rmdir(dir string) error {
	return fs.ErrorDirNotFound
}

// Check the interface is satisfied at compile time
var _ fs.Fs = MemoryFs
|
||||
|
||||
// MemoryObject is an in memory object
type MemoryObject struct {
	remote  string    // remote path of the object
	modTime time.Time // modification time
	content []byte    // contents of the object
}
|
||||
|
||||
// NewMemoryObject returns an in memory Object with the modTime and content passed in
//
// Note that content is not copied - the slice is stored directly, so
// ownership passes to the MemoryObject.
func NewMemoryObject(remote string, modTime time.Time, content []byte) *MemoryObject {
	return &MemoryObject{
		remote:  remote,
		modTime: modTime,
		content: content,
	}
}
|
||||
|
||||
// Content returns the underlying buffer
//
// Note this returns the internal slice without copying it.
func (o *MemoryObject) Content() []byte {
	return o.content
}

// Fs returns read only access to the Fs that this object is part of
func (o *MemoryObject) Fs() fs.Info {
	return MemoryFs
}
|
||||
|
||||
// Remote returns the remote path
func (o *MemoryObject) Remote() string {
	return o.remote
}

// String returns a description of the Object
func (o *MemoryObject) String() string {
	return o.remote
}

// ModTime returns the modification date of the file
func (o *MemoryObject) ModTime() time.Time {
	return o.modTime
}

// Size returns the size of the file
func (o *MemoryObject) Size() int64 {
	return int64(len(o.content))
}

// Storable says whether this object can be stored
func (o *MemoryObject) Storable() bool {
	return true
}
|
||||
|
||||
// Hash returns the requested hash of the contents
|
||||
func (o *MemoryObject) Hash(h hash.Type) (string, error) {
|
||||
hash, err := hash.NewMultiHasherTypes(hash.Set(h))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = hash.Write(o.content)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hash.Sums()[h], nil
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
//
// Always succeeds for a MemoryObject.
func (o *MemoryObject) SetModTime(modTime time.Time) error {
	o.modTime = modTime
	return nil
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
content := o.content
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
content = o.content[x.Start:x.End]
|
||||
case *fs.SeekOption:
|
||||
content = o.content[x.Offset:]
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewBuffer(content)), nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// This re-uses the internal buffer if at all possible.
|
||||
func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size == 0 {
|
||||
o.content = nil
|
||||
} else if size < 0 || int64(cap(o.content)) < size {
|
||||
o.content, err = ioutil.ReadAll(in)
|
||||
} else {
|
||||
o.content = o.content[:size]
|
||||
_, err = io.ReadFull(in, o.content)
|
||||
}
|
||||
o.modTime = src.ModTime()
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove this object
//
// Not supported for a MemoryObject - always returns an error.
func (o *MemoryObject) Remove() error {
	return errors.New("memoryObject.Remove not supported")
}
|
||||
175
.rclone_repo/fs/object/object_test.go
Executable file
175
.rclone_repo/fs/object/object_test.go
Executable file
@@ -0,0 +1,175 @@
|
||||
package object_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestStaticObject checks the accessors and Hash behaviour of
// NewStaticObjectInfo in its three hash configurations.
func TestStaticObject(t *testing.T) {
	now := time.Now()
	remote := "path/to/object"
	size := int64(1024)

	o := object.NewStaticObjectInfo(remote, now, size, true, nil, object.MemoryFs)

	assert.Equal(t, object.MemoryFs, o.Fs())
	assert.Equal(t, remote, o.Remote())
	assert.Equal(t, remote, o.String())
	assert.Equal(t, now, o.ModTime())
	assert.Equal(t, size, o.Size())
	assert.Equal(t, true, o.Storable())

	// With an Fs and nil hashes - empty hashes are filled in
	Hash, err := o.Hash(hash.MD5)
	assert.NoError(t, err)
	assert.Equal(t, "", Hash)

	// With no Fs and nil hashes - hashing is unsupported
	o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil)
	_, err = o.Hash(hash.MD5)
	assert.Equal(t, hash.ErrUnsupported, err)

	// With explicit hashes - only those types are supported
	hs := map[hash.Type]string{
		hash.MD5: "potato",
	}
	o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil)
	Hash, err = o.Hash(hash.MD5)
	assert.NoError(t, err)
	assert.Equal(t, "potato", Hash)
	_, err = o.Hash(hash.SHA1)
	assert.Equal(t, hash.ErrUnsupported, err)
}
|
||||
|
||||
// TestMemoryFs checks the Fs interface methods of object.MemoryFs,
// including a Put round-trip verified by SHA1.
func TestMemoryFs(t *testing.T) {
	f := object.MemoryFs
	assert.Equal(t, "memory", f.Name())
	assert.Equal(t, "", f.Root())
	assert.Equal(t, "memory", f.String())
	assert.Equal(t, time.Nanosecond, f.Precision())
	assert.Equal(t, hash.Supported, f.Hashes())
	assert.Equal(t, &fs.Features{}, f.Features())

	// Listing is always empty
	entries, err := f.List("")
	assert.NoError(t, err)
	assert.Nil(t, entries)

	// Objects can never be found
	o, err := f.NewObject("obj")
	assert.Equal(t, fs.ErrorObjectNotFound, err)
	assert.Nil(t, o)

	// Put stores the content - check via its SHA1
	buf := bytes.NewBufferString("potato")
	now := time.Now()
	src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
	o, err = f.Put(buf, src)
	assert.NoError(t, err)
	hash, err := o.Hash(hash.SHA1)
	assert.NoError(t, err)
	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)

	// Directory operations always fail
	err = f.Mkdir("dir")
	assert.Error(t, err)

	err = f.Rmdir("dir")
	assert.Equal(t, fs.ErrorDirNotFound, err)
}
|
||||
|
||||
// TestMemoryObject checks the accessors, hashing, Open (with range and
// seek options), Update (buffer re-use semantics) and Remove of
// MemoryObject.
func TestMemoryObject(t *testing.T) {
	remote := "path/to/object"
	now := time.Now()
	content := []byte("potatoXXXXXXXXXXXXX")
	content = content[:6] // make some extra cap

	o := object.NewMemoryObject(remote, now, content)

	assert.Equal(t, content, o.Content())
	assert.Equal(t, object.MemoryFs, o.Fs())
	assert.Equal(t, remote, o.Remote())
	assert.Equal(t, remote, o.String())
	assert.Equal(t, now, o.ModTime())
	assert.Equal(t, int64(len(content)), o.Size())
	assert.Equal(t, true, o.Storable())

	Hash, err := o.Hash(hash.MD5)
	assert.NoError(t, err)
	assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash)

	Hash, err = o.Hash(hash.SHA1)
	assert.NoError(t, err)
	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash)

	newNow := now.Add(time.Minute)
	err = o.SetModTime(newNow)
	assert.NoError(t, err)
	assert.Equal(t, newNow, o.ModTime())

	// checkOpen reads all of rc, closes it, and checks the contents
	checkOpen := func(rc io.ReadCloser, expected string) {
		actual, err := ioutil.ReadAll(rc)
		assert.NoError(t, err)
		err = rc.Close()
		assert.NoError(t, err)
		assert.Equal(t, expected, string(actual))
	}

	// checkContent opens o and checks it reads back as expected
	checkContent := func(o fs.Object, expected string) {
		rc, err := o.Open()
		assert.NoError(t, err)
		checkOpen(rc, expected)
	}

	checkContent(o, string(content))

	rc, err := o.Open(&fs.RangeOption{Start: 1, End: 3})
	assert.NoError(t, err)
	checkOpen(rc, "ot")

	rc, err = o.Open(&fs.SeekOption{Offset: 3})
	assert.NoError(t, err)
	checkOpen(rc, "ato")

	// check it fits within the buffer
	newNow = now.Add(2 * time.Minute)
	newContent := bytes.NewBufferString("Rutabaga")
	assert.True(t, newContent.Len() < cap(content)) // fits within cap(content)
	src := object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
	err = o.Update(newContent, src)
	assert.NoError(t, err)
	checkContent(o, "Rutabaga")
	assert.Equal(t, newNow, o.ModTime())
	assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer

	// not within the buffer
	newStr := "0123456789"
	newStr = newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr
	newContent = bytes.NewBufferString(newStr)
	assert.True(t, newContent.Len() > cap(content)) // does not fit within cap(content)
	src = object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
	err = o.Update(newContent, src)
	assert.NoError(t, err)
	checkContent(o, newStr)
	assert.Equal(t, "Rutaba", string(content)) // check we didn't re-use the buffer

	// now try streaming (unknown size)
	newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	newContent = bytes.NewBufferString(newStr)
	src = object.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
	err = o.Update(newContent, src)
	assert.NoError(t, err)
	checkContent(o, newStr)

	// and zero length
	newStr = ""
	newContent = bytes.NewBufferString(newStr)
	src = object.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
	err = o.Update(newContent, src)
	assert.NoError(t, err)
	checkContent(o, newStr)

	// Remove is unsupported
	err = o.Remove()
	assert.Error(t, err)
}
|
||||
309
.rclone_repo/fs/operations/dedupe.go
Executable file
309
.rclone_repo/fs/operations/dedupe.go
Executable file
@@ -0,0 +1,309 @@
|
||||
// dedupe - gets rid of identical files remotes which can have duplicate file names (drive, mega)
|
||||
|
||||
package operations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// dedupeRename renames the objs slice to different names
|
||||
func dedupeRename(remote string, objs []fs.Object) {
|
||||
f := objs[0].Fs()
|
||||
doMove := f.Features().Move
|
||||
if doMove == nil {
|
||||
log.Fatalf("Fs %v doesn't support Move", f)
|
||||
}
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
for i, o := range objs {
|
||||
newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
|
||||
if !fs.Config.DryRun {
|
||||
newObj, err := doMove(o, newName)
|
||||
if err != nil {
|
||||
fs.CountError(err)
|
||||
fs.Errorf(o, "Failed to rename: %v", err)
|
||||
continue
|
||||
}
|
||||
fs.Infof(newObj, "renamed from: %v", o)
|
||||
} else {
|
||||
fs.Logf(remote, "Not renaming to %q as --dry-run", newName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dedupeDeleteAllButOne deletes all but the one in keep
|
||||
func dedupeDeleteAllButOne(keep int, remote string, objs []fs.Object) {
|
||||
for i, o := range objs {
|
||||
if i == keep {
|
||||
continue
|
||||
}
|
||||
_ = DeleteFile(o)
|
||||
}
|
||||
fs.Logf(remote, "Deleted %d extra copies", len(objs)-1)
|
||||
}
|
||||
|
||||
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
|
||||
func dedupeDeleteIdentical(ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
|
||||
// See how many of these duplicates are identical
|
||||
byHash := make(map[string][]fs.Object, len(objs))
|
||||
for _, o := range objs {
|
||||
md5sum, err := o.Hash(ht)
|
||||
if err != nil || md5sum == "" {
|
||||
remainingObjs = append(remainingObjs, o)
|
||||
} else {
|
||||
byHash[md5sum] = append(byHash[md5sum], o)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete identical duplicates, filling remainingObjs with the ones remaining
|
||||
for md5sum, hashObjs := range byHash {
|
||||
if len(hashObjs) > 1 {
|
||||
fs.Logf(remote, "Deleting %d/%d identical duplicates (%v %q)", len(hashObjs)-1, len(hashObjs), ht, md5sum)
|
||||
for _, o := range hashObjs[1:] {
|
||||
_ = DeleteFile(o)
|
||||
}
|
||||
}
|
||||
remainingObjs = append(remainingObjs, hashObjs[0])
|
||||
}
|
||||
|
||||
return remainingObjs
|
||||
}
|
||||
|
||||
// dedupeInteractive interactively dedupes the slice of objects
|
||||
func dedupeInteractive(ht hash.Type, remote string, objs []fs.Object) {
|
||||
fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
|
||||
for i, o := range objs {
|
||||
md5sum, err := o.Hash(ht)
|
||||
if err != nil {
|
||||
md5sum = err.Error()
|
||||
}
|
||||
fmt.Printf(" %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime().Local().Format("2006-01-02 15:04:05.000000000"), ht, md5sum)
|
||||
}
|
||||
switch config.Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
|
||||
case 's':
|
||||
case 'k':
|
||||
keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
|
||||
dedupeDeleteAllButOne(keep-1, remote, objs)
|
||||
case 'r':
|
||||
dedupeRename(remote, objs)
|
||||
}
|
||||
}
|
||||
|
||||
type objectsSortedByModTime []fs.Object
|
||||
|
||||
func (objs objectsSortedByModTime) Len() int { return len(objs) }
|
||||
func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
|
||||
func (objs objectsSortedByModTime) Less(i, j int) bool {
|
||||
return objs[i].ModTime().Before(objs[j].ModTime())
|
||||
}
|
||||
|
||||
// DeduplicateMode is how the dedupe command chooses what to do
|
||||
type DeduplicateMode int
|
||||
|
||||
// Deduplicate modes
|
||||
const (
|
||||
DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
|
||||
DeduplicateSkip // skip all conflicts
|
||||
DeduplicateFirst // choose the first object
|
||||
DeduplicateNewest // choose the newest object
|
||||
DeduplicateOldest // choose the oldest object
|
||||
DeduplicateRename // rename the objects
|
||||
DeduplicateLargest // choose the largest object
|
||||
)
|
||||
|
||||
func (x DeduplicateMode) String() string {
|
||||
switch x {
|
||||
case DeduplicateInteractive:
|
||||
return "interactive"
|
||||
case DeduplicateSkip:
|
||||
return "skip"
|
||||
case DeduplicateFirst:
|
||||
return "first"
|
||||
case DeduplicateNewest:
|
||||
return "newest"
|
||||
case DeduplicateOldest:
|
||||
return "oldest"
|
||||
case DeduplicateRename:
|
||||
return "rename"
|
||||
case DeduplicateLargest:
|
||||
return "largest"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// Set a DeduplicateMode from a string
|
||||
func (x *DeduplicateMode) Set(s string) error {
|
||||
switch strings.ToLower(s) {
|
||||
case "interactive":
|
||||
*x = DeduplicateInteractive
|
||||
case "skip":
|
||||
*x = DeduplicateSkip
|
||||
case "first":
|
||||
*x = DeduplicateFirst
|
||||
case "newest":
|
||||
*x = DeduplicateNewest
|
||||
case "oldest":
|
||||
*x = DeduplicateOldest
|
||||
case "rename":
|
||||
*x = DeduplicateRename
|
||||
case "largest":
|
||||
*x = DeduplicateLargest
|
||||
default:
|
||||
return errors.Errorf("Unknown mode for dedupe %q.", s)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (x *DeduplicateMode) Type() string {
|
||||
return "string"
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ pflag.Value = (*DeduplicateMode)(nil)
|
||||
|
||||
// dedupeFindDuplicateDirs scans f for duplicate directories
|
||||
func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) {
|
||||
duplicateDirs := [][]fs.Directory{}
|
||||
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dirs := map[string][]fs.Directory{}
|
||||
entries.ForDir(func(d fs.Directory) {
|
||||
dirs[d.Remote()] = append(dirs[d.Remote()], d)
|
||||
})
|
||||
for _, ds := range dirs {
|
||||
if len(ds) > 1 {
|
||||
duplicateDirs = append(duplicateDirs, ds)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "find duplicate dirs")
|
||||
}
|
||||
return duplicateDirs, nil
|
||||
}
|
||||
|
||||
// dedupeMergeDuplicateDirs merges all the duplicate directories found
|
||||
func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error {
|
||||
mergeDirs := f.Features().MergeDirs
|
||||
if mergeDirs == nil {
|
||||
return errors.Errorf("%v: can't merge directories", f)
|
||||
}
|
||||
dirCacheFlush := f.Features().DirCacheFlush
|
||||
if dirCacheFlush == nil {
|
||||
return errors.Errorf("%v: can't flush dir cache", f)
|
||||
}
|
||||
for _, dirs := range duplicateDirs {
|
||||
if !fs.Config.DryRun {
|
||||
fs.Infof(dirs[0], "Merging contents of duplicate directories")
|
||||
err := mergeDirs(dirs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "merge duplicate dirs")
|
||||
}
|
||||
} else {
|
||||
fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
|
||||
}
|
||||
}
|
||||
dirCacheFlush()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deduplicate interactively finds duplicate files and offers to
|
||||
// delete all but one or rename them to be different. Only useful with
|
||||
// Google Drive which can have duplicate file names.
|
||||
func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
|
||||
fs.Infof(f, "Looking for duplicates using %v mode.", mode)
|
||||
|
||||
// Find duplicate directories first and fix them - repeat
|
||||
// until all fixed
|
||||
for {
|
||||
duplicateDirs, err := dedupeFindDuplicateDirs(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(duplicateDirs) == 0 {
|
||||
break
|
||||
}
|
||||
err = dedupeMergeDuplicateDirs(f, duplicateDirs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fs.Config.DryRun {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// find a hash to use
|
||||
ht := f.Hashes().GetOne()
|
||||
|
||||
// Now find duplicate files
|
||||
files := map[string][]fs.Object{}
|
||||
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
remote := o.Remote()
|
||||
files[remote] = append(files[remote], o)
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for remote, objs := range files {
|
||||
if len(objs) > 1 {
|
||||
fs.Logf(remote, "Found %d duplicates - deleting identical copies", len(objs))
|
||||
objs = dedupeDeleteIdentical(ht, remote, objs)
|
||||
if len(objs) <= 1 {
|
||||
fs.Logf(remote, "All duplicates removed")
|
||||
continue
|
||||
}
|
||||
switch mode {
|
||||
case DeduplicateInteractive:
|
||||
dedupeInteractive(ht, remote, objs)
|
||||
case DeduplicateFirst:
|
||||
dedupeDeleteAllButOne(0, remote, objs)
|
||||
case DeduplicateNewest:
|
||||
sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
|
||||
dedupeDeleteAllButOne(len(objs)-1, remote, objs)
|
||||
case DeduplicateOldest:
|
||||
sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
|
||||
dedupeDeleteAllButOne(0, remote, objs)
|
||||
case DeduplicateRename:
|
||||
dedupeRename(remote, objs)
|
||||
case DeduplicateLargest:
|
||||
largest, largestIndex := int64(-1), -1
|
||||
for i, obj := range objs {
|
||||
size := obj.Size()
|
||||
if size > largest {
|
||||
largest, largestIndex = size, i
|
||||
}
|
||||
}
|
||||
if largestIndex > -1 {
|
||||
dedupeDeleteAllButOne(largestIndex, remote, objs)
|
||||
}
|
||||
case DeduplicateSkip:
|
||||
// skip
|
||||
default:
|
||||
//skip
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
215
.rclone_repo/fs/operations/dedupe_test.go
Executable file
215
.rclone_repo/fs/operations/dedupe_test.go
Executable file
@@ -0,0 +1,215 @@
|
||||
package operations_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func skipIfCantDedupe(t *testing.T, f fs.Fs) {
|
||||
if !f.Features().DuplicateFiles {
|
||||
t.Skip("Can't test deduplicate - no duplicate files possible")
|
||||
}
|
||||
if f.Features().PutUnchecked == nil {
|
||||
t.Skip("Can't test deduplicate - no PutUnchecked")
|
||||
}
|
||||
if f.Features().MergeDirs == nil {
|
||||
t.Skip("Can't test deduplicate - no MergeDirs")
|
||||
}
|
||||
}
|
||||
|
||||
func skipIfNoHash(t *testing.T, f fs.Fs) {
|
||||
if f.Hashes().GetOne() == hash.None {
|
||||
t.Skip("Can't run this test without a hash")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeduplicateInteractive(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
skipIfNoHash(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file3 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateInteractive)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
}
|
||||
|
||||
func TestDeduplicateSkip(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
haveHash := r.Fremote.Hashes().GetOne() != hash.None
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
files := []fstest.Item{file1}
|
||||
if haveHash {
|
||||
file2 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
files = append(files, file2)
|
||||
}
|
||||
file3 := r.WriteUncheckedObject("one", "This is another one", t1)
|
||||
files = append(files, file3)
|
||||
r.CheckWithDuplicates(t, files...)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateSkip)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckWithDuplicates(t, file1, file3)
|
||||
}
|
||||
|
||||
func TestDeduplicateFirst(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one", "This is one A", t1)
|
||||
file3 := r.WriteUncheckedObject("one", "This is one BB", t1)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateFirst)
|
||||
require.NoError(t, err)
|
||||
|
||||
// list until we get one object
|
||||
var objects, size int64
|
||||
for try := 1; try <= *fstest.ListRetries; try++ {
|
||||
objects, size, err = operations.Count(r.Fremote)
|
||||
require.NoError(t, err)
|
||||
if objects == 1 {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
assert.Equal(t, int64(1), objects)
|
||||
if size != file1.Size && size != file2.Size && size != file3.Size {
|
||||
t.Errorf("Size not one of the object sizes %d", size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeduplicateNewest(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one", "This is one too", t2)
|
||||
file3 := r.WriteUncheckedObject("one", "This is another one", t3)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateNewest)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file3)
|
||||
}
|
||||
|
||||
func TestDeduplicateOldest(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one", "This is one too", t2)
|
||||
file3 := r.WriteUncheckedObject("one", "This is another one", t3)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateOldest)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
}
|
||||
|
||||
func TestDeduplicateLargest(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one", "This is one too", t2)
|
||||
file3 := r.WriteUncheckedObject("one", "This is another one", t3)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateLargest)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file3)
|
||||
}
|
||||
|
||||
func TestDeduplicateRename(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
skipIfCantDedupe(t, r.Fremote)
|
||||
|
||||
file1 := r.WriteUncheckedObject("one.txt", "This is one", t1)
|
||||
file2 := r.WriteUncheckedObject("one.txt", "This is one too", t2)
|
||||
file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3)
|
||||
r.CheckWithDuplicates(t, file1, file2, file3)
|
||||
|
||||
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
remote := o.Remote()
|
||||
if remote != "one-1.txt" &&
|
||||
remote != "one-2.txt" &&
|
||||
remote != "one-3.txt" {
|
||||
t.Errorf("Bad file name after rename %q", remote)
|
||||
}
|
||||
size := o.Size()
|
||||
if size != file1.Size && size != file2.Size && size != file3.Size {
|
||||
t.Errorf("Size not one of the object sizes %d", size)
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
// This should really be a unit test, but the test framework there
|
||||
// doesn't have enough tools to make it easy
|
||||
func TestMergeDirs(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
mergeDirs := r.Fremote.Features().MergeDirs
|
||||
if mergeDirs == nil {
|
||||
t.Skip("Can't merge directories")
|
||||
}
|
||||
|
||||
file1 := r.WriteObject("dupe1/one.txt", "This is one", t1)
|
||||
file2 := r.WriteObject("dupe2/two.txt", "This is one too", t2)
|
||||
file3 := r.WriteObject("dupe3/three.txt", "This is another one", t3)
|
||||
|
||||
objs, dirs, err := walk.GetAll(r.Fremote, "", true, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, len(dirs))
|
||||
assert.Equal(t, 0, len(objs))
|
||||
|
||||
err = mergeDirs(dirs)
|
||||
require.NoError(t, err)
|
||||
|
||||
file2.Path = "dupe1/two.txt"
|
||||
file3.Path = "dupe1/three.txt"
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
|
||||
|
||||
objs, dirs, err = walk.GetAll(r.Fremote, "", true, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(dirs))
|
||||
assert.Equal(t, 0, len(objs))
|
||||
assert.Equal(t, "dupe1", dirs[0].Remote())
|
||||
}
|
||||
104
.rclone_repo/fs/operations/listdirsorted_test.go
Executable file
104
.rclone_repo/fs/operations/listdirsorted_test.go
Executable file
@@ -0,0 +1,104 @@
|
||||
package operations_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestListDirSorted is integration testing code in fs/list/list.go
|
||||
// which can't be tested there due to import loops.
|
||||
func TestListDirSorted(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
filter.Active.Opt.MaxSize = 10
|
||||
defer func() {
|
||||
filter.Active.Opt.MaxSize = -1
|
||||
}()
|
||||
|
||||
files := []fstest.Item{
|
||||
r.WriteObject("a.txt", "hello world", t1),
|
||||
r.WriteObject("zend.txt", "hello", t1),
|
||||
r.WriteObject("sub dir/hello world", "hello world", t1),
|
||||
r.WriteObject("sub dir/hello world2", "hello world", t1),
|
||||
r.WriteObject("sub dir/ignore dir/.ignore", "", t1),
|
||||
r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1),
|
||||
r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1),
|
||||
}
|
||||
fstest.CheckItems(t, r.Fremote, files...)
|
||||
var items fs.DirEntries
|
||||
var err error
|
||||
|
||||
// Turn the DirEntry into a name, ending with a / if it is a
|
||||
// dir
|
||||
str := func(i int) string {
|
||||
item := items[i]
|
||||
name := item.Remote()
|
||||
switch item.(type) {
|
||||
case fs.Object:
|
||||
case fs.Directory:
|
||||
name += "/"
|
||||
default:
|
||||
t.Fatalf("Unknown type %+v", item)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, true, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 3)
|
||||
assert.Equal(t, "a.txt", str(0))
|
||||
assert.Equal(t, "sub dir/", str(1))
|
||||
assert.Equal(t, "zend.txt", str(2))
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, false, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 2)
|
||||
assert.Equal(t, "sub dir/", str(0))
|
||||
assert.Equal(t, "zend.txt", str(1))
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, true, "sub dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 4)
|
||||
assert.Equal(t, "sub dir/hello world", str(0))
|
||||
assert.Equal(t, "sub dir/hello world2", str(1))
|
||||
assert.Equal(t, "sub dir/ignore dir/", str(2))
|
||||
assert.Equal(t, "sub dir/sub sub dir/", str(3))
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, false, "sub dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 2)
|
||||
assert.Equal(t, "sub dir/ignore dir/", str(0))
|
||||
assert.Equal(t, "sub dir/sub sub dir/", str(1))
|
||||
|
||||
// testing ignore file
|
||||
filter.Active.Opt.ExcludeFile = ".ignore"
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, false, "sub dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
assert.Equal(t, "sub dir/sub sub dir/", str(0))
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 0)
|
||||
|
||||
items, err = list.DirSorted(r.Fremote, true, "sub dir/ignore dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 2)
|
||||
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
|
||||
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
|
||||
|
||||
filter.Active.Opt.ExcludeFile = ""
|
||||
items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 2)
|
||||
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
|
||||
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
|
||||
}
|
||||
1532
.rclone_repo/fs/operations/operations.go
Executable file
1532
.rclone_repo/fs/operations/operations.go
Executable file
File diff suppressed because it is too large
Load Diff
40
.rclone_repo/fs/operations/operations_internal_test.go
Executable file
40
.rclone_repo/fs/operations/operations_internal_test.go
Executable file
@@ -0,0 +1,40 @@
|
||||
// Internal tests for operations
|
||||
|
||||
package operations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSizeDiffers(t *testing.T) {
|
||||
when := time.Now()
|
||||
for _, test := range []struct {
|
||||
ignoreSize bool
|
||||
srcSize int64
|
||||
dstSize int64
|
||||
want bool
|
||||
}{
|
||||
{false, 0, 0, false},
|
||||
{false, 1, 2, true},
|
||||
{false, 1, -1, false},
|
||||
{false, -1, 1, false},
|
||||
{true, 0, 0, false},
|
||||
{true, 1, 2, false},
|
||||
{true, 1, -1, false},
|
||||
{true, -1, 1, false},
|
||||
} {
|
||||
src := object.NewStaticObjectInfo("a", when, test.srcSize, true, nil, nil)
|
||||
dst := object.NewStaticObjectInfo("a", when, test.dstSize, true, nil, nil)
|
||||
oldIgnoreSize := fs.Config.IgnoreSize
|
||||
fs.Config.IgnoreSize = test.ignoreSize
|
||||
got := sizeDiffers(src, dst)
|
||||
fs.Config.IgnoreSize = oldIgnoreSize
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("ignoreSize=%v, srcSize=%v, dstSize=%v", test.ignoreSize, test.srcSize, test.dstSize))
|
||||
}
|
||||
}
|
||||
799
.rclone_repo/fs/operations/operations_test.go
Executable file
799
.rclone_repo/fs/operations/operations_test.go
Executable file
@@ -0,0 +1,799 @@
|
||||
// Integration tests - test rclone by doing real transactions to a
|
||||
// storage provider to and from the local disk.
|
||||
//
|
||||
// By default it will use a local fs, however you can provide a
|
||||
// -remote option to use a different remote. The test_all.go script
|
||||
// is a wrapper to call this for all the test remotes.
|
||||
//
|
||||
// FIXME not safe for concurrent running of tests until fs.Config is
|
||||
// no longer a global
|
||||
//
|
||||
// NB When writing tests
|
||||
//
|
||||
// Make sure every series of writes to the remote has a
|
||||
// fstest.CheckItems() before use. This make sure the directory
|
||||
// listing is now consistent and stops cascading errors.
|
||||
//
|
||||
// Call accounting.Stats.ResetCounters() before every fs.Sync() as it
|
||||
// uses the error count internally.
|
||||
|
||||
package operations_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/all" // import all backends
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Some times used in the tests
|
||||
var (
|
||||
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
|
||||
t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
|
||||
)
|
||||
|
||||
// TestMain drives the tests
|
||||
func TestMain(m *testing.M) {
|
||||
fstest.TestMain(m)
|
||||
}
|
||||
|
||||
func TestMkdir(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
err := operations.Mkdir(r.Fremote, "")
|
||||
require.NoError(t, err)
|
||||
fstest.CheckListing(t, r.Fremote, []fstest.Item{})
|
||||
|
||||
err = operations.Mkdir(r.Fremote, "")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLsd(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := operations.ListDir(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res := buf.String()
|
||||
assert.Contains(t, res, "sub dir\n")
|
||||
}
|
||||
|
||||
func TestLs(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := operations.List(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res := buf.String()
|
||||
assert.Contains(t, res, " 0 empty space\n")
|
||||
assert.Contains(t, res, " 60 potato2\n")
|
||||
}
|
||||
|
||||
func TestLsLong(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := operations.ListLong(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res := buf.String()
|
||||
lines := strings.Split(strings.Trim(res, "\n"), "\n")
|
||||
assert.Equal(t, 2, len(lines))
|
||||
|
||||
timeFormat := "2006-01-02 15:04:05.000000000"
|
||||
precision := r.Fremote.Precision()
|
||||
location := time.Now().Location()
|
||||
checkTime := func(m, filename string, expected time.Time) {
|
||||
modTime, err := time.ParseInLocation(timeFormat, m, location) // parse as localtime
|
||||
if err != nil {
|
||||
t.Errorf("Error parsing %q: %v", m, err)
|
||||
} else {
|
||||
dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
|
||||
if !ok {
|
||||
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
|
||||
if ms := m1.FindStringSubmatch(res); ms == nil {
|
||||
t.Errorf("empty space missing: %q", res)
|
||||
} else {
|
||||
checkTime(ms[1], "empty space", t2.Local())
|
||||
}
|
||||
|
||||
m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
|
||||
if ms := m2.FindStringSubmatch(res); ms == nil {
|
||||
t.Errorf("potato2 missing: %q", res)
|
||||
} else {
|
||||
checkTime(ms[1], "potato2", t1.Local())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashSums(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
// MD5 Sum
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := operations.Md5sum(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res := buf.String()
|
||||
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED empty space\n") &&
|
||||
!strings.Contains(res, " empty space\n") {
|
||||
t.Errorf("empty space missing: %q", res)
|
||||
}
|
||||
if !strings.Contains(res, "d6548b156ea68a4e003e786df99eee76 potato2\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED potato2\n") &&
|
||||
!strings.Contains(res, " potato2\n") {
|
||||
t.Errorf("potato2 missing: %q", res)
|
||||
}
|
||||
|
||||
// SHA1 Sum
|
||||
|
||||
buf.Reset()
|
||||
err = operations.Sha1sum(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
if !strings.Contains(res, "da39a3ee5e6b4b0d3255bfef95601890afd80709 empty space\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED empty space\n") &&
|
||||
!strings.Contains(res, " empty space\n") {
|
||||
t.Errorf("empty space missing: %q", res)
|
||||
}
|
||||
if !strings.Contains(res, "9dc7f7d3279715991a22853f5981df582b7f9f6d potato2\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED potato2\n") &&
|
||||
!strings.Contains(res, " potato2\n") {
|
||||
t.Errorf("potato2 missing: %q", res)
|
||||
}
|
||||
|
||||
// Dropbox Hash Sum
|
||||
|
||||
buf.Reset()
|
||||
err = operations.DropboxHashSum(r.Fremote, &buf)
|
||||
require.NoError(t, err)
|
||||
res = buf.String()
|
||||
if !strings.Contains(res, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 empty space\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED empty space\n") &&
|
||||
!strings.Contains(res, " empty space\n") {
|
||||
t.Errorf("empty space missing: %q", res)
|
||||
}
|
||||
if !strings.Contains(res, "a979481df794fed9c3990a6a422e0b1044ac802c15fab13af9c687f8bdbee01a potato2\n") &&
|
||||
!strings.Contains(res, " UNSUPPORTED potato2\n") &&
|
||||
!strings.Contains(res, " potato2\n") {
|
||||
t.Errorf("potato2 missing: %q", res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
file3 := r.WriteBoth("sub dir/potato3", "hello", t2)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
|
||||
|
||||
// Check the MaxDepth too
|
||||
fs.Config.MaxDepth = 1
|
||||
defer func() { fs.Config.MaxDepth = -1 }()
|
||||
|
||||
objects, size, err := operations.Count(r.Fremote)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(2), objects)
|
||||
assert.Equal(t, int64(60), size)
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes
|
||||
file2 := r.WriteObject("medium", "------------------------------------------------------------", t1) // 60 bytes
|
||||
file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
|
||||
|
||||
filter.Active.Opt.MaxSize = 60
|
||||
defer func() {
|
||||
filter.Active.Opt.MaxSize = -1
|
||||
}()
|
||||
|
||||
err := operations.Delete(r.Fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, file3)
|
||||
}
|
||||
|
||||
// testCheck exercises a check function (operations.Check or
// operations.CheckDownload) through a sequence of local/remote differences,
// verifying at each step both the returned error and the number of errors
// accumulated in accounting.Stats.
func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) error) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	// check runs checkFunction and asserts that exactly wantErrors new errors
	// were recorded and that err is non-nil iff errors were expected.
	check := func(i int, wantErrors int64, oneway bool) {
		fs.Debugf(r.Fremote, "%d: Starting check test", i)
		oldErrors := accounting.Stats.GetErrors()
		err := checkFunction(r.Fremote, r.Flocal, oneway)
		gotErrors := accounting.Stats.GetErrors() - oldErrors
		if wantErrors == 0 && err != nil {
			t.Errorf("%d: Got error when not expecting one: %v", i, err)
		}
		if wantErrors != 0 && err == nil {
			t.Errorf("%d: No error when expecting one", i)
		}
		if wantErrors != gotErrors {
			t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
		}
		fs.Debugf(r.Fremote, "%d: Ending check test", i)
	}

	// 1: identical file on both sides - no errors
	file1 := r.WriteBoth("rutabaga", "is tasty", t3)
	fstest.CheckItems(t, r.Fremote, file1)
	fstest.CheckItems(t, r.Flocal, file1)
	check(1, 0, false)

	// 2: extra file on the local side - one error
	file2 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	check(2, 1, false)

	// 3: extra file on the remote side as well - two errors
	file3 := r.WriteObject("empty space", "", t2)
	fstest.CheckItems(t, r.Fremote, file1, file3)
	check(3, 2, false)

	// 4: write potato2 to the remote; with SizeOnly set, different contents of
	// the same length still compare equal
	file2r := file2
	if fs.Config.SizeOnly {
		file2r = r.WriteObject("potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
	} else {
		r.WriteObject("potato2", "------------------------------------------------------------", t1)
	}
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
	check(4, 1, false)

	// 5: create "empty space" locally too - everything matches again
	r.WriteFile("empty space", "", t2)
	fstest.CheckItems(t, r.Flocal, file1, file2, file3)
	check(5, 0, false)

	// 6/7: extra file on the remote only - an error two-way, ignored one-way
	file4 := r.WriteObject("remotepotato", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3, file4)
	check(6, 1, false)
	check(7, 0, true)
}
|
||||
|
||||
// TestCheck runs the shared check scenario with operations.Check.
func TestCheck(t *testing.T) {
	testCheck(t, operations.Check)
}

// TestCheckDownload runs the shared check scenario with
// operations.CheckDownload.
func TestCheckDownload(t *testing.T) {
	testCheck(t, operations.CheckDownload)
}

// TestCheckSizeOnly re-runs TestCheck with size-only comparison enabled,
// restoring the global flag afterwards.
func TestCheckSizeOnly(t *testing.T) {
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()
	TestCheck(t)
}
|
||||
|
||||
// TestCat checks operations.Cat over several offset/count combinations
// (negative offset reads from the end, count -1 reads to EOF).  Cat may emit
// the two files in either order, so both concatenations are accepted.
func TestCat(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("file1", "ABCDEFGHIJ", t1)
	file2 := r.WriteBoth("file2", "012345678", t2)

	fstest.CheckItems(t, r.Fremote, file1, file2)

	for _, test := range []struct {
		offset int64 // start offset, negative counts from the end
		count int64 // bytes to output, -1 for all
		a string // expected slice of file1
		b string // expected slice of file2
	}{
		{0, -1, "ABCDEFGHIJ", "012345678"},
		{0, 5, "ABCDE", "01234"},
		{-3, -1, "HIJ", "678"},
		{1, 3, "BCD", "123"},
	} {
		var buf bytes.Buffer
		err := operations.Cat(r.Fremote, &buf, test.offset, test.count)
		require.NoError(t, err)
		res := buf.String()

		if res != test.a+test.b && res != test.b+test.a {
			t.Errorf("Incorrect output from Cat(%d,%d): %q", test.offset, test.count, res)
		}
	}
}
|
||||
|
||||
// TestRcat checks operations.Rcat uploading from a stream, both below and
// above StreamingUploadCutoff, each with and without checksum verification.
func TestRcat(t *testing.T) {
	checkSumBefore := fs.Config.CheckSum
	defer func() { fs.Config.CheckSum = checkSumBefore }()

	check := func(withChecksum bool) {
		fs.Config.CheckSum = withChecksum
		prefix := "no_checksum_"
		if withChecksum {
			prefix = "with_checksum_"
		}

		r := fstest.NewRun(t)
		defer r.Finalise()

		fstest.CheckListing(t, r.Fremote, []fstest.Item{})

		// small payload, below the streaming upload cutoff
		data1 := "this is some really nice test data"
		path1 := prefix + "small_file_from_pipe"

		// one byte over the cutoff to force the streamed upload path
		data2 := string(make([]byte, fs.Config.StreamingUploadCutoff+1))
		path2 := prefix + "big_file_from_pipe"

		in := ioutil.NopCloser(strings.NewReader(data1))
		_, err := operations.Rcat(r.Fremote, path1, in, t1)
		require.NoError(t, err)

		in = ioutil.NopCloser(strings.NewReader(data2))
		_, err = operations.Rcat(r.Fremote, path2, in, t2)
		require.NoError(t, err)

		file1 := fstest.NewItem(path1, data1, t1)
		file2 := fstest.NewItem(path2, data2, t2)
		fstest.CheckItems(t, r.Fremote, file1, file2)
	}

	check(true)
	check(false)
}
|
||||
|
||||
// TestRmdirsNoLeaveRoot checks that operations.Rmdirs (leaveRoot=false)
// removes all empty directories while keeping every directory that still
// contains a file somewhere beneath it.
func TestRmdirsNoLeaveRoot(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	r.Mkdir(r.Fremote)

	// Make some files and dirs we expect to keep
	r.ForceMkdir(r.Fremote)
	file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1)
	//..and dirs we expect to delete
	require.NoError(t, operations.Mkdir(r.Fremote, "A2"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A3"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4"))
	//..and one more file at the end
	file2 := r.WriteObject("A1/two", "bbb", t2)

	// Everything should exist before the Rmdirs
	fstest.CheckListingWithPrecision(
		t,
		r.Fremote,
		[]fstest.Item{
			file1, file2,
		},
		[]string{
			"A1",
			"A1/B1",
			"A1/B1/C1",
			"A2",
			"A1/B2",
			"A1/B2/C2",
			"A1/B1/C3",
			"A3",
			"A3/B3",
			"A3/B3/C4",
		},
		fs.GetModifyWindow(r.Fremote),
	)

	require.NoError(t, operations.Rmdirs(r.Fremote, "", false))

	// Only the directories on the paths to file1 and file2 should survive
	fstest.CheckListingWithPrecision(
		t,
		r.Fremote,
		[]fstest.Item{
			file1, file2,
		},
		[]string{
			"A1",
			"A1/B1",
			"A1/B1/C1",
		},
		fs.GetModifyWindow(r.Fremote),
	)

}
|
||||
|
||||
// TestRmdirsLeaveRoot checks that operations.Rmdirs with leaveRoot=true
// removes empty subdirectories of "A1" but keeps "A1" itself.
func TestRmdirsLeaveRoot(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	r.Mkdir(r.Fremote)

	r.ForceMkdir(r.Fremote)

	require.NoError(t, operations.Mkdir(r.Fremote, "A1"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1"))
	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C1"))

	// All three empty directories exist before the Rmdirs
	fstest.CheckListingWithPrecision(
		t,
		r.Fremote,
		[]fstest.Item{},
		[]string{
			"A1",
			"A1/B1",
			"A1/B1/C1",
		},
		fs.GetModifyWindow(r.Fremote),
	)

	require.NoError(t, operations.Rmdirs(r.Fremote, "A1", true))

	// Only the root "A1" should remain
	fstest.CheckListingWithPrecision(
		t,
		r.Fremote,
		[]fstest.Item{},
		[]string{
			"A1",
		},
		fs.GetModifyWindow(r.Fremote),
	)
}
|
||||
|
||||
func TestRcatSize(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
const body = "------------------------------------------------------------"
|
||||
file1 := r.WriteFile("potato1", body, t1)
|
||||
file2 := r.WriteFile("potato2", body, t2)
|
||||
// Test with known length
|
||||
bodyReader := ioutil.NopCloser(strings.NewReader(body))
|
||||
obj, err := operations.RcatSize(r.Fremote, file1.Path, bodyReader, int64(len(body)), file1.ModTime)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(len(body)), obj.Size())
|
||||
assert.Equal(t, file1.Path, obj.Remote())
|
||||
|
||||
// Test with unknown length
|
||||
bodyReader = ioutil.NopCloser(strings.NewReader(body)) // reset Reader
|
||||
ioutil.NopCloser(strings.NewReader(body))
|
||||
obj, err = operations.RcatSize(r.Fremote, file2.Path, bodyReader, -1, file2.ModTime)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(len(body)), obj.Size())
|
||||
assert.Equal(t, file2.Path, obj.Remote())
|
||||
|
||||
// Check files exist
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
}
|
||||
|
||||
// TestMoveFile checks operations.MoveFile: a move removes the local source,
// a repeated move of a re-created source still works, and a same-remote
// same-path move is a no-op that doesn't error.
func TestMoveFile(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	file1 := r.WriteFile("file1", "file1 contents", t1)
	fstest.CheckItems(t, r.Flocal, file1)

	file2 := file1
	file2.Path = "sub/file2"

	// Move local file1 to remote sub/file2 - local becomes empty
	err := operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal)
	fstest.CheckItems(t, r.Fremote, file2)

	// Re-create the source and move again onto the existing destination
	r.WriteFile("file1", "file1 contents", t1)
	fstest.CheckItems(t, r.Flocal, file1)

	err = operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal)
	fstest.CheckItems(t, r.Fremote, file2)

	// Move a remote file onto itself - nothing should change
	err = operations.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal)
	fstest.CheckItems(t, r.Fremote, file2)
}
|
||||
|
||||
// TestCopyFile checks operations.CopyFile: the local source is kept, copying
// twice is idempotent, and a same-remote same-path copy doesn't error.
func TestCopyFile(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	file1 := r.WriteFile("file1", "file1 contents", t1)
	fstest.CheckItems(t, r.Flocal, file1)

	file2 := file1
	file2.Path = "sub/file2"

	// Copy local file1 to remote sub/file2 - local keeps its file
	err := operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal, file1)
	fstest.CheckItems(t, r.Fremote, file2)

	// Copying again over the existing destination changes nothing
	err = operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal, file1)
	fstest.CheckItems(t, r.Fremote, file2)

	// Copy a remote file onto itself - nothing should change
	err = operations.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal, file1)
	fstest.CheckItems(t, r.Fremote, file2)
}
|
||||
|
||||
// testFsInfo is a minimal implementation of fs.Info for unit testing
// SameConfig, Same and Overlapping below.
type testFsInfo struct {
	name string // remote name
	root string // remote root path
	stringVal string // value returned by String
	precision time.Duration // value returned by Precision
	hashes hash.Set // value returned by Hashes
	features fs.Features // value pointed to by Features
}

// Name of the remote (as passed into NewFs)
func (i *testFsInfo) Name() string { return i.name }

// Root of the remote (as passed into NewFs)
func (i *testFsInfo) Root() string { return i.root }

// String returns a description of the FS
func (i *testFsInfo) String() string { return i.stringVal }

// Precision of the ModTimes in this Fs
func (i *testFsInfo) Precision() time.Duration { return i.precision }

// Hashes returns the supported hash types of the filesystem
func (i *testFsInfo) Hashes() hash.Set { return i.hashes }

// Features returns the optional features of this Fs
func (i *testFsInfo) Features() *fs.Features { return &i.features }
|
||||
|
||||
// TestSameConfig checks operations.SameConfig in both argument orders: per
// the table, two remotes share a config iff their names match, whatever
// their roots.
func TestSameConfig(t *testing.T) {
	a := &testFsInfo{name: "name", root: "root"}
	for _, test := range []struct {
		name string
		root string
		expected bool
	}{
		{"name", "root", true},
		{"name", "rooty", true},
		{"namey", "root", false},
		{"namey", "roott", false},
	} {
		b := &testFsInfo{name: test.name, root: test.root}
		actual := operations.SameConfig(a, b)
		assert.Equal(t, test.expected, actual)
		// The relation must be symmetric
		actual = operations.SameConfig(b, a)
		assert.Equal(t, test.expected, actual)
	}
}
|
||||
|
||||
// TestSame checks operations.Same in both argument orders: per the table,
// two remotes are the same only when both name and root match.
func TestSame(t *testing.T) {
	a := &testFsInfo{name: "name", root: "root"}
	for _, test := range []struct {
		name string
		root string
		expected bool
	}{
		{"name", "root", true},
		{"name", "rooty", false},
		{"namey", "root", false},
		{"namey", "roott", false},
	} {
		b := &testFsInfo{name: test.name, root: test.root}
		actual := operations.Same(a, b)
		assert.Equal(t, test.expected, actual)
		// The relation must be symmetric
		actual = operations.Same(b, a)
		assert.Equal(t, test.expected, actual)
	}
}
|
||||
|
||||
// TestOverlapping checks operations.Overlapping in both argument orders:
// per the table, remotes overlap when they share a name and one root is a
// path prefix of the other ("" and "/" overlap everything on that remote).
func TestOverlapping(t *testing.T) {
	a := &testFsInfo{name: "name", root: "root"}
	for _, test := range []struct {
		name string
		root string
		expected bool
	}{
		{"name", "root", true},
		{"namey", "root", false},
		{"name", "rooty", false},
		{"namey", "rooty", false},
		{"name", "roo", false},
		{"name", "root/toot", true},
		{"name", "root/toot/", true},
		{"name", "", true},
		{"name", "/", true},
	} {
		b := &testFsInfo{name: test.name, root: test.root}
		what := fmt.Sprintf("(%q,%q) vs (%q,%q)", a.name, a.root, b.name, b.root)
		actual := operations.Overlapping(a, b)
		assert.Equal(t, test.expected, actual, what)
		// The relation must be symmetric
		actual = operations.Overlapping(b, a)
		assert.Equal(t, test.expected, actual, what)
	}
}
|
||||
|
||||
type errorReader struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (er errorReader) Read(p []byte) (n int, err error) {
|
||||
return 0, er.err
|
||||
}
|
||||
|
||||
// TestCheckEqualReaders checks operations.CheckEqualReaders: equal streams
// report differ=false, any content or length difference reports differ=true,
// and a read error from either side is propagated with differ=true.
// The 65k/66k sizes straddle the comparison's internal buffer size.
func TestCheckEqualReaders(t *testing.T) {
	b65a := make([]byte, 65*1024)
	b65b := make([]byte, 65*1024)
	b65b[len(b65b)-1] = 1 // differs from b65a in the last byte only
	b66 := make([]byte, 66*1024)

	differ, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, false)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	// wrap returns a reader which yields b then fails with myErr
	myErr := errors.New("sentinel")
	wrap := func(b []byte) io.Reader {
		r := bytes.NewBuffer(b)
		e := errorReader{myErr}
		return io.MultiReader(r, e)
	}

	// Error on the first reader
	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	// Error on the second reader
	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)
}
|
||||
|
||||
// TestListFormat checks operations.ListFormat output options one at a time
// (path, dir slash, mod time, ID, mime type, absolute paths, size, hashes,
// separators, CSV quoting) against a listing with one file and one subdir.
func TestListFormat(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	file1 := r.WriteObject("a", "a", t1)
	file2 := r.WriteObject("subdir/b", "b", t1)

	fstest.CheckItems(t, r.Fremote, file1, file2)

	// items[0] is the file "a", items[1] the directory "subdir".
	// NB the variable below shadows the list package - DirSorted must be
	// called before the shadowing declaration.
	items, _ := list.DirSorted(r.Fremote, true, "")
	var list operations.ListFormat
	list.AddPath()
	list.SetDirSlash(false)
	assert.Equal(t, "subdir", list.Format(items[1]))

	list.SetDirSlash(true)
	assert.Equal(t, "subdir/", list.Format(items[1]))

	// No output functions set - empty string
	list.SetOutput(nil)
	assert.Equal(t, "", list.Format(items[1]))

	// Custom output functions joined by the separator
	list.AppendOutput(func() string { return "a" })
	list.AppendOutput(func() string { return "b" })
	assert.Equal(t, "ab", list.Format(items[1]))
	list.SetSeparator(":::")
	assert.Equal(t, "a:::b", list.Format(items[1]))

	list.SetOutput(nil)
	list.AddModTime()
	assert.Equal(t, items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))

	list.SetOutput(nil)
	list.AddID()
	_ = list.Format(items[0]) // Can't really check anything - at least it didn't panic!

	list.SetOutput(nil)
	list.AddMimeType()
	assert.Contains(t, list.Format(items[0]), "/")
	assert.Equal(t, "inode/directory", list.Format(items[1]))

	list.SetOutput(nil)
	list.AddPath()
	list.SetAbsolute(true)
	assert.Equal(t, "/a", list.Format(items[0]))
	list.SetAbsolute(false)
	assert.Equal(t, "a", list.Format(items[0]))

	list.SetOutput(nil)
	list.AddSize()
	assert.Equal(t, "1", list.Format(items[0]))

	// Several columns together with a custom separator
	list.AddPath()
	list.AddModTime()
	list.SetDirSlash(true)
	list.SetSeparator("__SEP__")
	assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))

	// Hash output - the remote may not support a given hash, in which case
	// Format yields "UNSUPPORTED" or "" and nothing is asserted
	for _, test := range []struct {
		ht hash.Type
		want string
	}{
		{hash.MD5, "0cc175b9c0f1b6a831c399e269772661"},
		{hash.SHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
		{hash.Dropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
	} {
		list.SetOutput(nil)
		list.AddHash(test.ht)
		got := list.Format(items[0])
		if got != "UNSUPPORTED" && got != "" {
			assert.Equal(t, test.want, got)
		}
	}

	// CSV mode with a custom separator
	list.SetOutput(nil)
	list.SetSeparator("|")
	list.SetCSV(true)
	list.AddSize()
	list.AddPath()
	list.AddModTime()
	list.SetDirSlash(true)
	assert.Equal(t, "1|a|"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0]))
	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))

}
|
||||
262
.rclone_repo/fs/options.go
Executable file
262
.rclone_repo/fs/options.go
Executable file
@@ -0,0 +1,262 @@
|
||||
// Define the options for Open
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// OpenOption is an interface describing options for Open.
//
// Implementations in this file are RangeOption, SeekOption, HTTPOption
// and HashesOption.
type OpenOption interface {
	fmt.Stringer

	// Header returns the option as an HTTP header; an empty key means
	// the option has no header representation
	Header() (key string, value string)

	// Mandatory returns whether this option can be ignored or not
	Mandatory() bool
}
|
||||
|
||||
// RangeOption defines an HTTP Range option with start and end.  If
// either start or end are < 0 then they will be omitted.
//
// End may be bigger than the Size of the object in which case it will
// be capped to the size of the object.
//
// Note that the End is inclusive, so to fetch 100 bytes you would use
// RangeOption{Start: 0, End: 99}
//
// If Start is specified but End is not then it will fetch from Start
// to the end of the file.
//
// If End is specified, but Start is not then it will fetch the last
// End bytes.
//
// Examples:
//
//     RangeOption{Start: 0, End: 99}    - fetch the first 100 bytes
//     RangeOption{Start: 100, End: 199} - fetch the second 100 bytes
//     RangeOption{Start: 100}           - fetch bytes from offset 100 to the end
//     RangeOption{End: 100}             - fetch the last 100 bytes
//
// A RangeOption implements a single byte-range-spec from
// https://tools.ietf.org/html/rfc7233#section-2.1
type RangeOption struct {
	Start int64
	End   int64
}

// Header formats the option as an HTTP "Range" header, omitting
// whichever of Start/End is negative.
func (o *RangeOption) Header() (key string, value string) {
	start, end := "", ""
	if o.Start >= 0 {
		start = strconv.FormatInt(o.Start, 10)
	}
	if o.End >= 0 {
		end = strconv.FormatInt(o.End, 10)
	}
	return "Range", "bytes=" + start + "-" + end
}

// ParseRangeOption parses a RangeOption from a Range: header.
// It only accepts single ranges.
func ParseRangeOption(s string) (po *RangeOption, err error) {
	const preamble = "bytes="
	if !strings.HasPrefix(s, preamble) {
		return nil, errors.New("Range: header invalid: doesn't start with " + preamble)
	}
	s = s[len(preamble):]
	if strings.ContainsRune(s, ',') {
		return nil, errors.New("Range: header invalid: contains multiple ranges which isn't supported")
	}
	dash := strings.IndexRune(s, '-')
	if dash < 0 {
		return nil, errors.New("Range: header invalid: contains no '-'")
	}
	startStr := strings.TrimSpace(s[:dash])
	endStr := strings.TrimSpace(s[dash+1:])
	o := RangeOption{Start: -1, End: -1}
	if startStr != "" {
		if o.Start, err = strconv.ParseInt(startStr, 10, 64); err != nil || o.Start < 0 {
			return nil, errors.New("Range: header invalid: bad start")
		}
	}
	if endStr != "" {
		if o.End, err = strconv.ParseInt(endStr, 10, 64); err != nil || o.End < 0 {
			return nil, errors.New("Range: header invalid: bad end")
		}
	}
	return &o, nil
}

// String formats the option into human readable form
func (o *RangeOption) String() string {
	return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *RangeOption) Mandatory() bool {
	return true
}

// Decode interprets the RangeOption into an offset and a limit
//
// The offset is the start of the stream and the limit is how many
// bytes should be read from it.  If the limit is -1 then the stream
// should be read to the end.
func (o *RangeOption) Decode(size int64) (offset, limit int64) {
	switch {
	case o.Start >= 0 && o.End >= 0:
		// absolute range: inclusive End, so the limit is End-Start+1
		return o.Start, o.End - o.Start + 1
	case o.Start >= 0:
		// open-ended range: read from Start to EOF
		return o.Start, -1
	case o.End >= 0:
		// suffix range: the last End bytes of the object
		return size - o.End, -1
	}
	// neither set: the whole object
	return 0, -1
}
|
||||
|
||||
// FixRangeOption looks through the slice of options and adjusts any
|
||||
// RangeOption~s found that request a fetch from the end into an
|
||||
// absolute fetch using the size passed in and makes sure the range does
|
||||
// not exceed filesize. Some remotes (eg Onedrive, Box) don't support
|
||||
// range requests which index from the end.
|
||||
func FixRangeOption(options []OpenOption, size int64) {
|
||||
for i := range options {
|
||||
option := options[i]
|
||||
if x, ok := option.(*RangeOption); ok {
|
||||
// If start is < 0 then fetch from the end
|
||||
if x.Start < 0 {
|
||||
x = &RangeOption{Start: size - x.End, End: -1}
|
||||
options[i] = x
|
||||
}
|
||||
if x.End > size {
|
||||
x = &RangeOption{Start: x.Start, End: size}
|
||||
options[i] = x
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekOption defines an HTTP Range option with start only.
type SeekOption struct {
	Offset int64
}

// Header formats the option as an open-ended "Range" header starting
// at Offset.
func (o *SeekOption) Header() (string, string) {
	return "Range", fmt.Sprintf("bytes=%d-", o.Offset)
}

// String formats the option into human readable form
func (o *SeekOption) String() string {
	return fmt.Sprintf("SeekOption(%d)", o.Offset)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *SeekOption) Mandatory() bool {
	return true
}
|
||||
|
||||
// HTTPOption defines a general purpose HTTP option which is passed
// straight through as a "Key: Value" header.
type HTTPOption struct {
	Key   string
	Value string
}

// Header returns the option as an http header
func (o *HTTPOption) Header() (string, string) {
	return o.Key, o.Value
}

// String formats the option into human readable form
func (o *HTTPOption) String() string {
	return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value)
}

// Mandatory returns false - an HTTPOption can safely be ignored
func (o *HTTPOption) Mandatory() bool {
	return false
}
|
||||
|
||||
// HashesOption defines an option used to tell the local fs to limit
|
||||
// the number of hashes it calculates.
|
||||
type HashesOption struct {
|
||||
Hashes hash.Set
|
||||
}
|
||||
|
||||
// Header formats the option as an http header
|
||||
func (o *HashesOption) Header() (key string, value string) {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// String formats the option into human readable form
|
||||
func (o *HashesOption) String() string {
|
||||
return fmt.Sprintf("HashesOption(%v)", o.Hashes)
|
||||
}
|
||||
|
||||
// Mandatory returns whether the option must be parsed or can be ignored
|
||||
func (o *HashesOption) Mandatory() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// OpenOptionAddHeaders adds each header found in options to the
|
||||
// headers map provided the key was non empty.
|
||||
func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
if key != "" && value != "" {
|
||||
headers[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OpenOptionHeaders adds each header found in options to the
|
||||
// headers map provided the key was non empty.
|
||||
//
|
||||
// It returns a nil map if options was empty
|
||||
func OpenOptionHeaders(options []OpenOption) (headers map[string]string) {
|
||||
if len(options) == 0 {
|
||||
return nil
|
||||
}
|
||||
headers = make(map[string]string, len(options))
|
||||
OpenOptionAddHeaders(options, headers)
|
||||
return headers
|
||||
}
|
||||
|
||||
// OpenOptionAddHTTPHeaders Sets each header found in options to the
|
||||
// http.Header map provided the key was non empty.
|
||||
func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
if key != "" && value != "" {
|
||||
headers.Set(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check interface
|
||||
var (
|
||||
_ OpenOption = (*RangeOption)(nil)
|
||||
_ OpenOption = (*SeekOption)(nil)
|
||||
_ OpenOption = (*HTTPOption)(nil)
|
||||
)
|
||||
60
.rclone_repo/fs/options_test.go
Executable file
60
.rclone_repo/fs/options_test.go
Executable file
@@ -0,0 +1,60 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestParseRangeOption checks ParseRangeOption against valid and invalid
// Range: header values, matching errors by substring.
func TestParseRangeOption(t *testing.T) {
	for _, test := range []struct {
		in string
		want RangeOption
		err string // substring expected in the error, "" for success
	}{
		{in: "", err: "doesn't start with bytes="},
		{in: "bytes=1-2,3-4", err: "contains multiple ranges"},
		{in: "bytes=100", err: "contains no '-'"},
		{in: "bytes=x-8", err: "bad start"},
		{in: "bytes=8-x", err: "bad end"},
		{in: "bytes=1-2", want: RangeOption{Start: 1, End: 2}},
		{in: "bytes=-123456789123456789", want: RangeOption{Start: -1, End: 123456789123456789}},
		{in: "bytes=123456789123456789-", want: RangeOption{Start: 123456789123456789, End: -1}},
		{in: "bytes= 1 - 2 ", want: RangeOption{Start: 1, End: 2}},
		{in: "bytes=-", want: RangeOption{Start: -1, End: -1}},
		{in: "bytes= - ", want: RangeOption{Start: -1, End: -1}},
	} {
		got, err := ParseRangeOption(test.in)
		what := fmt.Sprintf("parsing %q", test.in)
		if test.err != "" {
			require.Contains(t, err.Error(), test.err)
			require.Nil(t, got, what)
		} else {
			require.NoError(t, err, what)
			assert.Equal(t, test.want, *got, what)
		}
	}
}
|
||||
|
||||
// TestRangeOptionDecode checks RangeOption.Decode over absolute, open-ended,
// suffix (-1 Start) and unset ranges.
func TestRangeOptionDecode(t *testing.T) {
	for _, test := range []struct {
		in RangeOption
		size int64
		wantOffset int64
		wantLimit int64 // -1 means read to the end
	}{
		{in: RangeOption{Start: 1, End: 10}, size: 100, wantOffset: 1, wantLimit: 10},
		{in: RangeOption{Start: 10, End: 10}, size: 100, wantOffset: 10, wantLimit: 1},
		{in: RangeOption{Start: 10, End: 9}, size: 100, wantOffset: 10, wantLimit: 0},
		{in: RangeOption{Start: 1, End: -1}, size: 100, wantOffset: 1, wantLimit: -1},
		{in: RangeOption{Start: -1, End: 90}, size: 100, wantOffset: 10, wantLimit: -1},
		{in: RangeOption{Start: -1, End: -1}, size: 100, wantOffset: 0, wantLimit: -1},
	} {
		gotOffset, gotLimit := test.in.Decode(test.size)
		what := fmt.Sprintf("%+v size=%d", test.in, test.size)
		assert.Equal(t, test.wantOffset, gotOffset, "offset "+what)
		assert.Equal(t, test.wantLimit, gotLimit, "limit "+what)
	}
}
|
||||
103
.rclone_repo/fs/parseduration.go
Executable file
103
.rclone_repo/fs/parseduration.go
Executable file
@@ -0,0 +1,103 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Duration is a time.Duration with some more parsing options
|
||||
type Duration time.Duration
|
||||
|
||||
// DurationOff is the default value for flags which can be turned off
|
||||
const DurationOff = Duration((1 << 63) - 1)
|
||||
|
||||
// Turn Duration into a string
|
||||
func (d Duration) String() string {
|
||||
if d == DurationOff {
|
||||
return "off"
|
||||
}
|
||||
for i := len(ageSuffixes) - 2; i >= 0; i-- {
|
||||
ageSuffix := &ageSuffixes[i]
|
||||
if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) {
|
||||
timeUnits := float64(d) / float64(ageSuffix.Multiplier)
|
||||
return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix
|
||||
}
|
||||
}
|
||||
return time.Duration(d).String()
|
||||
}
|
||||
|
||||
// IsSet returns if the duration is != DurationOff
|
||||
func (d Duration) IsSet() bool {
|
||||
return d != DurationOff
|
||||
}
|
||||
|
||||
// We use time conventions
|
||||
var ageSuffixes = []struct {
|
||||
Suffix string
|
||||
Multiplier time.Duration
|
||||
}{
|
||||
{Suffix: "d", Multiplier: time.Hour * 24},
|
||||
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
|
||||
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
|
||||
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
|
||||
|
||||
// Default to second
|
||||
{Suffix: "", Multiplier: time.Second},
|
||||
}
|
||||
|
||||
// ParseDuration parses a duration string. Accept ms|s|m|h|d|w|M|y suffixes. Defaults to second if not provided
|
||||
func ParseDuration(age string) (time.Duration, error) {
|
||||
var period float64
|
||||
|
||||
if age == "off" {
|
||||
return time.Duration(DurationOff), nil
|
||||
}
|
||||
|
||||
// Attempt to parse as a time.Duration first
|
||||
d, err := time.ParseDuration(age)
|
||||
if err == nil {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
for _, ageSuffix := range ageSuffixes {
|
||||
if strings.HasSuffix(age, ageSuffix.Suffix) {
|
||||
numberString := age[:len(age)-len(ageSuffix.Suffix)]
|
||||
var err error
|
||||
period, err = strconv.ParseFloat(numberString, 64)
|
||||
if err != nil {
|
||||
return time.Duration(0), err
|
||||
}
|
||||
period *= float64(ageSuffix.Multiplier)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return time.Duration(period), nil
|
||||
}
|
||||
|
||||
// Set a Duration
|
||||
func (d *Duration) Set(s string) error {
|
||||
duration, err := ParseDuration(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = Duration(duration)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (d Duration) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
// Scan implements the fmt.Scanner interface
|
||||
func (d *Duration) Scan(s fmt.ScanState, ch rune) error {
|
||||
token, err := s.Token(true, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.Set(string(token))
|
||||
}
|
||||
93
.rclone_repo/fs/parseduration_test.go
Executable file
93
.rclone_repo/fs/parseduration_test.go
Executable file
@@ -0,0 +1,93 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the interface
var _ pflag.Value = (*Duration)(nil)

// TestParseDuration exercises ParseDuration with every supported
// suffix, fractional and negative values, the error cases and the
// special "off" value.
func TestParseDuration(t *testing.T) {
	for _, test := range []struct {
		in   string
		want time.Duration
		err  bool
	}{
		{"0", 0, false},
		{"", 0, true},
		{"1ms", time.Millisecond, false},
		{"1s", time.Second, false},
		{"1m", time.Minute, false},
		{"1.5m", (3 * time.Minute) / 2, false},
		{"1h", time.Hour, false},
		{"1d", time.Hour * 24, false},
		{"1w", time.Hour * 24 * 7, false},
		{"1M", time.Hour * 24 * 30, false},
		{"1y", time.Hour * 24 * 365, false},
		{"1.5y", time.Hour * 24 * 365 * 3 / 2, false},
		{"-1s", -time.Second, false},
		{"1.s", time.Second, false},
		{"1x", 0, true},
		{"off", time.Duration(DurationOff), false},
		{"1h2m3s", time.Hour + 2*time.Minute + 3*time.Second, false},
	} {
		duration, err := ParseDuration(test.in)
		if test.err {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
		assert.Equal(t, test.want, duration)
	}
}

// TestDurationString checks the String output for each suffix and
// that the formatted value parses back to the original duration.
func TestDurationString(t *testing.T) {
	for _, test := range []struct {
		in   time.Duration
		want string
	}{
		{time.Duration(0), "0s"},
		{time.Second, "1s"},
		{time.Minute, "1m0s"},
		{time.Millisecond, "1ms"},
		{time.Second, "1s"},
		{(3 * time.Minute) / 2, "1m30s"},
		{time.Hour, "1h0m0s"},
		{time.Hour * 24, "1d"},
		{time.Hour * 24 * 7, "1w"},
		{time.Hour * 24 * 30, "1M"},
		{time.Hour * 24 * 365, "1y"},
		{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
		{-time.Second, "-1s"},
		{time.Second, "1s"},
		{time.Duration(DurationOff), "off"},
		{time.Hour + 2*time.Minute + 3*time.Second, "1h2m3s"},
		{time.Hour * 24, "1d"},
		{time.Hour * 24 * 7, "1w"},
		{time.Hour * 24 * 30, "1M"},
		{time.Hour * 24 * 365, "1y"},
		{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
		{-time.Hour * 24 * 365 * 3 / 2, "-1.5y"},
	} {
		got := Duration(test.in).String()
		assert.Equal(t, test.want, got)
		// Test the reverse
		reverse, err := ParseDuration(test.want)
		assert.NoError(t, err)
		assert.Equal(t, test.in, reverse)
	}
}

// TestDurationScan checks a Duration can be read with fmt.Sscan.
func TestDurationScan(t *testing.T) {
	var v Duration
	n, err := fmt.Sscan(" 17m ", &v)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, Duration(17*60*time.Second), v)
}
|
||||
131
.rclone_repo/fs/rc/internal.go
Executable file
131
.rclone_repo/fs/rc/internal.go
Executable file
@@ -0,0 +1,131 @@
|
||||
// Define the internal rc functions
|
||||
|
||||
package rc
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// init registers the built in remote control commands with the global
// registry when the package is imported.
func init() {
	Add(Call{
		Path:  "rc/noop",
		Fn:    rcNoop,
		Title: "Echo the input to the output parameters",
		Help: `
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
	})
	Add(Call{
		Path:  "rc/error",
		Fn:    rcError,
		Title: "This returns an error",
		Help: `
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
	})
	Add(Call{
		Path:  "rc/list",
		Fn:    rcList,
		Title: "List all the registered remote control commands",
		Help: `
This lists all the registered remote control commands as a JSON map in
the commands response.`,
	})
	Add(Call{
		Path:  "core/pid",
		Fn:    rcPid,
		Title: "Return PID of current process",
		Help: `
This returns PID of current process.
Useful for stopping rclone process.`,
	})
	Add(Call{
		Path:  "core/memstats",
		Fn:    rcMemStats,
		Title: "Returns the memory statistics",
		Help: `
This returns the memory statistics of the running program. What the values mean
are explained in the go docs: https://golang.org/pkg/runtime/#MemStats

The most interesting values for most people are:

* HeapAlloc: This is the amount of memory rclone is actually using
* HeapSys: This is the amount of memory rclone has obtained from the OS
* Sys: this is the total amount of memory requested from the OS
* It is virtual memory so may include unused memory
`,
	})
	Add(Call{
		Path:  "core/gc",
		Fn:    rcGc,
		Title: "Runs a garbage collection.",
		Help: `
This tells the go runtime to do a garbage collection run. It isn't
necessary to call this normally, but it can be useful for debugging
memory problems.
`,
	})
}
|
||||
|
||||
// Echo the input to the output parameters
func rcNoop(in Params) (out Params, err error) {
	return in, nil
}

// Return an error regardless
//
// The error message includes the input parameters so callers can
// check that parameter passing survives the error path.
func rcError(in Params) (out Params, err error) {
	return nil, errors.Errorf("arbitrary error on input %+v", in)
}
|
||||
|
||||
// List the registered commands
|
||||
func rcList(in Params) (out Params, err error) {
|
||||
out = make(Params)
|
||||
out["commands"] = registry.list()
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Return PID of current process
|
||||
func rcPid(in Params) (out Params, err error) {
|
||||
out = make(Params)
|
||||
out["pid"] = os.Getpid()
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Return the memory statistics
|
||||
func rcMemStats(in Params) (out Params, err error) {
|
||||
out = make(Params)
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
out["Alloc"] = m.Alloc
|
||||
out["TotalAlloc"] = m.TotalAlloc
|
||||
out["Sys"] = m.Sys
|
||||
out["Mallocs"] = m.Mallocs
|
||||
out["Frees"] = m.Frees
|
||||
out["HeapAlloc"] = m.HeapAlloc
|
||||
out["HeapSys"] = m.HeapSys
|
||||
out["HeapIdle"] = m.HeapIdle
|
||||
out["HeapInuse"] = m.HeapInuse
|
||||
out["HeapReleased"] = m.HeapReleased
|
||||
out["HeapObjects"] = m.HeapObjects
|
||||
out["StackInuse"] = m.StackInuse
|
||||
out["StackSys"] = m.StackSys
|
||||
out["MSpanInuse"] = m.MSpanInuse
|
||||
out["MSpanSys"] = m.MSpanSys
|
||||
out["MCacheInuse"] = m.MCacheInuse
|
||||
out["MCacheSys"] = m.MCacheSys
|
||||
out["BuckHashSys"] = m.BuckHashSys
|
||||
out["GCSys"] = m.GCSys
|
||||
out["OtherSys"] = m.OtherSys
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Do a garbage collection run
|
||||
func rcGc(in Params) (out Params, err error) {
|
||||
out = make(Params)
|
||||
runtime.GC()
|
||||
return out, nil
|
||||
}
|
||||
146
.rclone_repo/fs/rc/rc.go
Executable file
146
.rclone_repo/fs/rc/rc.go
Executable file
@@ -0,0 +1,146 @@
|
||||
// Package rc implements a remote control server and registry for rclone
|
||||
//
|
||||
// To register your internal calls, call rc.Add(path, function). Your
|
||||
// function should take and return a Param. It can also return an
|
||||
// error. Use rc.NewError to wrap an existing error along with an
|
||||
// http response type if another response other than 500 internal
|
||||
// error is required on error.
|
||||
package rc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // install the pprof http handlers
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/httplib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Options contains options for the remote control server
type Options struct {
	HTTPOptions httplib.Options // options for the underlying HTTP server
	Enabled     bool            // set to start the server
}

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
	HTTPOptions: httplib.DefaultOpt,
	Enabled:     false,
}

func init() {
	// The remote control server listens on localhost only by default.
	DefaultOpt.HTTPOptions.ListenAddr = "localhost:5572"
}
|
||||
|
||||
// Start the remote control server if configured
|
||||
func Start(opt *Options) {
|
||||
if opt.Enabled {
|
||||
s := newServer(opt)
|
||||
go s.serve()
|
||||
}
|
||||
}
|
||||
|
||||
// server contains everything to run the server
type server struct {
	srv *httplib.Server // the underlying HTTP server
}

// newServer makes a server with the rc handler registered on "/" of
// the default HTTP mux.
func newServer(opt *Options) *server {
	// Serve on the DefaultServeMux so can have global registrations appear
	// (e.g. the net/http/pprof handlers installed by the blank import).
	mux := http.DefaultServeMux
	s := &server{
		srv: httplib.NewServer(mux, &opt.HTTPOptions),
	}
	mux.HandleFunc("/", s.handler)
	return s
}
|
||||
|
||||
// serve runs the http server - doesn't return
|
||||
func (s *server) serve() {
|
||||
err := s.srv.Serve()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Opening listener: %v", err)
|
||||
}
|
||||
fs.Logf(nil, "Serving remote control on %s", s.srv.URL())
|
||||
s.srv.Wait()
|
||||
}
|
||||
|
||||
// WriteJSON writes JSON in out to w
|
||||
func WriteJSON(w io.Writer, out Params) error {
|
||||
enc := json.NewEncoder(w)
|
||||
enc.SetIndent("", "\t")
|
||||
return enc.Encode(out)
|
||||
}
|
||||
|
||||
// handler reads incoming requests and dispatches them
//
// Requests must be POSTs to a registered path.  Parameters are read
// from the form/URL values and, when Content-Type is
// application/json, from a JSON body (JSON keys overwrite form keys
// of the same name).  Results and errors are written back as JSON.
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
	path := strings.Trim(r.URL.Path, "/")
	in := make(Params)

	// writeError logs the error then sends it to the client as JSON
	// along with the input that caused it.
	writeError := func(err error, status int) {
		fs.Errorf(nil, "rc: %q: error: %v", path, err)
		w.WriteHeader(status)
		err = WriteJSON(w, Params{
			"error": err.Error(),
			"input": in,
		})
		if err != nil {
			// can't return the error at this point
			fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
		}
	}

	if r.Method != "POST" {
		writeError(errors.Errorf("method %q not allowed - POST required", r.Method), http.StatusMethodNotAllowed)
		return
	}

	// Find the call
	call := registry.get(path)
	if call == nil {
		writeError(errors.Errorf("couldn't find method %q", path), http.StatusMethodNotAllowed)
		return
	}

	// Parse the POST and URL parameters into r.Form
	err := r.ParseForm()
	if err != nil {
		writeError(errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
		return
	}

	// Read the POST and URL parameters into in - if a key is repeated
	// the last value wins.
	for k, vs := range r.Form {
		if len(vs) > 0 {
			in[k] = vs[len(vs)-1]
		}
	}
	fs.Debugf(nil, "form = %+v", r.Form)

	// Parse a JSON blob from the input
	if r.Header.Get("Content-Type") == "application/json" {
		err := json.NewDecoder(r.Body).Decode(&in)
		if err != nil {
			writeError(errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
			return
		}
	}

	fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
	out, err := call.Fn(in)
	if err != nil {
		writeError(errors.Wrap(err, "remote control command failed"), http.StatusInternalServerError)
		return
	}

	// NOTE(review): err is always nil here (non-nil returned above),
	// so the trailing %v always prints "<nil>".
	fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
	err = WriteJSON(w, out)
	if err != nil {
		// can't return the error at this point
		fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
	}
}
|
||||
20
.rclone_repo/fs/rc/rcflags/rcflags.go
Executable file
20
.rclone_repo/fs/rc/rcflags/rcflags.go
Executable file
@@ -0,0 +1,20 @@
|
||||
// Package rcflags implements command line flags to set up the remote control
|
||||
package rcflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options set by command line flags
var (
	// Opt starts as a copy of rc.DefaultOpt and is filled in by the
	// flags registered in AddFlags.
	Opt = rc.DefaultOpt
)

// AddFlags adds the remote control flags to the flagSet
func AddFlags(flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server.")
	// All the HTTP server options are exposed with an "rc-" prefix.
	httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
}
|
||||
79
.rclone_repo/fs/rc/registry.go
Executable file
79
.rclone_repo/fs/rc/registry.go
Executable file
@@ -0,0 +1,79 @@
|
||||
// Define the registry
|
||||
|
||||
package rc
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Params is the input and output type for the Func
type Params map[string]interface{}

// Func defines a type for a remote control function
type Func func(in Params) (out Params, err error)

// Call defines info about a remote control function and is used in
// the Add function to create new entry points.
type Call struct {
	Path  string // path to activate this RC
	Fn    Func   `json:"-"` // function to call
	Title string // help for the function
	Help  string // multi-line markdown formatted help
}
|
||||
|
||||
// Registry holds the list of all the registered remote control functions
type Registry struct {
	mu   sync.RWMutex     // protects call
	call map[string]*Call // the calls indexed by trimmed path
}

// NewRegistry makes a new registry for remote control functions
func NewRegistry() *Registry {
	return &Registry{
		call: make(map[string]*Call),
	}
}
|
||||
|
||||
// Add a call to the registry
|
||||
func (r *Registry) add(call Call) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
call.Path = strings.Trim(call.Path, "/")
|
||||
call.Help = strings.TrimSpace(call.Help)
|
||||
fs.Debugf(nil, "Adding path %q to remote control registry", call.Path)
|
||||
r.call[call.Path] = &call
|
||||
}
|
||||
|
||||
// get a Call from a path or nil if it isn't registered
func (r *Registry) get(path string) *Call {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.call[path]
}
|
||||
|
||||
// get a list of all calls in alphabetical order
|
||||
func (r *Registry) list() (out []*Call) {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
var keys []string
|
||||
for key := range r.call {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, key := range keys {
|
||||
out = append(out, r.call[key])
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// The global registry used by the rc server and the Add function.
var registry = NewRegistry()

// Add a function to the global registry
func Add(call Call) {
	registry.add(call)
}
|
||||
132
.rclone_repo/fs/sizesuffix.go
Executable file
132
.rclone_repo/fs/sizesuffix.go
Executable file
@@ -0,0 +1,132 @@
|
||||
package fs
|
||||
|
||||
// SizeSuffix is parsed by flag with k/M/G suffixes
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// SizeSuffix is an int64 with a friendly way of printing setting
type SizeSuffix int64

// Common multipliers for SizeSuffix
const (
	Byte SizeSuffix = 1 << (iota * 10)
	KibiByte
	MebiByte
	GibiByte
	TebiByte
	PebiByte
	ExbiByte
)

// string returns the value scaled to the largest fitting binary unit
// together with its suffix letter.  Negative values render as "off"
// with an empty suffix.
func (x SizeSuffix) string() (string, string) {
	var (
		value  float64
		suffix string
	)
	switch {
	case x < 0:
		return "off", ""
	case x == 0:
		return "0", ""
	case x < 1<<10:
		value, suffix = float64(x), ""
	case x < 1<<20:
		value, suffix = float64(x)/(1<<10), "k"
	case x < 1<<30:
		value, suffix = float64(x)/(1<<20), "M"
	case x < 1<<40:
		value, suffix = float64(x)/(1<<30), "G"
	case x < 1<<50:
		value, suffix = float64(x)/(1<<40), "T"
	default:
		value, suffix = float64(x)/(1<<50), "P"
	}
	// Whole numbers print without decimals, fractions with three.
	if math.Floor(value) == value {
		return fmt.Sprintf("%.0f", value), suffix
	}
	return fmt.Sprintf("%.3f", value), suffix
}

// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
	value, suffix := x.string()
	return value + suffix
}

// Unit turns SizeSuffix into a string with a unit, e.g. "1 kBytes"
// for Unit("Bytes").  Negative values return "off".
func (x SizeSuffix) Unit(unit string) string {
	value, suffix := x.string()
	if value == "off" {
		return value
	}
	return value + " " + suffix + unit
}
|
||||
|
||||
// Set a SizeSuffix
|
||||
func (x *SizeSuffix) Set(s string) error {
|
||||
if len(s) == 0 {
|
||||
return errors.New("empty string")
|
||||
}
|
||||
if strings.ToLower(s) == "off" {
|
||||
*x = -1
|
||||
return nil
|
||||
}
|
||||
suffix := s[len(s)-1]
|
||||
suffixLen := 1
|
||||
var multiplier float64
|
||||
switch suffix {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
|
||||
suffixLen = 0
|
||||
multiplier = 1 << 10
|
||||
case 'b', 'B':
|
||||
multiplier = 1
|
||||
case 'k', 'K':
|
||||
multiplier = 1 << 10
|
||||
case 'm', 'M':
|
||||
multiplier = 1 << 20
|
||||
case 'g', 'G':
|
||||
multiplier = 1 << 30
|
||||
case 't', 'T':
|
||||
multiplier = 1 << 40
|
||||
case 'p', 'P':
|
||||
multiplier = 1 << 50
|
||||
default:
|
||||
return errors.Errorf("bad suffix %q", suffix)
|
||||
}
|
||||
s = s[:len(s)-suffixLen]
|
||||
value, err := strconv.ParseFloat(s, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if value < 0 {
|
||||
return errors.Errorf("size can't be negative %q", s)
|
||||
}
|
||||
value *= multiplier
|
||||
*x = SizeSuffix(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type of the value - used by the pflag package in help output.
//
// NOTE(review): this returns "int64" rather than a name like
// "SizeSuffix" - confirm the generic name in flag help is intentional.
func (x *SizeSuffix) Type() string {
	return "int64"
}

// Scan implements the fmt.Scanner interface so a SizeSuffix can be
// read with fmt.Sscan and friends.
func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
	// Read the next whitespace-delimited token and parse it with Set.
	token, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return x.Set(string(token))
}
|
||||
104
.rclone_repo/fs/sizesuffix_test.go
Executable file
104
.rclone_repo/fs/sizesuffix_test.go
Executable file
@@ -0,0 +1,104 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)

// TestSizeSuffixString checks the String output for each binary
// multiplier and that negative values render as "off".
func TestSizeSuffixString(t *testing.T) {
	for _, test := range []struct {
		in   float64
		want string
	}{
		{0, "0"},
		{102, "102"},
		{1024, "1k"},
		{1024 * 1024, "1M"},
		{1024 * 1024 * 1024, "1G"},
		{10 * 1024 * 1024 * 1024, "10G"},
		{10.1 * 1024 * 1024 * 1024, "10.100G"},
		{-1, "off"},
		{-100, "off"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.String()
		assert.Equal(t, test.want, got)
	}
}

// TestSizeSuffixUnit checks Unit appends the unit name after the
// scaled value and suffix.
func TestSizeSuffixUnit(t *testing.T) {
	for _, test := range []struct {
		in   float64
		want string
	}{
		{0, "0 Bytes"},
		{102, "102 Bytes"},
		{1024, "1 kBytes"},
		{1024 * 1024, "1 MBytes"},
		{1024 * 1024 * 1024, "1 GBytes"},
		{10 * 1024 * 1024 * 1024, "10 GBytes"},
		{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
		{10 * 1024 * 1024 * 1024 * 1024, "10 TBytes"},
		{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PBytes"},
		{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1024 PBytes"},
		{-1, "off"},
		{-100, "off"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.Unit("Bytes")
		assert.Equal(t, test.want, got)
	}
}
|
||||
|
||||
// TestSizeSuffixSet checks parsing of sizes with and without
// suffixes, the "off" value and the error cases.
func TestSizeSuffixSet(t *testing.T) {
	for _, test := range []struct {
		in   string
		want int64
		err  bool
	}{
		{"0", 0, false},
		{"1b", 1, false},
		{"102B", 102, false},
		{"0.1k", 102, false},
		{"0.1", 102, false},
		{"1K", 1024, false},
		{"1", 1024, false},
		{"2.5", 1024 * 2.5, false},
		{"1M", 1024 * 1024, false},
		{"1.g", 1024 * 1024 * 1024, false},
		{"10G", 10 * 1024 * 1024 * 1024, false},
		{"10T", 10 * 1024 * 1024 * 1024 * 1024, false},
		{"10P", 10 * 1024 * 1024 * 1024 * 1024 * 1024, false},
		{"off", -1, false},
		{"OFF", -1, false},
		{"", 0, true},
		{"1q", 0, true},
		{"1.q", 0, true},
		{"1q", 0, true},
		{"-1K", 0, true},
	} {
		ss := SizeSuffix(0)
		err := ss.Set(test.in)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, int64(ss))
	}
}

// TestSizeSuffixScan checks a SizeSuffix can be read with fmt.Sscan.
func TestSizeSuffixScan(t *testing.T) {
	var v SizeSuffix
	n, err := fmt.Sscan(" 17M ", &v)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, SizeSuffix(17<<20), v)
}
|
||||
100
.rclone_repo/fs/sync/pipe.go
Executable file
100
.rclone_repo/fs/sync/pipe.go
Executable file
@@ -0,0 +1,100 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// pipe provides an unbounded channel like experience
//
// Note unlike channels these aren't strictly ordered.
type pipe struct {
	mu        sync.Mutex      // protects queue and totalSize
	c         chan struct{}   // one token per queued item; capacity bounds the backlog
	queue     []fs.ObjectPair // the items waiting to be fetched with Get
	closed    bool            // set by Close
	totalSize int64           // sum of the known (>0) sizes of the queued items
	stats     func(items int, totalSize int64) // called on every queue change
}
|
||||
|
||||
func newPipe(stats func(items int, totalSize int64), maxBacklog int) *pipe {
|
||||
return &pipe{
|
||||
c: make(chan struct{}, maxBacklog),
|
||||
stats: stats,
|
||||
}
|
||||
}
|
||||
|
||||
// Put an pair into the pipe
|
||||
//
|
||||
// It returns ok = false if the context was cancelled
|
||||
//
|
||||
// It will panic if you call it after Close()
|
||||
func (p *pipe) Put(ctx context.Context, pair fs.ObjectPair) (ok bool) {
|
||||
if ctx.Err() != nil {
|
||||
return false
|
||||
}
|
||||
p.mu.Lock()
|
||||
p.queue = append(p.queue, pair)
|
||||
size := pair.Src.Size()
|
||||
if size > 0 {
|
||||
p.totalSize += size
|
||||
}
|
||||
p.stats(len(p.queue), p.totalSize)
|
||||
p.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
case p.c <- struct{}{}:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get a pair from the pipe
//
// It returns ok = false if the context was cancelled or Close() has
// been called.
func (p *pipe) Get(ctx context.Context) (pair fs.ObjectPair, ok bool) {
	if ctx.Err() != nil {
		return
	}
	// Wait for a token signalling an item is queued, or for
	// cancellation / Close (Close closes p.c).
	select {
	case <-ctx.Done():
		return
	case _, ok = <-p.c:
		if !ok {
			// channel closed by Close()
			return
		}
	}
	// Pop the oldest item and adjust the stats.
	p.mu.Lock()
	pair, p.queue = p.queue[0], p.queue[1:]
	size := pair.Src.Size()
	if size > 0 {
		p.totalSize -= size
	}
	// Clamp - unknown sizes can get the running sum out of step.
	if p.totalSize < 0 {
		p.totalSize = 0
	}
	p.stats(len(p.queue), p.totalSize)
	p.mu.Unlock()
	return pair, true
}
|
||||
|
||||
// Stats reads the number of items in the queue and the totalSize
|
||||
func (p *pipe) Stats() (items int, totalSize int64) {
|
||||
p.mu.Lock()
|
||||
items, totalSize = len(p.queue), p.totalSize
|
||||
p.mu.Unlock()
|
||||
return items, totalSize
|
||||
}
|
||||
|
||||
// Close the pipe
//
// Writes to a closed pipe will panic as will double closing a pipe
func (p *pipe) Close() {
	p.mu.Lock()
	// Closing c wakes every pending and future Get, which then
	// returns ok = false.
	close(p.c)
	p.closed = true
	p.mu.Unlock()
}
|
||||
122
.rclone_repo/fs/sync/pipe_test.go
Executable file
122
.rclone_repo/fs/sync/pipe_test.go
Executable file
@@ -0,0 +1,122 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestPipe checks the single-threaded behaviour of pipe: stats
// reporting, reading after Close, panicking on write after Close and
// cancellation via the context.
func TestPipe(t *testing.T) {
	var queueLength int
	var queueSize int64
	stats := func(n int, size int64) {
		queueLength, queueSize = n, size
	}

	// Make a new pipe
	p := newPipe(stats, 10)

	// checkStats asserts both the Stats method and the stats callback
	// agree with the expected values.
	checkStats := func(expectedN int, expectedSize int64) {
		n, size := p.Stats()
		assert.Equal(t, expectedN, n)
		assert.Equal(t, expectedSize, size)
		assert.Equal(t, expectedN, queueLength)
		assert.Equal(t, expectedSize, queueSize)
	}

	checkStats(0, 0)

	ctx := context.Background()

	obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)

	pair1 := fs.ObjectPair{Src: obj1, Dst: nil}

	// Put an object
	ok := p.Put(ctx, pair1)
	assert.Equal(t, true, ok)
	checkStats(1, 5)

	// Close the pipe showing reading on closed pipe is OK
	p.Close()

	// Read from pipe
	pair2, ok := p.Get(ctx)
	assert.Equal(t, pair1, pair2)
	assert.Equal(t, true, ok)
	checkStats(0, 0)

	// Check read on closed pipe
	pair2, ok = p.Get(ctx)
	assert.Equal(t, fs.ObjectPair{}, pair2)
	assert.Equal(t, false, ok)

	// Check panic on write to closed pipe
	assert.Panics(t, func() { p.Put(ctx, pair1) })

	// Make a new pipe
	p = newPipe(stats, 10)
	ctx2, cancel := context.WithCancel(ctx)

	// cancel it in the background - check read ceases
	go cancel()
	pair2, ok = p.Get(ctx2)
	assert.Equal(t, fs.ObjectPair{}, pair2)
	assert.Equal(t, false, ok)

	// check we can't write
	ok = p.Put(ctx2, pair1)
	assert.Equal(t, false, ok)

}
|
||||
|
||||
// TestPipeConcurrent runs concurrent Get and Put to flush out any
// race conditions and concurrency problems.
func TestPipeConcurrent(t *testing.T) {
	const (
		N           = 1000
		readWriters = 10
	)

	stats := func(n int, size int64) {}

	// Make a new pipe
	p := newPipe(stats, 10)

	var wg sync.WaitGroup
	obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)
	pair1 := fs.ObjectPair{Src: obj1, Dst: nil}
	ctx := context.Background()
	var count int64

	for j := 0; j < readWriters; j++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			for i := 0; i < N; i++ {
				// Read from pipe
				pair2, ok := p.Get(ctx)
				assert.Equal(t, pair1, pair2)
				assert.Equal(t, true, ok)
				atomic.AddInt64(&count, -1)
			}
		}()
		go func() {
			defer wg.Done()
			for i := 0; i < N; i++ {
				// Put an object
				ok := p.Put(ctx, pair1)
				assert.Equal(t, true, ok)
				atomic.AddInt64(&count, 1)
			}
		}()
	}
	wg.Wait()

	// Every Put must have been balanced by exactly one Get
	assert.Equal(t, int64(0), count)
}
|
||||
915
.rclone_repo/fs/sync/sync.go
Executable file
915
.rclone_repo/fs/sync/sync.go
Executable file
@@ -0,0 +1,915 @@
|
||||
// Package sync is the implementation of sync/copy/move
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/march"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// syncCopyMove holds all the state for a single sync, copy or move
// between fsrc and fdst.
type syncCopyMove struct {
	// parameters
	fdst               fs.Fs         // destination of the operation
	fsrc               fs.Fs         // source of the operation
	deleteMode         fs.DeleteMode // how we are doing deletions
	DoMove             bool          // set if this is a move rather than a copy
	deleteEmptySrcDirs bool          // set to remove empty source dirs afterwards
	dir                string        // sub directory to operate on
	// internal state
	ctx            context.Context // internal context for controlling go-routines
	cancel         func()          // cancel the context
	deletersWg     sync.WaitGroup  // for delete before go routine
	deleteFilesCh  chan fs.Object  // channel to receive deletes if delete before
	trackRenames   bool            // set if we should do server side renames
	dstFilesMu     sync.Mutex      // protect dstFiles
	dstFiles       map[string]fs.Object // dst files, always filled
	srcFiles       map[string]fs.Object // src files, only used if deleteBefore
	srcFilesChan   chan fs.Object // passes src objects
	srcFilesResult chan error     // error result of src listing
	dstFilesResult chan error     // error result of dst listing
	dstEmptyDirsMu sync.Mutex     // protect dstEmptyDirs
	dstEmptyDirs   map[string]fs.DirEntry // potentially empty directories
	srcEmptyDirsMu sync.Mutex             // protect srcEmptyDirs
	srcEmptyDirs   map[string]fs.DirEntry // potentially empty directories
	checkerWg      sync.WaitGroup // wait for checkers
	toBeChecked    *pipe          // checkers channel
	transfersWg    sync.WaitGroup // wait for transfers
	toBeUploaded   *pipe          // copiers channel
	errorMu        sync.Mutex     // Mutex covering the errors variables
	err            error          // normal error from copy process
	noRetryErr     error          // error with NoRetry set
	fatalErr       error          // fatal error
	commonHash     hash.Type      // common hash type between src and dst
	renameMapMu    sync.Mutex     // mutex to protect the below
	renameMap      map[string][]fs.Object // dst files by hash - only used by trackRenames
	renamerWg      sync.WaitGroup // wait for renamers
	toBeRenamed    *pipe          // renamers channel
	trackRenamesWg sync.WaitGroup // wg for background track renames
	trackRenamesCh chan fs.Object // objects are pumped in here
	renameCheck    []fs.Object    // accumulate files to check for rename here
	backupDir      fs.Fs          // place to store overwrites/deletes
	suffix         string         // suffix to add to files placed in backupDir
}
|
||||
|
||||
// newSyncCopyMove makes a syncCopyMove ready to run for fdst <- fsrc.
//
// deleteMode controls whether/when destination files missing from the
// source are deleted, DoMove selects move rather than copy semantics,
// and deleteEmptySrcDirs removes empty source directories after a move.
//
// It disables --track-renames when its preconditions aren't met and
// returns a fatal error if --backup-dir is misconfigured.
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
	s := &syncCopyMove{
		fdst:               fdst,
		fsrc:               fsrc,
		deleteMode:         deleteMode,
		DoMove:             DoMove,
		deleteEmptySrcDirs: deleteEmptySrcDirs,
		dir:                "",
		srcFilesChan:       make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
		srcFilesResult:     make(chan error, 1),
		dstFilesResult:     make(chan error, 1),
		dstEmptyDirs:       make(map[string]fs.DirEntry),
		srcEmptyDirs:       make(map[string]fs.DirEntry),
		toBeChecked:        newPipe(accounting.Stats.SetCheckQueue, fs.Config.MaxBacklog),
		toBeUploaded:       newPipe(accounting.Stats.SetTransferQueue, fs.Config.MaxBacklog),
		deleteFilesCh:      make(chan fs.Object, fs.Config.Checkers),
		trackRenames:       fs.Config.TrackRenames,
		commonHash:         fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
		toBeRenamed:        newPipe(accounting.Stats.SetRenameQueue, fs.Config.MaxBacklog),
		trackRenamesCh:     make(chan fs.Object, fs.Config.Checkers),
	}
	// internal context used to cancel all the helper goroutines
	s.ctx, s.cancel = context.WithCancel(context.Background())
	if s.trackRenames {
		// Don't track renames for remotes without server-side move support.
		if !operations.CanServerSideMove(fdst) {
			fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
			s.trackRenames = false
		}
		// Rename matching is done by hash, so src and dst must share one.
		if s.commonHash == hash.None {
			fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
			s.trackRenames = false
		}
		if s.deleteMode == fs.DeleteModeOff {
			fs.Errorf(fdst, "Ignoring --track-renames as it doesn't work with copy or move, only sync")
			s.trackRenames = false
		}
	}
	if s.trackRenames {
		// track renames needs delete after - the rename pass in run()
		// happens before the deletions, so candidates must still exist
		if s.deleteMode != fs.DeleteModeOff {
			s.deleteMode = fs.DeleteModeAfter
		}
	}
	// Make Fs for --backup-dir if required
	if fs.Config.BackupDir != "" {
		var err error
		s.backupDir, err = fs.NewFs(fs.Config.BackupDir)
		if err != nil {
			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
		}
		// Backups are made with server-side move, so the backup dir must
		// support it, share config with the destination, and not overlap
		// with either side of the sync.
		if !operations.CanServerSideMove(s.backupDir) {
			return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
		}
		if !operations.SameConfig(fdst, s.backupDir) {
			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
		}
		if operations.Overlapping(fdst, s.backupDir) {
			return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
		}
		if operations.Overlapping(fsrc, s.backupDir) {
			return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
		}
		s.suffix = fs.Config.Suffix
	}
	return s, nil
}
|
||||
|
||||
// Check to see if the context has been cancelled
|
||||
func (s *syncCopyMove) aborting() bool {
|
||||
return s.ctx.Err() != nil
|
||||
}
|
||||
|
||||
// This reads the map and pumps it into the channel passed in, closing
|
||||
// the channel at the end
|
||||
func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
|
||||
outer:
|
||||
for _, o := range files {
|
||||
if s.aborting() {
|
||||
break outer
|
||||
}
|
||||
select {
|
||||
case out <- o:
|
||||
case <-s.ctx.Done():
|
||||
break outer
|
||||
}
|
||||
}
|
||||
close(out)
|
||||
s.srcFilesResult <- nil
|
||||
}
|
||||
|
||||
// This checks the types of errors returned while copying files
|
||||
func (s *syncCopyMove) processError(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
s.errorMu.Lock()
|
||||
defer s.errorMu.Unlock()
|
||||
switch {
|
||||
case fserrors.IsFatalError(err):
|
||||
if !s.aborting() {
|
||||
fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
|
||||
s.cancel()
|
||||
}
|
||||
s.fatalErr = err
|
||||
case fserrors.IsNoRetryError(err):
|
||||
s.noRetryErr = err
|
||||
default:
|
||||
s.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the current error (if any) in the order of prececedence
|
||||
// fatalErr
|
||||
// normal error
|
||||
// noRetryErr
|
||||
func (s *syncCopyMove) currentError() error {
|
||||
s.errorMu.Lock()
|
||||
defer s.errorMu.Unlock()
|
||||
if s.fatalErr != nil {
|
||||
return s.fatalErr
|
||||
}
|
||||
if s.err != nil {
|
||||
return s.err
|
||||
}
|
||||
return s.noRetryErr
|
||||
}
|
||||
|
||||
// pairChecker reads Objects~s on in send to out if they need transferring.
//
// Pairs that don't need a transfer are dropped (or, when moving, have
// their source deleted). If a destination exists and --backup-dir is
// set, the destination is first moved aside into the backup dir.
// Runs until in is closed or the context is cancelled.
//
// FIXME potentially doing lots of hashes at once
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pair, ok := in.Get(s.ctx)
		if !ok {
			// pipe closed or context cancelled
			return
		}
		src := pair.Src
		accounting.Stats.Checking(src.Remote())
		// Check to see if can store this
		if src.Storable() {
			if operations.NeedTransfer(pair.Dst, pair.Src) {
				// If files are treated as immutable, fail if destination exists and does not match
				if fs.Config.Immutable && pair.Dst != nil {
					fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
					s.processError(fs.ErrorImmutableModified)
				} else {
					// If destination already exists, then we must move it into --backup-dir if required
					if pair.Dst != nil && s.backupDir != nil {
						remoteWithSuffix := pair.Dst.Remote() + s.suffix
						overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
						_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
						if err != nil {
							s.processError(err)
						} else {
							// If successful zero out the dst as it is no longer there and copy the file
							pair.Dst = nil
							// NOTE(review): this early return (and the one below)
							// skips DoneChecking for the current object
							ok = out.Put(s.ctx, pair)
							if !ok {
								return
							}
						}
					} else {
						ok = out.Put(s.ctx, pair)
						if !ok {
							return
						}
					}
				}
			} else {
				// If moving need to delete the files we don't need to copy
				if s.DoMove {
					// Delete src if no error on copy
					s.processError(operations.DeleteFile(src))
				}
			}
		}
		accounting.Stats.DoneChecking(src.Remote())
	}
}
|
||||
|
||||
// pairRenamer reads Objects~s on in and attempts to rename them,
|
||||
// otherwise it sends them out if they need transferring.
|
||||
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
pair, ok := in.Get(s.ctx)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
if !s.tryRename(src) {
|
||||
// pass on if not renamed
|
||||
ok = out.Put(s.ctx, pair)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pairCopyOrMove reads Objects on in and moves or copies them.
|
||||
func (s *syncCopyMove) pairCopyOrMove(in *pipe, fdst fs.Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
var err error
|
||||
for {
|
||||
pair, ok := in.Get(s.ctx)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
accounting.Stats.Transferring(src.Remote())
|
||||
if s.DoMove {
|
||||
_, err = operations.Move(fdst, pair.Dst, src.Remote(), src)
|
||||
} else {
|
||||
_, err = operations.Copy(fdst, pair.Dst, src.Remote(), src)
|
||||
}
|
||||
s.processError(err)
|
||||
accounting.Stats.DoneTransferring(src.Remote(), err == nil)
|
||||
}
|
||||
}
|
||||
|
||||
// This starts the background checkers.
|
||||
func (s *syncCopyMove) startCheckers() {
|
||||
s.checkerWg.Add(fs.Config.Checkers)
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
go s.pairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
|
||||
}
|
||||
}
|
||||
|
||||
// stopCheckers closes the checkers' input pipe and waits for all
// checker goroutines to drain it and exit.
func (s *syncCopyMove) stopCheckers() {
	s.toBeChecked.Close()
	fs.Infof(s.fdst, "Waiting for checks to finish")
	s.checkerWg.Wait()
}
|
||||
|
||||
// This starts the background transfers
|
||||
func (s *syncCopyMove) startTransfers() {
|
||||
s.transfersWg.Add(fs.Config.Transfers)
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
go s.pairCopyOrMove(s.toBeUploaded, s.fdst, &s.transfersWg)
|
||||
}
|
||||
}
|
||||
|
||||
// stopTransfers closes the transfer pipe and waits for all transfer
// goroutines to finish their in-flight work and exit.
func (s *syncCopyMove) stopTransfers() {
	s.toBeUploaded.Close()
	fs.Infof(s.fdst, "Waiting for transfers to finish")
	s.transfersWg.Wait()
}
|
||||
|
||||
// This starts the background renamers.
|
||||
func (s *syncCopyMove) startRenamers() {
|
||||
if !s.trackRenames {
|
||||
return
|
||||
}
|
||||
s.renamerWg.Add(fs.Config.Checkers)
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, &s.renamerWg)
|
||||
}
|
||||
}
|
||||
|
||||
// stopRenamers closes the rename pipe and waits for all renamer
// goroutines to exit. No-op unless track renames is enabled.
func (s *syncCopyMove) stopRenamers() {
	if !s.trackRenames {
		return
	}
	s.toBeRenamed.Close()
	fs.Infof(s.fdst, "Waiting for renames to finish")
	s.renamerWg.Wait()
}
|
||||
|
||||
// This starts the collection of possible renames
|
||||
func (s *syncCopyMove) startTrackRenames() {
|
||||
if !s.trackRenames {
|
||||
return
|
||||
}
|
||||
s.trackRenamesWg.Add(1)
|
||||
go func() {
|
||||
defer s.trackRenamesWg.Done()
|
||||
for o := range s.trackRenamesCh {
|
||||
s.renameCheck = append(s.renameCheck, o)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// stopTrackRenames closes the rename-tracking channel and waits for
// the collector goroutine to finish filling s.renameCheck.
// No-op unless track renames is enabled.
func (s *syncCopyMove) stopTrackRenames() {
	if !s.trackRenames {
		return
	}
	close(s.trackRenamesCh)
	s.trackRenamesWg.Wait()
}
|
||||
|
||||
// This starts the background deletion of files for --delete-during
|
||||
func (s *syncCopyMove) startDeleters() {
|
||||
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
|
||||
return
|
||||
}
|
||||
s.deletersWg.Add(1)
|
||||
go func() {
|
||||
defer s.deletersWg.Done()
|
||||
err := operations.DeleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir)
|
||||
s.processError(err)
|
||||
}()
|
||||
}
|
||||
|
||||
// stopDeleters closes the delete channel and waits for the deleter
// goroutine to drain it. Only active for the delete-during and
// delete-only modes.
func (s *syncCopyMove) stopDeleters() {
	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
		return
	}
	close(s.deleteFilesCh)
	s.deletersWg.Wait()
}
|
||||
|
||||
// This deletes the files in the dstFiles map. If checkSrcMap is set
// then it checks to see if they exist first in srcFiles the source
// file map, otherwise it unconditionally deletes them. If
// checkSrcMap is clear then it assumes that the any source files that
// have been found have been removed from dstFiles already.
//
// Deletion is refused outright if the sync has already seen errors,
// unless --ignore-errors is set.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
	if accounting.Stats.Errored() && !fs.Config.IgnoreErrors {
		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		return fs.ErrorNotDeleting
	}

	// Delete the spare files
	// A feeder goroutine streams doomed objects into toDelete while
	// DeleteFilesWithBackupDir consumes them below.
	toDelete := make(fs.ObjectsChan, fs.Config.Transfers)
	go func() {
	outer:
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					// still present in the source - keep it
					continue
				}
			}
			if s.aborting() {
				break
			}
			select {
			case <-s.ctx.Done():
				break outer
			case toDelete <- o:
			}
		}
		// closing the channel lets the consumer finish
		close(toDelete)
	}()
	return operations.DeleteFilesWithBackupDir(toDelete, s.backupDir)
}
|
||||
|
||||
// This deletes the empty directories in the slice passed in. It
|
||||
// ignores any errors deleting directories
|
||||
func deleteEmptyDirectories(f fs.Fs, entriesMap map[string]fs.DirEntry) error {
|
||||
if len(entriesMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
if accounting.Stats.Errored() && !fs.Config.IgnoreErrors {
|
||||
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
|
||||
return fs.ErrorNotDeletingDirs
|
||||
}
|
||||
|
||||
var entries fs.DirEntries
|
||||
for _, entry := range entriesMap {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
// Now delete the empty directories starting from the longest path
|
||||
sort.Sort(entries)
|
||||
var errorCount int
|
||||
var okCount int
|
||||
for i := len(entries) - 1; i >= 0; i-- {
|
||||
entry := entries[i]
|
||||
dir, ok := entry.(fs.Directory)
|
||||
if ok {
|
||||
// TryRmdir only deletes empty directories
|
||||
err := operations.TryRmdir(f, dir.Remote())
|
||||
if err != nil {
|
||||
fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
|
||||
errorCount++
|
||||
} else {
|
||||
okCount++
|
||||
}
|
||||
} else {
|
||||
fs.Errorf(f, "Not a directory: %v", entry)
|
||||
}
|
||||
}
|
||||
if errorCount > 0 {
|
||||
fs.Debugf(f, "failed to delete %d directories", errorCount)
|
||||
}
|
||||
if okCount > 0 {
|
||||
fs.Debugf(f, "deleted %d directories", okCount)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This copies the empty directories in the slice passed in and logs
|
||||
// any errors copying the directories
|
||||
func copyEmptyDirectories(f fs.Fs, entries map[string]fs.DirEntry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var okCount int
|
||||
for _, entry := range entries {
|
||||
dir, ok := entry.(fs.Directory)
|
||||
if ok {
|
||||
err := f.Mkdir(dir.Remote())
|
||||
if err != nil {
|
||||
fs.Errorf(fs.LogDirName(f, dir.Remote()), "Failed to Mkdir: %v", err)
|
||||
accounting.Stats.Error(err)
|
||||
} else {
|
||||
okCount++
|
||||
}
|
||||
} else {
|
||||
fs.Errorf(f, "Not a directory: %v", entry)
|
||||
}
|
||||
}
|
||||
|
||||
if accounting.Stats.Errored() {
|
||||
fs.Debugf(f, "failed to copy %d directories", accounting.Stats.GetErrors())
|
||||
}
|
||||
|
||||
if okCount > 0 {
|
||||
fs.Debugf(f, "copied %d directories", okCount)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *syncCopyMove) srcParentDirCheck(entry fs.DirEntry) {
|
||||
// If we are moving files then we don't want to remove directories with files in them
|
||||
// from the srcEmptyDirs as we are about to move them making the directory empty.
|
||||
if s.DoMove {
|
||||
return
|
||||
}
|
||||
parentDir := path.Dir(entry.Remote())
|
||||
if parentDir == "." {
|
||||
parentDir = ""
|
||||
}
|
||||
if _, ok := s.srcEmptyDirs[parentDir]; ok {
|
||||
delete(s.srcEmptyDirs, parentDir)
|
||||
}
|
||||
}
|
||||
|
||||
// renameHash makes a string with the size and the hash for rename detection
|
||||
//
|
||||
// it may return an empty string in which case no hash could be made
|
||||
func (s *syncCopyMove) renameHash(obj fs.Object) (hash string) {
|
||||
var err error
|
||||
hash, err = obj.Hash(s.commonHash)
|
||||
if err != nil {
|
||||
fs.Debugf(obj, "Hash failed: %v", err)
|
||||
return ""
|
||||
}
|
||||
if hash == "" {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%d,%s", obj.Size(), hash)
|
||||
}
|
||||
|
||||
// pushRenameMap adds the object with hash to the rename map
|
||||
func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
|
||||
s.renameMapMu.Lock()
|
||||
s.renameMap[hash] = append(s.renameMap[hash], obj)
|
||||
s.renameMapMu.Unlock()
|
||||
}
|
||||
|
||||
// popRenameMap finds the object with hash and pop the first match from
|
||||
// renameMap or returns nil if not found.
|
||||
func (s *syncCopyMove) popRenameMap(hash string) (dst fs.Object) {
|
||||
s.renameMapMu.Lock()
|
||||
dsts, ok := s.renameMap[hash]
|
||||
if ok && len(dsts) > 0 {
|
||||
dst, dsts = dsts[0], dsts[1:]
|
||||
if len(dsts) > 0 {
|
||||
s.renameMap[hash] = dsts
|
||||
} else {
|
||||
delete(s.renameMap, hash)
|
||||
}
|
||||
}
|
||||
s.renameMapMu.Unlock()
|
||||
return dst
|
||||
}
|
||||
|
||||
// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck
//
// Hashing is fanned out over fs.Config.Transfers workers; only dst
// objects whose size matches some candidate are hashed at all.
func (s *syncCopyMove) makeRenameMap() {
	fs.Infof(s.fdst, "Making map for --track-renames")

	// first make a map of possible sizes we need to check
	possibleSizes := map[int64]struct{}{}
	for _, obj := range s.renameCheck {
		possibleSizes[obj.Size()] = struct{}{}
	}

	// pump all the dstFiles into in
	in := make(chan fs.Object, fs.Config.Checkers)
	go s.pumpMapToChan(s.dstFiles, in)

	// now make a map of size,hash for all dstFiles
	s.renameMap = make(map[string][]fs.Object)
	var wg sync.WaitGroup
	wg.Add(fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for obj := range in {
				// only create hash for dst fs.Object if its size could match
				if _, found := possibleSizes[obj.Size()]; found {
					accounting.Stats.Checking(obj.Remote())
					hash := s.renameHash(obj)
					if hash != "" {
						// pushRenameMap takes the lock, so concurrent
						// workers are safe here
						s.pushRenameMap(hash, obj)
					}
					accounting.Stats.DoneChecking(obj.Remote())
				}
			}
		}()
	}
	wg.Wait()
	fs.Infof(s.fdst, "Finished making map for --track-renames")
}
|
||||
|
||||
// tryRename renames a src object when doing track renames if
// possible, it returns true if the object was renamed.
//
// A rename happens when a dst object with the same size+hash key is
// found in the rename map: that dst object is moved server-side to
// src's remote name, avoiding a re-upload.
func (s *syncCopyMove) tryRename(src fs.Object) bool {
	accounting.Stats.Checking(src.Remote())
	defer accounting.Stats.DoneChecking(src.Remote())

	// Calculate the hash of the src object
	hash := s.renameHash(src)
	if hash == "" {
		// no usable hash - can't match a rename candidate
		return false
	}

	// Get a match on fdst
	dst := s.popRenameMap(hash)
	if dst == nil {
		return false
	}

	// Find dst object we are about to overwrite if it exists
	// (error ignored: a nil object just means nothing to overwrite)
	dstOverwritten, _ := s.fdst.NewObject(src.Remote())

	// Rename dst to have name src.Remote()
	_, err := operations.Move(s.fdst, dstOverwritten, src.Remote(), dst)
	if err != nil {
		fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
		return false
	}

	// remove file from dstFiles if present
	s.dstFilesMu.Lock()
	delete(s.dstFiles, dst.Remote())
	s.dstFilesMu.Unlock()

	fs.Infof(src, "Renamed from %q", dst.Remote())
	return true
}
|
||||
|
||||
// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
//
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
//
// The stages, in order: start the checker/renamer/transfer/deleter
// pipeline, march the two listings against each other, run the rename
// pass, drain the pipeline, copy empty dirs, then do any deferred
// deletions and directory pruning. Returns the first recorded error
// by precedence (see currentError).
func (s *syncCopyMove) run() error {
	if operations.Same(s.fdst, s.fsrc) {
		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// Start background checking and transferring pipeline
	s.startCheckers()
	s.startRenamers()
	s.startTransfers()
	s.startDeleters()
	s.dstFiles = make(map[string]fs.Object)

	s.startTrackRenames()

	// set up a march over fdst and fsrc
	// (the march calls back into SrcOnly/DstOnly/Match below)
	m := march.New(s.ctx, s.fdst, s.fsrc, s.dir, s)
	m.Run()

	s.stopTrackRenames()
	if s.trackRenames {
		// Build the map of the remaining dstFiles by hash
		s.makeRenameMap()
		// Attempt renames for all the files which don't have a matching dst
		for _, src := range s.renameCheck {
			ok := s.toBeRenamed.Put(s.ctx, fs.ObjectPair{Src: src, Dst: nil})
			if !ok {
				break
			}
		}
	}

	// Stop background checking and transferring pipeline
	// (order matters: checkers feed renamers feed transfers)
	s.stopCheckers()
	s.stopRenamers()
	s.stopTransfers()
	s.stopDeleters()

	s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))

	// Delete files after
	if s.deleteMode == fs.DeleteModeAfter {
		if s.currentError() != nil && !fs.Config.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		} else {
			s.processError(s.deleteFiles(false))
		}
	}

	// Prune empty directories
	if s.deleteMode != fs.DeleteModeOff {
		if s.currentError() != nil && !fs.Config.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
		} else {
			s.processError(deleteEmptyDirectories(s.fdst, s.dstEmptyDirs))
		}
	}

	// Delete empty fsrc subdirectories
	// if DoMove and --delete-empty-src-dirs flag is set
	if s.DoMove && s.deleteEmptySrcDirs {
		//delete empty subdirectories that were part of the move
		s.processError(deleteEmptyDirectories(s.fsrc, s.srcEmptyDirs))
	}

	// cancel the context to free resources
	s.cancel()
	return s.currentError()
}
|
||||
|
||||
// DstOnly have an object which is in the destination only
//
// Depending on the delete mode, the extraneous file is either recorded
// for a later delete pass or streamed straight to the deleters.
// Returns true to recurse into a directory.
func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOff {
		// not deleting - nothing to do for dst-only entries
		return false
	}
	switch x := dst.(type) {
	case fs.Object:
		switch s.deleteMode {
		case fs.DeleteModeAfter:
			// record object as needs deleting
			s.dstFilesMu.Lock()
			s.dstFiles[x.Remote()] = x
			s.dstFilesMu.Unlock()
		case fs.DeleteModeDuring, fs.DeleteModeOnly:
			select {
			case <-s.ctx.Done():
				// cancelled - stop feeding the deleters
				return
			case s.deleteFilesCh <- x:
			}
		default:
			panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		// Record directory as it is potentially empty and needs deleting
		if s.fdst.Features().CanHaveEmptyDirectories {
			s.dstEmptyDirsMu.Lock()
			s.dstEmptyDirs[dst.Remote()] = dst
			s.dstEmptyDirsMu.Unlock()
		}
		return true
	default:
		panic("Bad object in DirEntries")

	}
	return false
}
|
||||
|
||||
// SrcOnly have an object which is in the source only
//
// Src-only files are either saved as rename candidates (when track
// renames is on) or queued directly for upload, since no matching
// destination exists. Returns true to recurse into a directory.
func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOnly {
		// delete-only pass never transfers anything
		return false
	}
	switch x := src.(type) {
	case fs.Object:
		// If it's a copy operation,
		// remove parent directory from srcEmptyDirs
		// since it's not really empty
		s.srcEmptyDirsMu.Lock()
		s.srcParentDirCheck(src)
		s.srcEmptyDirsMu.Unlock()

		if s.trackRenames {
			// Save object to check for a rename later
			select {
			case <-s.ctx.Done():
				// cancelled - stop collecting candidates
				return
			case s.trackRenamesCh <- x:
			}
		} else {
			// No need to check since doesn't exist
			ok := s.toBeUploaded.Put(s.ctx, fs.ObjectPair{Src: x, Dst: nil})
			if !ok {
				return
			}
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		// Record the directory for deletion
		s.srcEmptyDirsMu.Lock()
		s.srcParentDirCheck(src)
		s.srcEmptyDirs[src.Remote()] = src
		s.srcEmptyDirsMu.Unlock()
		return true
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
|
||||
|
||||
// Match is called when src and dst are present, so sync src to dst
//
// File/file pairs are queued for the checkers to decide whether a
// transfer is needed; file/dir and dir/file mismatches are reported as
// errors. Returns true to recurse into a matched directory pair.
func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		// a file exists here, so the parent dir is not empty
		s.srcEmptyDirsMu.Lock()
		s.srcParentDirCheck(src)
		s.srcEmptyDirsMu.Unlock()

		if s.deleteMode == fs.DeleteModeOnly {
			// delete-only pass never transfers anything
			return false
		}
		dstX, ok := dst.(fs.Object)
		if ok {
			ok = s.toBeChecked.Put(s.ctx, fs.ObjectPair{Src: srcX, Dst: dstX})
			if !ok {
				// pipe closed or context cancelled
				return false
			}
		} else {
			// FIXME src is file, dst is directory
			err := errors.New("can't overwrite directory with file")
			fs.Errorf(dst, "%v", err)
			s.processError(err)
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		_, ok := dst.(fs.Directory)
		if ok {
			// Record the src directory for deletion
			s.srcEmptyDirsMu.Lock()
			s.srcParentDirCheck(src)
			s.srcEmptyDirs[src.Remote()] = src
			s.srcEmptyDirsMu.Unlock()
			return true
		}
		// FIXME src is dir, dst is file
		err := errors.New("can't overwrite file with directory")
		fs.Errorf(dst, "%v", err)
		s.processError(err)
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
|
||||
|
||||
// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
//
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
//
// --delete-before is implemented as two passes: a delete-only pass
// followed by a copy-only pass.
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
	// moving implies the source empties out, so combining it with
	// destination deletion is rejected as a usage error
	if deleteMode != fs.DeleteModeOff && DoMove {
		return fserrors.FatalError(errors.New("can't delete and move at the same time"))
	}
	// Run an extra pass to delete only
	if deleteMode == fs.DeleteModeBefore {
		if fs.Config.TrackRenames {
			return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
		}
		// only delete stuff during in this pass
		do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
		if err != nil {
			return err
		}
		err = do.run()
		if err != nil {
			return err
		}
		// Next pass does a copy only
		deleteMode = fs.DeleteModeOff
	}
	do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
	if err != nil {
		return err
	}
	return do.run()
}
|
||||
|
||||
// Sync fsrc into fdst, deleting extraneous destination files according
// to the globally configured delete mode.
func Sync(fdst, fsrc fs.Fs) error {
	return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
}
|
||||
|
||||
// CopyDir copies fsrc into fdst without deleting anything from fdst.
func CopyDir(fdst, fsrc fs.Fs) error {
	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
}
|
||||
|
||||
// moveDir moves fsrc into fdst file by file, optionally removing
// empty source directories afterwards.
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
}
|
||||
|
||||
// MoveDir moves fsrc into fdst
//
// It first attempts a single server-side DirMove when the remotes
// share a config and no filters are active, falling back to per-file
// moves otherwise.
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
	if operations.Same(fdst, fsrc) {
		fs.Errorf(fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// First attempt to use DirMover if exists, same Fs and no filters are active
	if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && filter.Active.InActive() {
		if fs.Config.DryRun {
			fs.Logf(fdst, "Not doing server side directory move as --dry-run")
			return nil
		}
		fs.Debugf(fdst, "Using server side directory move")
		err := fdstDirMove(fsrc, "", "")
		switch err {
		case fs.ErrorCantDirMove, fs.ErrorDirExists:
			// recoverable - fall through to the per-file path below
			fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
		case nil:
			fs.Infof(fdst, "Server side directory move succeeded")
			return nil
		default:
			fs.CountError(err)
			fs.Errorf(fdst, "Server side directory move failed: %v", err)
			return err
		}
	}

	// The two remotes mustn't overlap if we didn't do server side move
	if operations.Overlapping(fdst, fsrc) {
		err := fs.ErrorCantMoveOverlapping
		fs.Errorf(fdst, "%v", err)
		return err
	}

	// Otherwise move the files one by one
	return moveDir(fdst, fsrc, deleteEmptySrcDirs)
}
|
||||
1218
.rclone_repo/fs/sync/sync_test.go
Executable file
1218
.rclone_repo/fs/sync/sync_test.go
Executable file
File diff suppressed because it is too large
Load Diff
4
.rclone_repo/fs/version.go
Executable file
4
.rclone_repo/fs/version.go
Executable file
@@ -0,0 +1,4 @@
|
||||
package fs
|
||||
|
||||
// Version of rclone
|
||||
var Version = "v1.43.1"
|
||||
7
.rclone_repo/fs/versioncheck.go
Executable file
7
.rclone_repo/fs/versioncheck.go
Executable file
@@ -0,0 +1,7 @@
|
||||
//+build !go1.7
|
||||
|
||||
package fs
|
||||
|
||||
// Upgrade to Go version 1.7 to compile rclone - latest stable go
|
||||
// compiler recommended.
|
||||
func init() { Go_version_1_7_required_for_compilation() }
|
||||
552
.rclone_repo/fs/walk/walk.go
Executable file
552
.rclone_repo/fs/walk/walk.go
Executable file
@@ -0,0 +1,552 @@
|
||||
// Package walk walks directories
|
||||
package walk
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/filter"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrorSkipDir is used as a return value from Walk to indicate that the
|
||||
// directory named in the call is to be skipped. It is not returned as
|
||||
// an error by any function.
|
||||
var ErrorSkipDir = errors.New("skip this directory")
|
||||
|
||||
// ErrorCantListR is returned by WalkR if the underlying Fs isn't
|
||||
// capable of doing a recursive listing.
|
||||
var ErrorCantListR = errors.New("recursive directory listing not available")
|
||||
|
||||
// Func is the type of the function called for directory
|
||||
// visited by Walk. The path argument contains remote path to the directory.
|
||||
//
|
||||
// If there was a problem walking to directory named by path, the
|
||||
// incoming error will describe the problem and the function can
|
||||
// decide how to handle that error (and Walk will not descend into
|
||||
// that directory). If an error is returned, processing stops. The
|
||||
// sole exception is when the function returns the special value
|
||||
// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
|
||||
// directory's contents entirely.
|
||||
type Func func(path string, entries fs.DirEntries, err error) error
|
||||
|
||||
// Walk lists the directory.
|
||||
//
|
||||
// If includeAll is not set it will use the filters defined.
|
||||
//
|
||||
// If maxLevel is < 0 then it will recurse indefinitely, else it will
|
||||
// only do maxLevel levels.
|
||||
//
|
||||
// It calls fn for each tranche of DirEntries read.
|
||||
//
|
||||
// Note that fn will not be called concurrently whereas the directory
|
||||
// listing will proceed concurrently.
|
||||
//
|
||||
// Parent directories are always listed before their children
|
||||
//
|
||||
// This is implemented by WalkR if Config.UseRecursiveListing is true
|
||||
// and f supports it and level > 1, or WalkN otherwise.
|
||||
//
|
||||
// NB (f, path) to be replaced by fs.Dir at some point
|
||||
func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
	// Use the recursive lister only when recursing more than one level,
	// fs.Config.UseListR is set, and the backend provides ListR.
	if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
		return walkListR(f, path, includeAll, maxLevel, fn)
	}
	return walkListDirSorted(f, path, includeAll, maxLevel, fn)
}
|
||||
|
||||
// walkListDirSorted lists the directory.
|
||||
//
|
||||
// It implements Walk using non recursive directory listing.
|
||||
func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
	// delegate to the generic walker with the sorted per-directory lister
	return walk(f, path, includeAll, maxLevel, fn, list.DirSorted)
}
|
||||
|
||||
// walkListR lists the directory.
|
||||
//
|
||||
// It implements Walk using recursive directory listing if
|
||||
// available, or returns ErrorCantListR if not.
|
||||
func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
|
||||
listR := f.Features().ListR
|
||||
if listR == nil {
|
||||
return ErrorCantListR
|
||||
}
|
||||
return walkR(f, path, includeAll, maxLevel, fn, listR)
|
||||
}
|
||||
|
||||
// listDirFunc is the signature of a function which lists a single
// directory dir of fs, returning its entries. It is the pluggable
// listing primitive used by walk.
type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
// walk lists the tree rooted at path, calling fn once per directory
// with that directory's entries, using up to fs.Config.Checkers
// concurrent listing goroutines.
//
// Directory listings run concurrently, but fn itself is serialized by
// mu. The first error from listDir or fn (other than ErrorSkipDir)
// aborts the walk and is returned.
func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
	var (
		wg         sync.WaitGroup // sync closing of go routines
		traversing sync.WaitGroup // running directory traversals
		doClose    sync.Once      // close the channel once
		mu         sync.Mutex     // stop fn being called concurrently
	)
	// listJob describe a directory listing that needs to be done
	type listJob struct {
		remote string // directory to list
		depth  int    // levels still to descend; 0 stops recursion, < 0 is unlimited
	}

	in := make(chan listJob, fs.Config.Checkers)
	errs := make(chan error, 1) // buffered so the first error can be stored without a reader
	quit := make(chan struct{})
	// closeQuit tells the workers to exit, then drains in so that the
	// traversing counter is balanced for jobs which will never be
	// listed — otherwise traversing.Wait() below would block forever.
	closeQuit := func() {
		doClose.Do(func() {
			close(quit)
			go func() {
				for range in {
					traversing.Done()
				}
			}()
		})
	}
	for i := 0; i < fs.Config.Checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case job, ok := <-in:
					if !ok {
						return
					}
					entries, err := listDir(f, includeAll, job.remote)
					var jobs []listJob
					if err == nil && job.depth != 0 {
						entries.ForDir(func(dir fs.Directory) {
							// Recurse for the directory
							jobs = append(jobs, listJob{
								remote: dir.Remote(),
								depth:  job.depth - 1,
							})
						})
					}
					// fn is given the listing error (if any) and may
					// override it; serialize calls with mu.
					mu.Lock()
					err = fn(job.remote, entries, err)
					mu.Unlock()
					// NB once we have passed entries to fn we mustn't touch it again
					if err != nil && err != ErrorSkipDir {
						traversing.Done()
						fs.CountError(err)
						fs.Errorf(job.remote, "error listing: %v", err)
						closeQuit()
						// Send error to error channel if space
						select {
						case errs <- err:
						default:
						}
						continue
					}
					if err == nil && len(jobs) > 0 {
						traversing.Add(len(jobs))
						go func() {
							// Now we have traversed this directory, send these
							// jobs off for traversal in the background
							// (in a goroutine since in may be full)
							for _, newJob := range jobs {
								in <- newJob
							}
						}()
					}
					traversing.Done()
				case <-quit:
					return
				}
			}
		}()
	}
	// Start the process
	traversing.Add(1)
	in <- listJob{
		remote: path,
		depth:  maxLevel - 1,
	}
	traversing.Wait() // wait until every queued directory has been handled
	close(in)
	wg.Wait() // wait for the workers to exit
	close(errs)
	// return the first error returned or nil
	return <-errs
}
// DirTree is a map of directories to entries
//
// Each key is a directory path ("" for the root, see parentDir) and
// each value holds the entries directly inside that directory.
type DirTree map[string]fs.DirEntries
// parentDir finds the parent directory of entryPath, returning ""
// (rather than path.Dir's ".") for a top-level entry.
func parentDir(entryPath string) string {
	if dir := path.Dir(entryPath); dir != "." {
		return dir
	}
	return ""
}
// add an entry to the tree
|
||||
func (dt DirTree) add(entry fs.DirEntry) {
|
||||
dirPath := parentDir(entry.Remote())
|
||||
dt[dirPath] = append(dt[dirPath], entry)
|
||||
}
|
||||
|
||||
// add a directory entry to the tree
|
||||
func (dt DirTree) addDir(entry fs.DirEntry) {
|
||||
dt.add(entry)
|
||||
// create the directory itself if it doesn't exist already
|
||||
dirPath := entry.Remote()
|
||||
if _, ok := dt[dirPath]; !ok {
|
||||
dt[dirPath] = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Find returns the DirEntry for filePath or nil if not found
|
||||
func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
|
||||
parentPath = parentDir(filePath)
|
||||
for _, entry := range dt[parentPath] {
|
||||
if entry.Remote() == filePath {
|
||||
return parentPath, entry
|
||||
}
|
||||
}
|
||||
return parentPath, nil
|
||||
}
|
||||
|
||||
// check that dirPath has a *Dir in its parent
|
||||
func (dt DirTree) checkParent(root, dirPath string) {
|
||||
if dirPath == root {
|
||||
return
|
||||
}
|
||||
parentPath, entry := dt.Find(dirPath)
|
||||
if entry != nil {
|
||||
return
|
||||
}
|
||||
dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
|
||||
dt.checkParent(root, parentPath)
|
||||
}
|
||||
|
||||
// check every directory in the tree has *Dir in its parent
|
||||
func (dt DirTree) checkParents(root string) {
|
||||
for dirPath := range dt {
|
||||
dt.checkParent(root, dirPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort sorts all the Entries
|
||||
func (dt DirTree) Sort() {
|
||||
for _, entries := range dt {
|
||||
sort.Stable(entries)
|
||||
}
|
||||
}
|
||||
|
||||
// Dirs returns the directories in sorted order
|
||||
func (dt DirTree) Dirs() (dirNames []string) {
|
||||
for dirPath := range dt {
|
||||
dirNames = append(dirNames, dirPath)
|
||||
}
|
||||
sort.Strings(dirNames)
|
||||
return dirNames
|
||||
}
|
||||
|
||||
// Prune remove directories from a directory tree. dirNames contains
|
||||
// all directories to remove as keys, with true as values. dirNames
|
||||
// will be modified in the function.
|
||||
func (dt DirTree) Prune(dirNames map[string]bool) error {
|
||||
// We use map[string]bool to avoid recursion (and potential
|
||||
// stack exhaustion).
|
||||
|
||||
// First we need delete directories from their parents.
|
||||
for dName, remove := range dirNames {
|
||||
if !remove {
|
||||
// Currently all values should be
|
||||
// true, therefore this should not
|
||||
// happen. But this makes function
|
||||
// more predictable.
|
||||
fs.Infof(dName, "Directory in the map for prune, but the value is false")
|
||||
continue
|
||||
}
|
||||
if dName == "" {
|
||||
// if dName is root, do nothing (no parent exist)
|
||||
continue
|
||||
}
|
||||
parent := parentDir(dName)
|
||||
// It may happen that dt does not have a dName key,
|
||||
// since directory was excluded based on a filter. In
|
||||
// such case the loop will be skipped.
|
||||
for i, entry := range dt[parent] {
|
||||
switch x := entry.(type) {
|
||||
case fs.Directory:
|
||||
if x.Remote() == dName {
|
||||
// the slice is not sorted yet
|
||||
// to delete item
|
||||
// a) replace it with the last one
|
||||
dt[parent][i] = dt[parent][len(dt[parent])-1]
|
||||
// b) remove last
|
||||
dt[parent] = dt[parent][:len(dt[parent])-1]
|
||||
// we modify a slice within a loop, but we stop
|
||||
// iterating immediately
|
||||
break
|
||||
}
|
||||
case fs.Object:
|
||||
// do nothing
|
||||
default:
|
||||
return errors.Errorf("unknown object type %T", entry)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for len(dirNames) > 0 {
|
||||
// According to golang specs, if new keys were added
|
||||
// during range iteration, they may be skipped.
|
||||
for dName, remove := range dirNames {
|
||||
if !remove {
|
||||
fs.Infof(dName, "Directory in the map for prune, but the value is false")
|
||||
continue
|
||||
}
|
||||
// First, add all subdirectories to dirNames.
|
||||
|
||||
// It may happen that dt[dName] does not exist.
|
||||
// If so, the loop will be skipped.
|
||||
for _, entry := range dt[dName] {
|
||||
switch x := entry.(type) {
|
||||
case fs.Directory:
|
||||
excludeDir := x.Remote()
|
||||
dirNames[excludeDir] = true
|
||||
case fs.Object:
|
||||
// do nothing
|
||||
default:
|
||||
return errors.Errorf("unknown object type %T", entry)
|
||||
|
||||
}
|
||||
}
|
||||
// Then remove current directory from DirTree
|
||||
delete(dt, dName)
|
||||
// and from dirNames
|
||||
delete(dirNames, dName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String emits a simple representation of the DirTree
|
||||
func (dt DirTree) String() string {
|
||||
out := new(bytes.Buffer)
|
||||
for _, dir := range dt.Dirs() {
|
||||
_, _ = fmt.Fprintf(out, "%s/\n", dir)
|
||||
for _, entry := range dt[dir] {
|
||||
flag := ""
|
||||
if _, ok := entry.(fs.Directory); ok {
|
||||
flag = "/"
|
||||
}
|
||||
_, _ = fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag)
|
||||
}
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// walkRDirTree builds a DirTree for the tree rooted at startPath
// using a single recursive listing via listR, applying the active
// filters (unless includeAll is set) and the maxLevel depth limit.
//
// After listing it fills in any missing parent directories, prunes
// directories excluded by filter.Active.Opt.ExcludeFile, and sorts
// the result.
func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) {
	dirs := make(DirTree)
	// Entries can come in arbitrary order. We use toPrune to keep
	// all directories to exclude later.
	toPrune := make(map[string]bool)
	includeDirectory := filter.Active.IncludeDirectory(f)
	// mu guards dirs and toPrune — the listR callback may be invoked
	// concurrently (hence the lock; confirm against the backend's
	// ListR contract).
	var mu sync.Mutex
	err := listR(startPath, func(entries fs.DirEntries) error {
		mu.Lock()
		defer mu.Unlock()
		for _, entry := range entries {
			// slashes counts the entry's depth below the root
			slashes := strings.Count(entry.Remote(), "/")
			switch x := entry.(type) {
			case fs.Object:
				// Make sure we don't delete excluded files if not required
				if includeAll || filter.Active.IncludeObject(x) {
					if maxLevel < 0 || slashes <= maxLevel-1 {
						dirs.add(x)
					} else {
						// Make sure we include any parent directories of excluded objects
						dirPath := x.Remote()
						for ; slashes > maxLevel-1; slashes-- {
							dirPath = parentDir(dirPath)
						}
						dirs.checkParent(startPath, dirPath)
					}
				} else {
					fs.Debugf(x, "Excluded from sync (and deletion)")
				}
				// Check if we need to prune a directory later.
				if !includeAll && len(filter.Active.Opt.ExcludeFile) > 0 {
					basename := path.Base(x.Remote())
					if basename == filter.Active.Opt.ExcludeFile {
						excludeDir := parentDir(x.Remote())
						toPrune[excludeDir] = true
						fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
					}
				}
			case fs.Directory:
				inc, err := includeDirectory(x.Remote())
				if err != nil {
					return err
				}
				if includeAll || inc {
					if maxLevel < 0 || slashes <= maxLevel-1 {
						if slashes == maxLevel-1 {
							// Just add the object if at maxLevel
							dirs.add(x)
						} else {
							dirs.addDir(x)
						}
					}
				} else {
					fs.Debugf(x, "Excluded from sync (and deletion)")
				}
			default:
				return errors.Errorf("unknown object type %T", entry)
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// Entries arrived in arbitrary order: synthesise any parent
	// directories that the listing did not deliver explicitly.
	dirs.checkParents(startPath)
	if len(dirs) == 0 {
		// an empty listing still yields a tree containing the root
		dirs[startPath] = nil
	}
	err = dirs.Prune(toPrune)
	if err != nil {
		return nil, err
	}
	dirs.Sort()
	return dirs, nil
}
// Create a DirTree using List
|
||||
func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
|
||||
dirs := make(DirTree)
|
||||
fn := func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err == nil {
|
||||
dirs[dirPath] = entries
|
||||
}
|
||||
return err
|
||||
}
|
||||
err := walk(f, path, includeAll, maxLevel, fn, listDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
// NewDirTree returns a DirTree filled with the directory listing
|
||||
// using the parameters supplied.
|
||||
//
|
||||
// If includeAll is not set it will use the filters defined.
|
||||
//
|
||||
// If maxLevel is < 0 then it will recurse indefinitely, else it will
|
||||
// only do maxLevel levels.
|
||||
//
|
||||
// This is implemented by WalkR if Config.UseRecursiveListing is true
|
||||
// and f supports it and level > 1, or WalkN otherwise.
|
||||
//
|
||||
// NB (f, path) to be replaced by fs.Dir at some point
|
||||
func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
|
||||
if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
|
||||
return walkRDirTree(f, path, includeAll, maxLevel, ListR)
|
||||
}
|
||||
return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted)
|
||||
}
|
||||
|
||||
func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
|
||||
dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
skipping := false
|
||||
skipPrefix := ""
|
||||
emptyDir := fs.DirEntries{}
|
||||
for _, dirPath := range dirs.Dirs() {
|
||||
if skipping {
|
||||
// Skip over directories as required
|
||||
if strings.HasPrefix(dirPath, skipPrefix) {
|
||||
continue
|
||||
}
|
||||
skipping = false
|
||||
}
|
||||
entries := dirs[dirPath]
|
||||
if entries == nil {
|
||||
entries = emptyDir
|
||||
}
|
||||
err = fn(dirPath, entries, nil)
|
||||
if err == ErrorSkipDir {
|
||||
skipping = true
|
||||
skipPrefix = dirPath
|
||||
if skipPrefix != "" {
|
||||
skipPrefix += "/"
|
||||
}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAll runs Walk getting all the results
|
||||
func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
|
||||
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
objs = append(objs, x)
|
||||
case fs.Directory:
|
||||
dirs = append(dirs, x)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// ListRHelper is used in the implementation of ListR to accumulate DirEntries
type ListRHelper struct {
	callback fs.ListRCallback // callback accumulated entries are delivered to
	entries  fs.DirEntries    // entries buffered so far, flushed by send
}
// NewListRHelper should be called from ListR with the callback passed in
|
||||
func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
|
||||
return &ListRHelper{
|
||||
callback: callback,
|
||||
}
|
||||
}
|
||||
|
||||
// send sends the stored entries to the callback if there are >= max
|
||||
// entries.
|
||||
func (lh *ListRHelper) send(max int) (err error) {
|
||||
if len(lh.entries) >= max {
|
||||
err = lh.callback(lh.entries)
|
||||
lh.entries = lh.entries[:0]
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Add an entry to the stored entries and send them if there are more
// than a certain amount
//
// nil entries are silently ignored.
func (lh *ListRHelper) Add(entry fs.DirEntry) error {
	if entry == nil {
		return nil
	}
	lh.entries = append(lh.entries, entry)
	// flush to the callback once 100 entries have accumulated
	return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
//
// send(1) fires whenever at least one entry is buffered, so this
// delivers any remainder at the end of a listing.
func (lh *ListRHelper) Flush() error {
	return lh.send(1)
}
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user