overdue

.rclone_repo/fstest/fstest.go (new executable file, 501 lines)
@@ -0,0 +1,501 @@
// Package fstest provides utilities for testing the Fs
package fstest

// FIXME put name of test FS in Fs structure

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/text/unicode/norm"
)

// Globals
var (
	RemoteName      = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
	SubDir          = flag.Bool("subdir", false, "Set to test with a sub directory")
	Verbose         = flag.Bool("verbose", false, "Set to enable logging")
	DumpHeaders     = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
	DumpBodies      = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
	Individual      = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
	LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
	UseListR        = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
	// ListRetries is the number of times to retry a listing to overcome eventual consistency
	ListRetries = flag.Int("list-retries", 6, "Number of times to retry listing")
	// MatchTestRemote matches the remote names used for testing
	MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)

// Seed the random number generator
func init() {
	rand.Seed(time.Now().UnixNano())
}

// Initialise rclone for testing
func Initialise() {
	// Never ask for passwords, fail instead.
	// If your local config is encrypted set environment variable
	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
	fs.Config.AskPassword = false
	// Override the config file from the environment - we don't
	// parse the flags any more so this doesn't happen
	// automatically
	if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
		config.ConfigPath = envConfig
	}
	config.LoadConfig()
	if *Verbose {
		fs.Config.LogLevel = fs.LogLevelDebug
	}
	if *DumpHeaders {
		fs.Config.Dump |= fs.DumpHeaders
	}
	if *DumpBodies {
		fs.Config.Dump |= fs.DumpBodies
	}
	fs.Config.LowLevelRetries = *LowLevelRetries
	fs.Config.UseListR = *UseListR
}

// Item represents an item for checking
type Item struct {
	Path    string
	Hashes  map[hash.Type]string
	ModTime time.Time
	Size    int64
	WinPath string
}

// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
	i := Item{
		Path:    Path,
		ModTime: modTime,
		Size:    int64(len(Content)),
	}
	hash := hash.NewMultiHasher()
	buf := bytes.NewBufferString(Content)
	_, err := io.Copy(hash, buf)
	if err != nil {
		log.Fatalf("Failed to create item: %v", err)
	}
	i.Hashes = hash.Sums()
	return i
}
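
// Illustrative sketch (not part of the original commit): NewItem derives Size
// and Hashes from the content, so a test fixture can be declared in one line
// and later compared against a real object with Item.Check.
func exampleNewItemSketch() Item {
	modTime := Time("2001-02-03T04:05:06.499999999Z")
	return NewItem("hello.txt", "hello world", modTime) // Size == 11, Hashes filled in from the content
}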

// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}
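
// Illustrative sketch (not part of the original commit): the window is open at
// both ends, so a difference exactly equal to the precision counts as unequal.
func exampleCheckTimeEqualSketch() {
	t0 := Time("2011-12-25T12:59:59.123456789Z")
	dt, ok := CheckTimeEqualWithPrecision(t0, t0.Add(500*time.Millisecond), time.Second)
	fmt.Println(dt, ok) // -500ms true
	dt, ok = CheckTimeEqualWithPrecision(t0, t0.Add(time.Second), time.Second)
	fmt.Println(dt, ok) // -1s false
}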

// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
	dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision))
}

// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
	require.NotNil(t, obj)
	types := obj.Fs().Hashes().Array()
	for _, Hash := range types {
		// Check attributes
		sum, err := obj.Hash(Hash)
		require.NoError(t, err)
		assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
	}
}

// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
	i.CheckHashes(t, obj)
	assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
	i.CheckModTime(t, obj, obj.ModTime(), precision)
}

// WinPath converts a path into a windows safe path
func WinPath(s string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_'
		}
		return r
	}, s)
}

// Normalize runs a utf8 normalization on the string if running on OS
// X. This is because OS X denormalizes file names it writes to the
// local file system.
func Normalize(name string) string {
	if runtime.GOOS == "darwin" {
		name = norm.NFC.String(name)
	}
	return name
}
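
// Illustrative sketch (not part of the original commit): WinPath only rewrites
// characters which are illegal in Windows file names; everything else is left alone.
func exampleWinPathSketch() {
	fmt.Println(WinPath("potato:salad?.txt")) // potato_salad_.txt
	fmt.Println(WinPath("ordinary.txt"))      // ordinary.txt
}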

// Items represents all items for checking
type Items struct {
	byName    map[string]*Item
	byNameAlt map[string]*Item
	items     []Item
}

// NewItems makes an Items
func NewItems(items []Item) *Items {
	is := &Items{
		byName:    make(map[string]*Item),
		byNameAlt: make(map[string]*Item),
		items:     items,
	}
	// Fill up byName
	for i := range items {
		is.byName[Normalize(items[i].Path)] = &items[i]
		is.byNameAlt[Normalize(items[i].WinPath)] = &items[i]
	}
	return is
}

// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
	remote := Normalize(obj.Remote())
	i, ok := is.byName[remote]
	if !ok {
		i, ok = is.byNameAlt[remote]
		assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
	}
	if i != nil {
		delete(is.byName, i.Path)
		delete(is.byName, i.WinPath)
		i.Check(t, obj, precision)
	}
}

// Done checks all finished
func (is *Items) Done(t *testing.T) {
	if len(is.byName) != 0 {
		for name := range is.byName {
			t.Logf("Not found %q", name)
		}
	}
	assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}

// makeListingFromItems returns a string representation of the items
//
// it returns two possible strings, one normal and one for windows
func makeListingFromItems(items []Item) (string, string) {
	nameLengths1 := make([]string, len(items))
	nameLengths2 := make([]string, len(items))
	for i, item := range items {
		remote1 := Normalize(item.Path)
		remote2 := remote1
		if item.WinPath != "" {
			remote2 = item.WinPath
		}
		nameLengths1[i] = fmt.Sprintf("%s (%d)", remote1, item.Size)
		nameLengths2[i] = fmt.Sprintf("%s (%d)", remote2, item.Size)
	}
	sort.Strings(nameLengths1)
	sort.Strings(nameLengths2)
	return strings.Join(nameLengths1, ", "), strings.Join(nameLengths2, ", ")
}

// makeListingFromObjects returns a string representation of the objects
func makeListingFromObjects(objs []fs.Object) string {
	nameLengths := make([]string, len(objs))
	for i, obj := range objs {
		nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
	}
	sort.Strings(nameLengths)
	return strings.Join(nameLengths, ", ")
}

// filterEmptyDirs removes any empty (or containing only directories)
// directories from expectedDirs
func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
	dirs := map[string]struct{}{"": struct{}{}}
	for _, item := range items {
		base := item.Path
		for {
			base = path.Dir(base)
			if base == "." || base == "/" {
				break
			}
			dirs[base] = struct{}{}
		}
	}
	for _, expectedDir := range expectedDirs {
		if _, found := dirs[expectedDir]; found {
			newExpectedDirs = append(newExpectedDirs, expectedDir)
		} else {
			t.Logf("Filtering empty directory %q", expectedDir)
		}
	}
	return newExpectedDirs
}

// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
	if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
		expectedDirs = filterEmptyDirs(t, items, expectedDirs)
	}
	is := NewItems(items)
	oldErrors := accounting.Stats.GetErrors()
	var objs []fs.Object
	var dirs []fs.Directory
	var err error
	var retries = *ListRetries
	sleep := time.Second / 2
	wantListing1, wantListing2 := makeListingFromItems(items)
	gotListing := "<unset>"
	listingOK := false
	for i := 1; i <= retries; i++ {
		objs, dirs, err = walk.GetAll(f, "", true, -1)
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}

		gotListing = makeListingFromObjects(objs)
		listingOK = wantListing1 == gotListing || wantListing2 == gotListing
		if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent (here is looking at you Amazon Drive!)
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
		if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
			t.Logf("Flushing the directory cache")
			doDirCacheFlush()
		}
	}
	assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s or\n %s got\n %s", wantListing1, wantListing2, gotListing))
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && accounting.Stats.GetErrors() == 1 {
		accounting.Stats.ResetErrors()
	}
	// Check the directories
	if expectedDirs != nil {
		expectedDirsCopy := make([]string, len(expectedDirs))
		for i, dir := range expectedDirs {
			expectedDirsCopy[i] = WinPath(Normalize(dir))
		}
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, WinPath(Normalize(dir.Remote())))
		}
		sort.Strings(actualDirs)
		sort.Strings(expectedDirsCopy)
		assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
	}
}

// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
	precision := f.Precision()
	CheckListingWithPrecision(t, f, items, nil, precision)
}

// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
	CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(f))
}
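
// Illustrative sketch (not part of the original commit): a backend test
// typically writes some objects, then asserts that the remote holds exactly
// those items (sizes, hashes and mod times within the modify window).
func exampleCheckItemsSketch(t *testing.T, f fs.Fs) {
	item1 := NewItem("one.txt", "one", Time("2001-02-03T04:05:06.499999999Z"))
	item2 := NewItem("sub/two.txt", "two", Time("2011-12-25T12:59:59.123456789Z"))
	// ... put the corresponding objects onto f here ...
	CheckItems(t, f, item1, item2) // fails the test if the listing differs
}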

// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, timeString)
	if err != nil {
		log.Fatalf("Failed to parse time %q: %v", timeString, err)
	}
	return t
}
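
// Illustrative sketch (not part of the original commit): Time is a convenience
// for fixed RFC3339 timestamps in fixtures; a malformed string aborts the test
// binary via log.Fatalf rather than returning an error.
var exampleFixedTime = Time("2011-12-25T12:59:59.123456789Z")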

// RandomString creates a random string for test purposes
func RandomString(n int) string {
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	out := make([]byte, n)
	p := 0
	for i := range out {
		source := pattern[p]
		p = (p + 1) % len(pattern)
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}
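
// Illustrative sketch (not part of the original commit): the consonant/vowel
// pattern keeps generated names pronounceable, and appending 24 such characters
// to "rclone-test-" always satisfies MatchTestRemote.
func exampleRandomRemoteLeafSketch() string {
	return "rclone-test-" + RandomString(24)
}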

// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
	path, err = ioutil.TempDir("", "rclone")
	if err == nil {
		// Now remove the directory
		err = os.Remove(path)
	}
	path = filepath.ToSlash(path)
	return
}

// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
	var err error
	var leafName string

	// Make a directory if remote name is null
	if remoteName == "" {
		remoteName, err = LocalRemote()
		if err != nil {
			return "", "", err
		}
	} else {
		if !strings.HasSuffix(remoteName, ":") {
			remoteName += "/"
		}
		leafName = "rclone-test-" + RandomString(24)
		if !MatchTestRemote.MatchString(leafName) {
			log.Fatalf("%q didn't match the test remote name regexp", leafName)
		}
		remoteName += leafName
	}
	return remoteName, leafName, nil
}

// RandomRemote makes a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
//
// Returns the remote, its url, a finaliser and an error
func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error) {
	var err error
	var parentRemote fs.Fs

	remoteName, _, err = RandomRemoteName(remoteName)
	if err != nil {
		return nil, "", nil, err
	}

	if subdir {
		parentRemote, err = fs.NewFs(remoteName)
		if err != nil {
			return nil, "", nil, err
		}
		remoteName += "/rclone-test-subdir-" + RandomString(8)
	}

	remote, err := fs.NewFs(remoteName)
	if err != nil {
		return nil, "", nil, err
	}

	finalise := func() {
		Purge(remote)
		if parentRemote != nil {
			Purge(parentRemote)
			if err != nil {
				log.Printf("Failed to purge %v: %v", parentRemote, err)
			}
		}
	}

	return remote, remoteName, finalise, nil
}

// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
	var err error
	doFallbackPurge := true
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		fs.Debugf(f, "Purge remote")
		err = doPurge()
		if err == fs.ErrorCantPurge {
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		dirs := []string{""}
		err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
			if err != nil {
				log.Printf("purge walk returned error: %v", err)
				return nil
			}
			entries.ForObject(func(obj fs.Object) {
				fs.Debugf(f, "Purge object %q", obj.Remote())
				err = obj.Remove()
				if err != nil {
					log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
				}
			})
			entries.ForDir(func(dir fs.Directory) {
				dirs = append(dirs, dir.Remote())
			})
			return nil
		})
		sort.Strings(dirs)
		for i := len(dirs) - 1; i >= 0; i-- {
			dir := dirs[i]
			fs.Debugf(f, "Purge dir %q", dir)
			err := f.Rmdir(dir)
			if err != nil {
				log.Printf("purge failed to rmdir %q: %v", dir, err)
			}
		}
	}
	if err != nil {
		log.Printf("purge failed: %v", err)
	}
}

.rclone_repo/fstest/fstests/fstests.go (new executable file, 1156 lines) - diff suppressed because it is too large

.rclone_repo/fstest/mockdir/dir.go (new executable file, 13 lines)
@@ -0,0 +1,13 @@
// Package mockdir makes a mock fs.Directory object
package mockdir

import (
	"time"

	"github.com/ncw/rclone/fs"
)

// New makes a mock directory object with the name given
func New(name string) fs.Directory {
	return fs.NewDir(name, time.Time{})
}
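
// Illustrative sketch (not part of the original commit): mockdir supplies a
// directory entry without touching any backend, e.g. for feeding listing code:
//
//	d := mockdir.New("photos")
//	fmt.Println(d.Remote()) // photos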

.rclone_repo/fstest/mockobject/mockobject.go (new executable file, 180 lines)
@@ -0,0 +1,180 @@
// Package mockobject provides a mock object which can be created from a string
package mockobject

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
)

var errNotImpl = errors.New("not implemented")

// Object is a mock fs.Object useful for testing
type Object string

// New returns mock fs.Object useful for testing
func New(name string) Object {
	return Object(name)
}

// String returns a description of the Object
func (o Object) String() string {
	return string(o)
}

// Fs returns read only access to the Fs that this object is part of
func (o Object) Fs() fs.Info {
	return nil
}

// Remote returns the remote path
func (o Object) Remote() string {
	return string(o)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o Object) Hash(hash.Type) (string, error) {
	return "", errNotImpl
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o Object) ModTime() (t time.Time) {
	return t
}

// Size returns the size of the file
func (o Object) Size() int64 { return 0 }

// Storable says whether this object can be stored
func (o Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o Object) SetModTime(time.Time) error {
	return errNotImpl
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	return nil, errNotImpl
}

// Update in to the object with the modTime given of the given size
func (o Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errNotImpl
}

// Remove this object
func (o Object) Remove() error {
	return errNotImpl
}

// SeekMode specifies the optional Seek interface for the ReadCloser returned by Open
type SeekMode int

const (
	// SeekModeNone specifies no seek interface
	SeekModeNone SeekMode = iota
	// SeekModeRegular specifies the regular io.Seek interface
	SeekModeRegular
	// SeekModeRange specifies the fs.RangeSeek interface
	SeekModeRange
)

// SeekModes contains all valid SeekMode's
var SeekModes = []SeekMode{SeekModeNone, SeekModeRegular, SeekModeRange}

type contentMockObject struct {
	Object
	content  []byte
	seekMode SeekMode
}

// WithContent returns a fs.Object with the given content.
func (o Object) WithContent(content []byte, mode SeekMode) fs.Object {
	return &contentMockObject{
		Object:   o,
		content:  content,
		seekMode: mode,
	}
}

func (o *contentMockObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				return nil, fmt.Errorf("Unsupported mandatory option: %v", option)
			}
		}
	}
	if limit == -1 || offset+limit > o.Size() {
		limit = o.Size() - offset
	}

	var r *bytes.Reader
	if o.seekMode == SeekModeNone {
		r = bytes.NewReader(o.content[offset : offset+limit])
	} else {
		r = bytes.NewReader(o.content)
		_, err := r.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}
	switch o.seekMode {
	case SeekModeNone:
		return &readCloser{r}, nil
	case SeekModeRegular:
		return &readSeekCloser{r}, nil
	case SeekModeRange:
		return &readRangeSeekCloser{r}, nil
	default:
		return nil, errors.New(o.seekMode.String())
	}
}

func (o *contentMockObject) Size() int64 {
	return int64(len(o.content))
}

type readCloser struct{ io.Reader }

func (r *readCloser) Close() error { return nil }

type readSeekCloser struct{ io.ReadSeeker }

func (r *readSeekCloser) Close() error { return nil }

type readRangeSeekCloser struct{ io.ReadSeeker }

func (r *readRangeSeekCloser) RangeSeek(offset int64, whence int, length int64) (int64, error) {
	return r.ReadSeeker.Seek(offset, whence)
}

func (r *readRangeSeekCloser) Close() error { return nil }

func (m SeekMode) String() string {
	switch m {
	case SeekModeNone:
		return "SeekModeNone"
	case SeekModeRegular:
		return "SeekModeRegular"
	case SeekModeRange:
		return "SeekModeRange"
	default:
		return fmt.Sprintf("SeekModeInvalid(%d)", m)
	}
}
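
// Illustrative sketch (not part of the original commit): WithContent turns the
// bare name into an object whose Open honours the offset options, which is
// handy for testing partial-read code paths.
func exampleWithContentSketch() (string, error) {
	o := New("file.bin").WithContent([]byte("0123456789"), SeekModeNone)
	rc, err := o.Open(&fs.SeekOption{Offset: 2})
	if err != nil {
		return "", err
	}
	defer func() { _ = rc.Close() }()
	var buf bytes.Buffer
	_, err = buf.ReadFrom(rc)
	return buf.String(), err // "23456789" - everything from offset 2
}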

.rclone_repo/fstest/run.go (new executable file, 313 lines)
@@ -0,0 +1,313 @@
/*

This provides Run for use in creating test suites

To use this declare a TestMain

// TestMain drives the tests
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}

And then make and destroy a Run in each test

func TestMkdir(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	// test stuff
}

This will make r.Fremote and r.Flocal for a remote remote and a local
remote. The remote is determined by the -remote flag passed in.

*/

package fstest

import (
	"bytes"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/object"
	"github.com/ncw/rclone/fs/walk"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Run holds the remotes for a test run
type Run struct {
	LocalName    string
	Flocal       fs.Fs
	Fremote      fs.Fs
	FremoteName  string
	cleanRemote  func()
	mkdir        map[string]bool // whether the remote has been made yet for the fs name
	Logf, Fatalf func(text string, args ...interface{})
}

// TestMain drives the tests
func TestMain(m *testing.M) {
	flag.Parse()
	if !*Individual {
		oneRun = newRun()
	}
	rc := m.Run()
	if !*Individual {
		oneRun.Finalise()
	}
	os.Exit(rc)
}

// oneRun holds the master run data if individual is not set
var oneRun *Run

// newRun initialises the remote and local for testing and returns a
// run object.
//
// r.Flocal is an empty local Fs
// r.Fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func newRun() *Run {
	r := &Run{
		Logf:   log.Printf,
		Fatalf: log.Fatalf,
		mkdir:  make(map[string]bool),
	}

	Initialise()

	var err error
	r.Fremote, r.FremoteName, r.cleanRemote, err = RandomRemote(*RemoteName, *SubDir)
	if err != nil {
		r.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
	}

	r.LocalName, err = ioutil.TempDir("", "rclone")
	if err != nil {
		r.Fatalf("Failed to create temp dir: %v", err)
	}
	r.LocalName = filepath.ToSlash(r.LocalName)
	r.Flocal, err = fs.NewFs(r.LocalName)
	if err != nil {
		r.Fatalf("Failed to make %q: %v", r.LocalName, err)
	}
	return r
}

// run f(), retrying it until it returns with no error or the limit
// expires and it calls t.Fatalf
func retry(t *testing.T, what string, f func() error) {
	var err error
	for try := 1; try <= *ListRetries; try++ {
		err = f()
		if err == nil {
			return
		}
		t.Logf("%s failed - try %d/%d: %v", what, try, *ListRetries, err)
		time.Sleep(time.Second)
	}
	t.Logf("%s failed: %v", what, err)
}

// NewRun initialises the remote and local for testing and returns a
// run object. Call this from the tests.
//
// r.Flocal is an empty local Fs
// r.Fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func NewRun(t *testing.T) *Run {
	var r *Run
	if *Individual {
		r = newRun()
	} else {
		// If not individual, use the global one with the clean method overridden
		r = new(Run)
		*r = *oneRun
		r.cleanRemote = func() {
			var toDelete []string
			err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
				if err != nil {
					if err == fs.ErrorDirNotFound {
						return nil
					}
					t.Fatalf("Error listing: %v", err)
				}
				for _, entry := range entries {
					switch x := entry.(type) {
					case fs.Object:
						retry(t, fmt.Sprintf("removing file %q", x.Remote()), x.Remove)
					case fs.Directory:
						toDelete = append(toDelete, x.Remote())
					}
				}
				return nil
			})
			if err == fs.ErrorDirNotFound {
				return
			}
			require.NoError(t, err)
			sort.Strings(toDelete)
			for i := len(toDelete) - 1; i >= 0; i-- {
				dir := toDelete[i]
				retry(t, fmt.Sprintf("removing dir %q", dir), func() error {
					return r.Fremote.Rmdir(dir)
				})
			}
			// Check remote is empty
			CheckListingWithPrecision(t, r.Fremote, []Item{}, []string{}, r.Fremote.Precision())
		}
	}
	r.Logf = t.Logf
	r.Fatalf = t.Fatalf
	r.Logf("Remote %q, Local %q, Modify Window %q", r.Fremote, r.Flocal, fs.GetModifyWindow(r.Fremote))
	return r
}

// RenameFile renames a file in local
func (r *Run) RenameFile(item Item, newpath string) Item {
	oldFilepath := path.Join(r.LocalName, item.Path)
	newFilepath := path.Join(r.LocalName, newpath)
	if err := os.Rename(oldFilepath, newFilepath); err != nil {
		r.Fatalf("Failed to rename file from %q to %q: %v", item.Path, newpath, err)
	}

	item.Path = newpath

	return item
}

// WriteFile writes a file to local
func (r *Run) WriteFile(filePath, content string, t time.Time) Item {
	item := NewItem(filePath, content, t)
	// FIXME make directories?
	filePath = path.Join(r.LocalName, filePath)
	dirPath := path.Dir(filePath)
	err := os.MkdirAll(dirPath, 0770)
	if err != nil {
		r.Fatalf("Failed to make directories %q: %v", dirPath, err)
	}
	err = ioutil.WriteFile(filePath, []byte(content), 0600)
	if err != nil {
		r.Fatalf("Failed to write file %q: %v", filePath, err)
	}
	err = os.Chtimes(filePath, t, t)
	if err != nil {
		r.Fatalf("Failed to chtimes file %q: %v", filePath, err)
	}
	return item
}

// ForceMkdir creates the remote
func (r *Run) ForceMkdir(f fs.Fs) {
	err := f.Mkdir("")
	if err != nil {
		r.Fatalf("Failed to mkdir %q: %v", f, err)
	}
	r.mkdir[f.String()] = true
}

// Mkdir creates the remote if it hasn't been created already
func (r *Run) Mkdir(f fs.Fs) {
	if !r.mkdir[f.String()] {
		r.ForceMkdir(f)
	}
}

// WriteObjectTo writes an object to the fs, remote passed in
func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time, useUnchecked bool) Item {
	put := f.Put
	if useUnchecked {
		put = f.Features().PutUnchecked
		if put == nil {
			r.Fatalf("Fs doesn't support PutUnchecked")
		}
	}
	r.Mkdir(f)
	const maxTries = 10
	for tries := 1; ; tries++ {
		in := bytes.NewBufferString(content)
		objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
		_, err := put(in, objinfo)
		if err == nil {
			break
		}
		// Retry if err returned a retry error
		if fserrors.IsRetryError(err) && tries < maxTries {
			r.Logf("Retry Put of %q to %v: %d/%d (%v)", remote, f, tries, maxTries, err)
			time.Sleep(2 * time.Second)
			continue
		}
		r.Fatalf("Failed to put %q to %q: %v", remote, f, err)
	}
	return NewItem(remote, content, modTime)
}

// WriteObject writes an object to the remote
func (r *Run) WriteObject(remote, content string, modTime time.Time) Item {
	return r.WriteObjectTo(r.Fremote, remote, content, modTime, false)
}

// WriteUncheckedObject writes an object to the remote not checking for duplicates
func (r *Run) WriteUncheckedObject(remote, content string, modTime time.Time) Item {
	return r.WriteObjectTo(r.Fremote, remote, content, modTime, true)
}

// WriteBoth calls WriteObject and WriteFile with the same arguments
func (r *Run) WriteBoth(remote, content string, modTime time.Time) Item {
	r.WriteFile(remote, content, modTime)
	return r.WriteObject(remote, content, modTime)
}
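
// Illustrative sketch (not part of the original commit): a typical operations
// test builds fixtures with WriteFile/WriteObject/WriteBoth and then asserts
// the expected listings on both sides, e.g.
//
//	func TestCopySketch(t *testing.T) {
//		r := fstest.NewRun(t)
//		defer r.Finalise()
//		file1 := r.WriteBoth("hello.txt", "hello world", fstest.Time("2001-02-03T04:05:06.499999999Z"))
//		// ... run the operation under test here ...
//		fstest.CheckItems(t, r.Flocal, file1)
//		fstest.CheckItems(t, r.Fremote, file1)
//	}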

// CheckWithDuplicates does a test but allows duplicates
func (r *Run) CheckWithDuplicates(t *testing.T, items ...Item) {
	var want, got []string

	// construct a []string of desired items
	for _, item := range items {
		want = append(want, fmt.Sprintf("%q %d", item.Path, item.Size))
	}
	sort.Strings(want)

	// do the listing
	objs, _, err := walk.GetAll(r.Fremote, "", true, -1)
	if err != nil && err != fs.ErrorDirNotFound {
		t.Fatalf("Error listing: %v", err)
	}

	// construct a []string of actual items
	for _, o := range objs {
		got = append(got, fmt.Sprintf("%q %d", o.Remote(), o.Size()))
	}
	sort.Strings(got)

	assert.Equal(t, want, got)
}

// Clean the temporary directory
func (r *Run) cleanTempDir() {
	err := os.RemoveAll(r.LocalName)
	if err != nil {
		r.Logf("Failed to clean temporary directory %q: %v", r.LocalName, err)
	}
}

// Finalise cleans the remote and local
func (r *Run) Finalise() {
	// r.Logf("Cleaning remote %q", r.Fremote)
	r.cleanRemote()
	// r.Logf("Cleaning local %q", r.LocalName)
	r.cleanTempDir()
}

.rclone_repo/fstest/test_all/test_all.go (new executable file, 483 lines)
@@ -0,0 +1,483 @@
// Run tests for all the remotes. Run this with package names which
// need integration testing.
//
// See the `test` target in the Makefile.
package main

import (
	"flag"
	"go/build"
	"log"
	"os"
	"os/exec"
	"path"
	"regexp"
	"runtime"
	"strings"
	"time"

	_ "github.com/ncw/rclone/backend/all" // import all fs
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/list"
	"github.com/ncw/rclone/fs/operations"
	"github.com/ncw/rclone/fstest"
)

type remoteConfig struct {
	Name     string
	SubDir   bool
	FastList bool
}

var (
	remotes = []remoteConfig{
		// {Name: "TestAmazonCloudDrive:", SubDir: false, FastList: false},
		{Name: "TestB2:", SubDir: true, FastList: true},
		{Name: "TestCryptDrive:", SubDir: false, FastList: true},
		{Name: "TestCryptSwift:", SubDir: false, FastList: false},
		{Name: "TestDrive:", SubDir: false, FastList: true},
		{Name: "TestDropbox:", SubDir: false, FastList: false},
		{Name: "TestGoogleCloudStorage:", SubDir: true, FastList: true},
		{Name: "TestHubic:", SubDir: false, FastList: false},
		{Name: "TestJottacloud:", SubDir: false, FastList: false},
		{Name: "TestOneDrive:", SubDir: false, FastList: false},
		{Name: "TestS3:", SubDir: true, FastList: true},
		{Name: "TestSftp:", SubDir: false, FastList: false},
		{Name: "TestSwift:", SubDir: true, FastList: true},
		{Name: "TestYandex:", SubDir: false, FastList: false},
		{Name: "TestFTP:", SubDir: false, FastList: false},
		{Name: "TestBox:", SubDir: false, FastList: false},
		{Name: "TestQingStor:", SubDir: false, FastList: false},
		{Name: "TestAzureBlob:", SubDir: true, FastList: true},
		{Name: "TestPcloud:", SubDir: false, FastList: false},
		{Name: "TestWebdav:", SubDir: false, FastList: false},
		{Name: "TestCache:", SubDir: false, FastList: false},
		{Name: "TestMega:", SubDir: false, FastList: false},
		{Name: "TestOpenDrive:", SubDir: false, FastList: false},
	}
	// Flags
	maxTries = flag.Int("maxtries", 5, "Number of times to try each test")
	runTests = flag.String("remotes", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
	clean    = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
	runOnly  = flag.String("run", "", "Run only those tests matching the regexp supplied")
	timeout  = flag.Duration("timeout", 30*time.Minute, "Maximum time to run each test for before giving up")
)

// test holds info about a running test
type test struct {
	pkg         string
	remote      string
	subdir      bool
	cmdLine     []string
	cmdString   string
	try         int
	err         error
	output      []byte
	failedTests []string
	runFlag     string
}

// newTest creates a new test
func newTest(pkg, remote string, subdir bool, fastlist bool) *test {
	binary := pkgBinary(pkg)
	t := &test{
		pkg:     pkg,
		remote:  remote,
		subdir:  subdir,
		cmdLine: []string{binary, "-test.timeout", timeout.String(), "-remote", remote},
		try:     1,
	}
	if *fstest.Verbose {
		t.cmdLine = append(t.cmdLine, "-test.v")
		fs.Config.LogLevel = fs.LogLevelDebug
	}
	if *runOnly != "" {
		t.cmdLine = append(t.cmdLine, "-test.run", *runOnly)
	}
	if subdir {
		t.cmdLine = append(t.cmdLine, "-subdir")
	}
	if fastlist {
		t.cmdLine = append(t.cmdLine, "-fast-list")
	}
	t.cmdString = toShell(t.cmdLine)
	return t
}

// dumpOutput prints the error output
func (t *test) dumpOutput() {
	log.Println("------------------------------------------------------------")
	log.Printf("---- %q ----", t.cmdString)
	log.Println(string(t.output))
	log.Println("------------------------------------------------------------")
}

var failRe = regexp.MustCompile(`(?m)^--- FAIL: (Test\w*) \(`)

// findFailures looks for all the tests which failed
func (t *test) findFailures() {
	oldFailedTests := t.failedTests
	t.failedTests = nil
	for _, matches := range failRe.FindAllSubmatch(t.output, -1) {
		t.failedTests = append(t.failedTests, string(matches[1]))
	}
	if len(t.failedTests) != 0 {
		t.runFlag = "^(" + strings.Join(t.failedTests, "|") + ")$"
	} else {
		t.runFlag = ""
	}
	if t.passed() && len(t.failedTests) != 0 {
		log.Printf("%q - Expecting no errors but got: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
	} else if !t.passed() && len(t.failedTests) == 0 {
		log.Printf("%q - Expecting errors but got none: %v", t.cmdString, t.failedTests)
		t.dumpOutput()
		t.failedTests = oldFailedTests
	}
}

// nextCmdLine returns the next command line
func (t *test) nextCmdLine() []string {
	cmdLine := t.cmdLine
	if t.runFlag != "" {
		cmdLine = append(cmdLine, "-test.run", t.runFlag)
	}
	return cmdLine
}

// if matches then is definitely OK in the shell
var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$")

// converts an argv style input into a shell command
func toShell(args []string) (result string) {
	for _, arg := range args {
		if result != "" {
			result += " "
		}
		if shellOK.MatchString(arg) {
			result += arg
		} else {
			result += "'" + arg + "'"
		}
	}
	return result
}
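
// Illustrative sketch (not part of the original commit): toShell only quotes
// arguments containing characters outside the shellOK set, so a typical
// command line stays readable in the logs:
//
//	toShell([]string{"./fs.test", "-test.run", "^(TestCopy|TestSync)$"})
//	// ./fs.test -test.run '^(TestCopy|TestSync)$'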

// trial runs a single test
func (t *test) trial() {
	cmdLine := t.nextCmdLine()
	cmdString := toShell(cmdLine)
	log.Printf("%q - Starting (try %d/%d)", cmdString, t.try, *maxTries)
	cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
	start := time.Now()
	t.output, t.err = cmd.CombinedOutput()
	duration := time.Since(start)
	t.findFailures()
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, t.try, *maxTries, t.err, t.failedTests)
	}
}

// cleanFs runs a single clean fs for left over directories
func (t *test) cleanFs() error {
	f, err := fs.NewFs(t.remote)
	if err != nil {
		return err
	}
	entries, err := list.DirSorted(f, true, "")
	if err != nil {
		return err
	}
	return entries.ForDirError(func(dir fs.Directory) error {
		remote := dir.Remote()
		if fstest.MatchTestRemote.MatchString(remote) {
			log.Printf("Purging %s%s", t.remote, remote)
			dir, err := fs.NewFs(t.remote + remote)
			if err != nil {
				return err
			}
			return operations.Purge(dir, "")
		}
		return nil
	})
}

// clean runs a single clean on a fs for left over directories
func (t *test) clean() {
	log.Printf("%q - Starting clean (try %d/%d)", t.remote, t.try, *maxTries)
	start := time.Now()
	t.err = t.cleanFs()
	if t.err != nil {
		log.Printf("%q - Failed to purge %v", t.remote, t.err)
	}
	duration := time.Since(start)
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
	}
}

// passed returns true if the test passed
func (t *test) passed() bool {
	return t.err == nil
}

// run runs all the trials for this test
func (t *test) run(result chan<- *test) {
	for t.try = 1; t.try <= *maxTries; t.try++ {
		if *clean {
			if !t.subdir {
				t.clean()
			}
		} else {
			t.trial()
		}
		if t.passed() {
			break
		}
	}
	if !t.passed() {
		t.dumpOutput()
	}
	result <- t
}

// GOPATH returns the current GOPATH
func GOPATH() string {
	gopath := os.Getenv("GOPATH")
	if gopath == "" {
		gopath = build.Default.GOPATH
	}
	return gopath
}

// turn a package name into a binary name
func pkgBinaryName(pkg string) string {
	binary := path.Base(pkg) + ".test"
	if runtime.GOOS == "windows" {
		binary += ".exe"
	}
	return binary
}

// turn a package name into a binary path
func pkgBinary(pkg string) string {
	return path.Join(pkgPath(pkg), pkgBinaryName(pkg))
}

// returns the path to the package
func pkgPath(pkg string) string {
	return path.Join(GOPATH(), "src", pkg)
}

// cd into the package directory
func pkgChdir(pkg string) {
	err := os.Chdir(pkgPath(pkg))
	if err != nil {
		log.Fatalf("Failed to chdir to package %q: %v", pkg, err)
	}
}

// makeTestBinary makes the binary we will run
func makeTestBinary(pkg string) {
	binaryName := pkgBinaryName(pkg)
	log.Printf("%s: Making test binary %q", pkg, binaryName)
	pkgChdir(pkg)
	err := exec.Command("go", "test", "-c", "-o", binaryName).Run()
	if err != nil {
		log.Fatalf("Failed to make test binary: %v", err)
	}
	binary := pkgBinary(pkg)
	if _, err := os.Stat(binary); err != nil {
		log.Fatalf("Couldn't find test binary %q", binary)
	}
}

// removeTestBinary removes the binary made in makeTestBinary
func removeTestBinary(pkg string) {
	binary := pkgBinary(pkg)
	err := os.Remove(binary) // Delete the binary when finished
	if err != nil {
		log.Printf("Error removing test binary %q: %v", binary, err)
	}
}

func main() {
	flag.Parse()
	packages := flag.Args()
	log.Printf("Testing packages: %s", strings.Join(packages, ", "))
	if *runTests != "" {
		newRemotes := []remoteConfig{}
		for _, name := range strings.Split(*runTests, ",") {
			for i := range remotes {
				if remotes[i].Name == name {
					newRemotes = append(newRemotes, remotes[i])
					goto found
				}
			}
			log.Printf("Remote %q not found - inserting with default flags", name)
			newRemotes = append(newRemotes, remoteConfig{Name: name})
		found:
		}
		remotes = newRemotes
	}
	var names []string
	for _, remote := range remotes {
		names = append(names, remote.Name)
	}
	log.Printf("Testing remotes: %s", strings.Join(names, ", "))

	start := time.Now()
	if *clean {
		config.LoadConfig()
		packages = []string{"clean"}
	} else {
		for _, pkg := range packages {
			makeTestBinary(pkg)
			defer removeTestBinary(pkg)
		}
	}

	// workaround for cache backend as we run simultaneous tests
	_ = os.Setenv("RCLONE_CACHE_DB_WAIT_TIME", "30m")

	// start the tests
	results := make(chan *test, 8)
	awaiting := 0
	bools := []bool{false, true}
	if *clean {
		// Don't run -subdir and -fast-list if -clean
		bools = bools[:1]
	}
	for _, pkg := range packages {
		for _, remote := range remotes {
			for _, subdir := range bools {
				for _, fastlist := range bools {
					if (!subdir || subdir && remote.SubDir) && (!fastlist || fastlist && remote.FastList) {
						go newTest(pkg, remote.Name, subdir, fastlist).run(results)
						awaiting++
					}
				}
			}
		}
	}

	// Wait for the tests to finish
	var failed []*test
	for ; awaiting > 0; awaiting-- {
		t := <-results
		if !t.passed() {
			failed = append(failed, t)
		}
	}
	duration := time.Since(start)

	// Summarise results
	log.Printf("SUMMARY")
	if len(failed) == 0 {
		log.Printf("PASS: All tests finished OK in %v", duration)
	} else {
		log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
		for _, t := range failed {
			log.Printf("  * %s", toShell(t.nextCmdLine()))
			log.Printf("    * Failed tests: %v", t.failedTests)
		}
		os.Exit(1)
	}
}