This commit is contained in:
bel
2020-01-13 03:37:51 +00:00
commit c8eb52f9ba
2023 changed files with 702080 additions and 0 deletions

View File

@@ -0,0 +1,28 @@
package readers

import "io"

// NewCountingReader returns a CountingReader, which will read from the given
// reader while keeping track of how many bytes were read.
func NewCountingReader(in io.Reader) *CountingReader {
	return &CountingReader{in: in}
}

// CountingReader holds a reader and a read count of how many bytes were read
// so far.
type CountingReader struct {
	in   io.Reader
	read uint64
}

// Read reads from the underlying reader.
func (cr *CountingReader) Read(b []byte) (int, error) {
	n, err := cr.in.Read(b)
	cr.read += uint64(n)
	return n, err
}

// BytesRead returns how many bytes were read from the underlying reader so far.
func (cr *CountingReader) BytesRead() uint64 {
	return cr.read
}
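To show how this reads in practice, here is a minimal sketch of an example test that could sit next to this file (the Example function, the sample string and the use of ioutil.Discard are my own assumptions, not part of the commit):

package readers

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// ExampleNewCountingReader copies a short string through a CountingReader
// and then asks it how many bytes passed through.
func ExampleNewCountingReader() {
	cr := NewCountingReader(strings.NewReader("hello world"))
	_, _ = io.Copy(ioutil.Discard, cr)
	fmt.Println(cr.BytesRead())
	// Output: 11
}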

View File

@@ -0,0 +1,22 @@
package readers

import "io"

// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
type LimitedReadCloser struct {
	*io.LimitedReader
	io.Closer
}

// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
// limit it to reading limit bytes. If limit < 0 then it does not
// wrap rc, it just returns it.
func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
	if limit < 0 {
		return rc
	}
	return &LimitedReadCloser{
		LimitedReader: &io.LimitedReader{R: rc, N: limit},
		Closer:        rc,
	}
}
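As an illustration only, a hedged sketch of NewLimitedReadCloser in use; ioutil.NopCloser is just there to manufacture an io.ReadCloser for the demo, and none of this is part of the commit:

package readers

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// ExampleNewLimitedReadCloser caps a reader at 5 bytes; Close is forwarded
// to the wrapped ReadCloser.
func ExampleNewLimitedReadCloser() {
	rc := ioutil.NopCloser(strings.NewReader("hello world"))
	lrc := NewLimitedReadCloser(rc, 5)
	data, _ := ioutil.ReadAll(lrc)
	_ = lrc.Close()
	fmt.Printf("%q\n", data)
	// Output: "hello"
}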

View File

@@ -0,0 +1,18 @@
package readers

import "io"

// ReadFill reads as much data from r into buf as it can.
//
// It reads until the buffer is full or r.Read returns an error.
//
// This is like io.ReadFull, but for when you just want as much data as
// possible rather than an exactly sized block.
func ReadFill(r io.Reader, buf []byte) (n int, err error) {
	var nn int
	for n < len(buf) && err == nil {
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}
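For contrast with io.ReadFull, a small assumed example (not part of the commit): on a source shorter than the buffer, ReadFill keeps the short read and reports the error that stopped it (io.EOF here), whereas io.ReadFull would report io.ErrUnexpectedEOF for the same input.

package readers

import (
	"fmt"
	"strings"
)

// ExampleReadFill fills as much of an 8-byte buffer as a 5-byte source
// allows, returning the count read alongside the terminating error.
func ExampleReadFill() {
	buf := make([]byte, 8)
	n, err := ReadFill(strings.NewReader("hello"), buf)
	fmt.Println(n, err, string(buf[:n]))
	// Output: 5 EOF hello
}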

View File

@@ -0,0 +1,42 @@
package readers

import (
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

type byteReader struct {
	c byte
}

func (br *byteReader) Read(p []byte) (n int, err error) {
	if br.c == 0 {
		err = io.EOF
	} else if len(p) >= 1 {
		p[0] = br.c
		n = 1
		br.c--
	}
	return
}

func TestReadFill(t *testing.T) {
	buf := []byte{9, 9, 9, 9, 9}

	n, err := ReadFill(&byteReader{0}, buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)
	assert.Equal(t, []byte{9, 9, 9, 9, 9}, buf)

	n, err = ReadFill(&byteReader{3}, buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 3, n)
	assert.Equal(t, []byte{3, 2, 1, 9, 9}, buf)

	n, err = ReadFill(&byteReader{8}, buf)
	assert.Equal(t, nil, err)
	assert.Equal(t, 5, n)
	assert.Equal(t, []byte{8, 7, 6, 5, 4}, buf)
}

View File

@@ -0,0 +1,96 @@
package readers

import (
	"io"

	"github.com/pkg/errors"
)

// A RepeatableReader implements io.ReadSeeker. It allows seeking back and
// forth within the data that has already been read, but only reads from the
// underlying Reader as necessary, so it plays nicely with the Account and
// io.LimitedReader to reflect the current speed.
type RepeatableReader struct {
	in io.Reader // Input reader
	i  int64     // current reading index
	b  []byte    // internal cache buffer
}

var _ io.ReadSeeker = (*RepeatableReader)(nil)

// Seek implements the io.Seeker interface.
// If the seek position is past the cached buffer length, it returns the
// maximum offset that can be used and a
// "fs.RepeatableReader.Seek: offset is unavailable" error.
func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
	var abs int64
	cacheLen := int64(len(r.b))
	switch whence {
	case io.SeekStart:
		abs = offset
	case io.SeekCurrent:
		abs = r.i + offset
	case io.SeekEnd:
		abs = cacheLen + offset
	default:
		return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
	}
	if abs < 0 {
		return 0, errors.New("fs.RepeatableReader.Seek: negative position")
	}
	if abs > cacheLen {
		return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
	}
	r.i = abs
	return abs, nil
}

// Read reads data into b. Data is served from the cache if it has already
// been read, otherwise it is read from the underlying Reader and appended
// to the cache.
func (r *RepeatableReader) Read(b []byte) (n int, err error) {
	cacheLen := int64(len(r.b))
	if r.i == cacheLen {
		n, err = r.in.Read(b)
		if n > 0 {
			r.b = append(r.b, b[:n]...)
		}
	} else {
		n = copy(b, r.b[r.i:])
	}
	r.i += int64(n)
	return n, err
}

// NewRepeatableReader creates a new repeatable reader from Reader r.
func NewRepeatableReader(r io.Reader) *RepeatableReader {
	return &RepeatableReader{in: r}
}

// NewRepeatableReaderSized creates a new repeatable reader from Reader r
// with an initial buffer of capacity size.
func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
	return &RepeatableReader{
		in: r,
		b:  make([]byte, 0, size),
	}
}

// NewRepeatableLimitReader creates a new repeatable reader from Reader r
// with an initial buffer of capacity size, wrapped in an io.LimitReader so
// that only size bytes are read.
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
	return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
}

// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
// using the buffer passed in.
func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
	return &RepeatableReader{
		in: r,
		b:  buf[:0],
	}
}

// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
// Reader r and buf, wrapped in an io.LimitReader so that only size bytes
// are read.
func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
	return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
}
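Finally, a sketch of the read-then-rewind pattern the RepeatableReader is built for (the Example function and the sample data are my own assumptions, not part of the commit): everything read so far can be replayed from the cache after a Seek back.

package readers

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// ExampleNewRepeatableReader reads everything once, seeks back to the
// start, and reads the same bytes again, served from the internal cache.
func ExampleNewRepeatableReader() {
	r := NewRepeatableReader(strings.NewReader("payload"))

	first, _ := ioutil.ReadAll(r)
	_, _ = r.Seek(0, io.SeekStart)
	second, _ := ioutil.ReadAll(r)

	fmt.Println(string(first), string(second))
	// Output: payload payload
}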

View File

@@ -0,0 +1,100 @@
package readers

import (
	"bytes"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRepeatableReader(t *testing.T) {
	var dst []byte
	var n int
	var pos int64
	var err error

	b := []byte("Testbuffer")
	buf := bytes.NewBuffer(b)
	r := NewRepeatableReader(buf)

	dst = make([]byte, 100)
	n, err = r.Read(dst)
	assert.Nil(t, err)
	assert.Equal(t, 10, n)
	require.Equal(t, b, dst[0:10])

	// Test read EOF
	n, err = r.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// Test Seek back to start
	dst = make([]byte, 10)
	pos, err = r.Seek(0, io.SeekStart)
	assert.Nil(t, err)
	require.Equal(t, 0, int(pos))
	n, err = r.Read(dst)
	assert.Nil(t, err)
	assert.Equal(t, 10, n)
	require.Equal(t, b, dst)

	// Test partial read
	buf = bytes.NewBuffer(b)
	r = NewRepeatableReader(buf)
	dst = make([]byte, 5)
	n, err = r.Read(dst)
	assert.Nil(t, err)
	assert.Equal(t, 5, n)
	require.Equal(t, b[0:5], dst)
	n, err = r.Read(dst)
	assert.Nil(t, err)
	assert.Equal(t, 5, n)
	require.Equal(t, b[5:], dst)

	// Test Seek
	buf = bytes.NewBuffer(b)
	r = NewRepeatableReader(buf)
	// Should not allow seek past cache index
	pos, err = r.Seek(5, io.SeekCurrent)
	assert.NotNil(t, err)
	assert.Equal(t, "fs.RepeatableReader.Seek: offset is unavailable", err.Error())
	assert.Equal(t, 0, int(pos))
	// Should not allow seek to a negative position
	pos, err = r.Seek(-1, io.SeekCurrent)
	assert.NotNil(t, err)
	assert.Equal(t, "fs.RepeatableReader.Seek: negative position", err.Error())
	assert.Equal(t, 0, int(pos))
	// Should not allow seek with invalid whence
	pos, err = r.Seek(0, 3)
	assert.NotNil(t, err)
	assert.Equal(t, "fs.RepeatableReader.Seek: invalid whence", err.Error())
	assert.Equal(t, 0, int(pos))
	// Should seek from the current index with io.SeekCurrent (1) whence
	dst = make([]byte, 5)
	_, _ = r.Read(dst)
	pos, err = r.Seek(-3, io.SeekCurrent)
	assert.Nil(t, err)
	require.Equal(t, 2, int(pos))
	pos, err = r.Seek(1, io.SeekCurrent)
	assert.Nil(t, err)
	require.Equal(t, 3, int(pos))
	// Should seek from the cache end with io.SeekEnd (2) whence
	pos, err = r.Seek(-3, io.SeekEnd)
	assert.Nil(t, err)
	require.Equal(t, 2, int(pos))
	// Should read from the seek position and past it
	dst = make([]byte, 5)
	n, err = io.ReadFull(r, dst)
	assert.Nil(t, err)
	assert.Equal(t, 5, n)
	assert.Equal(t, b[2:7], dst)
}