This turned out to make testing more difficult: the FileStore objects are generally mocked themselves, and moving the Close() call inside them introduced IO problems in the test suite. This reverts commit a063f85eca5dd3f983bc9b790fd68108820cc730.
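For context, the contract this revert restores is that GetObject returns a readable, closeable body and the call site owns the Close() call, as the tests below exercise. A minimal sketch of that pattern, assuming an interface shaped like the one implied by the tests (the readObject helper is illustrative and not part of the repository; imports are as in the test file below):

// readObject is a hypothetical helper showing the caller-closes contract.
// The GetObject signature is inferred from the tests in this file.
func readObject(ctx context.Context, store interface {
	GetObject(ctx context.Context, key string) (io.ReadCloser, error)
}, key string) ([]byte, error) {
	output, err := store.GetObject(ctx, key)
	if err != nil {
		return nil, err
	}
	// Close at the call site rather than inside the FileStore, so a mocked
	// FileStore never performs real IO when the body is closed.
	defer output.Close()

	return ioutil.ReadAll(output)
}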
package filestore_test

import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"strings"
	"testing"
	"time"

	"git.netflux.io/rob/clipper/filestore"
	"git.netflux.io/rob/clipper/generated/mocks"
	signerv4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

func TestS3GetObject(t *testing.T) {
	const (
		bucket  = "some-bucket"
		key     = "foo/bar"
		content = "hello world"
	)
	body := io.NopCloser(strings.NewReader(content))

	var s3Client mocks.S3Client
	s3Client.On("GetObject", mock.Anything, mock.MatchedBy(func(input *s3.GetObjectInput) bool {
		return *input.Bucket == bucket && *input.Key == key
	})).Return(&s3.GetObjectOutput{Body: body}, nil)
	defer s3Client.AssertExpectations(t)

	store := filestore.NewS3FileStore(filestore.S3API{S3Client: &s3Client}, bucket, time.Minute, zap.NewNop().Sugar())
	output, err := store.GetObject(context.Background(), key)
	require.NoError(t, err)
	defer output.Close()

	actual, err := ioutil.ReadAll(output)
	require.NoError(t, err)
	assert.Equal(t, content, string(actual))
}

func TestS3GetObjectWithRange(t *testing.T) {
	const (
		bucket  = "some-bucket"
		key     = "foo/bar"
		content = "hello world"
		start   = 256
		end     = 32_768
	)
	body := io.NopCloser(strings.NewReader(content))

	var s3Client mocks.S3Client
	s3Client.On("GetObject", mock.Anything, mock.MatchedBy(func(input *s3.GetObjectInput) bool {
		return *input.Bucket == bucket && *input.Key == key && *input.Range == "bytes=256-32768"
	})).Return(&s3.GetObjectOutput{Body: body}, nil)
	defer s3Client.AssertExpectations(t)

	store := filestore.NewS3FileStore(filestore.S3API{S3Client: &s3Client}, bucket, time.Minute, zap.NewNop().Sugar())
	output, err := store.GetObjectWithRange(context.Background(), key, start, end)
	require.NoError(t, err)
	defer output.Close()

	actual, err := ioutil.ReadAll(output)
	require.NoError(t, err)
	assert.Equal(t, content, string(actual))
}

func TestS3GetURL(t *testing.T) {
	const (
		bucket    = "some-bucket"
		key       = "foo/bar"
		urlExpiry = time.Minute * 10
		wantURL   = "https://foo.s3.example.com/foo/bar"
	)
	var presignClient mocks.S3PresignClient
	presignClient.On("PresignGetObject", mock.Anything, mock.MatchedBy(func(input *s3.GetObjectInput) bool {
		return *input.Bucket == bucket && *input.Key == key
	}), mock.Anything).Return(&signerv4.PresignedHTTPRequest{URL: wantURL}, nil)
	defer presignClient.AssertExpectations(t)

	store := filestore.NewS3FileStore(filestore.S3API{S3PresignClient: &presignClient}, bucket, urlExpiry, zap.NewNop().Sugar())
	url, err := store.GetURL(context.Background(), key)
	require.NoError(t, err)
	assert.Equal(t, wantURL, url)
}

// testReader is an io.Reader that yields exp bytes in total, returning
// io.EOF together with the final chunk of data.
type testReader struct {
	count, exp int
}

func (r *testReader) Read(p []byte) (int, error) {
	max := r.exp - r.count
	if max <= len(p) {
		r.count += max
		return max, io.EOF
	}
	r.count += len(p)
	return len(p), nil
}

func TestS3PutObject(t *testing.T) {
	const (
		bucket        = "some-bucket"
		key           = "foo/bar"
		contentType   = "audio/mp3"
		contentLength = 20_000_000
	)

	uploadID := "abc123"

	mockS3Client := func() *mocks.S3Client {
		var s3Client mocks.S3Client
		s3Client.On("CreateMultipartUpload", mock.Anything, mock.MatchedBy(func(input *s3.CreateMultipartUploadInput) bool {
			return *input.Bucket == bucket && *input.Key == key && *input.ContentType == contentType
		})).Return(&s3.CreateMultipartUploadOutput{UploadId: &uploadID}, nil)
		return &s3Client
	}

	t.Run("OK", func(t *testing.T) {
		s3Client := mockS3Client()
		eTag := `"foo"`
		var partLengths []int64
		s3Client.On("UploadPart", mock.Anything, mock.MatchedBy(func(input *s3.UploadPartInput) bool {
			partLengths = append(partLengths, input.ContentLength)
			return *input.Bucket == bucket && *input.Key == key && *input.UploadId == uploadID
		})).Return(&s3.UploadPartOutput{ETag: &eTag}, nil)
		s3Client.On("CompleteMultipartUpload", mock.Anything, mock.MatchedBy(func(input *s3.CompleteMultipartUploadInput) bool {
			return *input.Bucket == bucket && *input.Key == key && *input.UploadId == uploadID
		})).Return(nil, nil)
		defer s3Client.AssertExpectations(t)

		store := filestore.NewS3FileStore(filestore.S3API{S3Client: s3Client}, bucket, time.Hour, zap.NewNop().Sugar())

		n, err := store.PutObject(context.Background(), key, &testReader{exp: contentLength}, contentType)
		require.NoError(t, err)
		assert.Equal(t, int64(contentLength), n)
		// 20,000,000 bytes split into 5 MiB (5_242_880-byte) parts: three full
		// parts plus a 4,271,360-byte remainder.
		assert.ElementsMatch(t, []int64{5_242_880, 5_242_880, 5_242_880, 4_271_360}, partLengths)
	})

	t.Run("NOK,UploadPartFailure", func(t *testing.T) {
		s3Client := mockS3Client()
		s3Client.On("UploadPart", mock.Anything, mock.Anything).Return(nil, errors.New("boom"))
		s3Client.On("AbortMultipartUpload", mock.Anything, mock.MatchedBy(func(input *s3.AbortMultipartUploadInput) bool {
			return *input.Bucket == bucket && *input.Key == key && *input.UploadId == uploadID
		})).Return(nil, nil)
		defer s3Client.AssertExpectations(t)

		store := filestore.NewS3FileStore(filestore.S3API{S3Client: s3Client}, bucket, time.Hour, zap.NewNop().Sugar())
		_, err := store.PutObject(context.Background(), key, &testReader{exp: contentLength}, contentType)
		assert.EqualError(t, err, "error while uploading part: boom")
	})
}