// clipper/backend/media/uploader.go
package media

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"sort"
	"sync"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// multipartUploadWriter is a Writer that uploads transparently to an S3 bucket
// in 5MB parts. It buffers data internally until a part is ready to send over
// the network. Parts are sent as soon as they exceed the minimum part size of
// 5MB.
//
// The caller must call either Complete() or Abort() after finishing writing.
// Failure to do so will leave S3 in an inconsistent state.
type multipartUploadWriter struct {
	ctx context.Context
	wg  sync.WaitGroup // tracks in-flight uploadPart goroutines

	s3  S3Client
	buf *bytes.Buffer // accumulates written bytes until a part is ready to send

	bucket, key, contentType string

	partNum       int32             // next S3 part number to assign (starts at 1)
	uploadResults chan uploadResult // unbuffered; uploadPart goroutines report here

	uploadID string // S3 multipart upload ID from CreateMultipartUpload
}

// uploadResult reports the outcome of a single part upload. On failure only
// err is set; on success completedPart and size are set and err is nil.
type uploadResult struct {
	completedPart types.CompletedPart
	size          int64 // number of bytes uploaded in this part
	err           error
}

// targetPartSizeBytes is the buffered size at which a part upload is
// triggered. It matches S3's minimum allowed size for non-final parts.
const targetPartSizeBytes = 5 * 1024 * 1024 // 5MB
// newMultipartUploadWriter creates a new multipart upload writer, including
// creating the upload on S3. Either Complete or Abort must be called after
// calling this function.
func newMultipartUploadWriter(ctx context.Context, s3Client S3Client, bucket, key, contentType string) (*multipartUploadWriter, error) {
input := s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
ContentType: aws.String(contentType),
}
output, err := s3Client.CreateMultipartUpload(ctx, &input)
if err != nil {
return nil, fmt.Errorf("error creating multipart upload: %v", err)
}
2021-11-02 16:20:47 +00:00
const bufferOverflowSize = 16_384
b := make([]byte, 0, targetPartSizeBytes+bufferOverflowSize)
2021-10-27 19:34:59 +00:00
return &multipartUploadWriter{
2021-11-08 01:54:43 +00:00
ctx: ctx,
s3: s3Client,
buf: bytes.NewBuffer(b),
bucket: bucket,
key: key,
contentType: contentType,
partNum: 1,
uploadResults: make(chan uploadResult),
uploadID: *output.UploadId,
2021-10-27 19:34:59 +00:00
}, nil
}
func (u *multipartUploadWriter) Write(p []byte) (int, error) {
n, err := u.buf.Write(p)
if err != nil {
return n, fmt.Errorf("error writing to buffer: %v", err)
}
if u.buf.Len() >= targetPartSizeBytes {
2021-11-08 01:54:43 +00:00
buf := make([]byte, u.buf.Len())
copy(buf, u.buf.Bytes())
u.buf.Truncate(0)
2021-10-27 19:34:59 +00:00
2021-11-08 01:54:43 +00:00
u.wg.Add(1)
go u.uploadPart(buf, u.partNum)
2021-10-27 19:34:59 +00:00
2021-11-08 01:54:43 +00:00
u.partNum++
2021-10-27 19:34:59 +00:00
}
return n, err
}
2021-11-08 01:54:43 +00:00
func (u *multipartUploadWriter) uploadPart(buf []byte, partNum int32) {
defer u.wg.Done()
2021-10-27 19:34:59 +00:00
2021-11-08 01:54:43 +00:00
partLen := len(buf)
log.Printf("uploading part num = %d, len = %d", partNum, partLen)
input := s3.UploadPartInput{
Body: bytes.NewReader(buf),
Bucket: aws.String(u.bucket),
Key: aws.String(u.key),
PartNumber: partNum,
UploadId: aws.String(u.uploadID),
ContentLength: int64(partLen),
2021-10-27 19:34:59 +00:00
}
2021-11-08 01:54:43 +00:00
output, uploadErr := u.s3.UploadPart(u.ctx, &input)
if uploadErr != nil {
// TODO: retry on failure
u.uploadResults <- uploadResult{err: fmt.Errorf("error uploading part: %v", uploadErr)}
return
2021-10-27 19:34:59 +00:00
}
2021-11-08 01:54:43 +00:00
log.Printf("uploaded part num = %d, etag = %s, bytes = %d", partNum, *output.ETag, partLen)
2021-10-27 19:34:59 +00:00
2021-11-08 01:54:43 +00:00
u.uploadResults <- uploadResult{
completedPart: types.CompletedPart{ETag: output.ETag, PartNumber: partNum},
size: int64(partLen),
}
2021-10-27 19:34:59 +00:00
}
// Close signals that no further data will be written to the writer.
// Always returns nil.
func (u *multipartUploadWriter) Close() error {
// TODO: trigger Complete() here too?
close(u.uploadResults)
return nil
}
2021-11-08 01:54:43 +00:00
// Complete waits for all currently uploading parts to be uploaded, and
// finalizes the object in S3.
//
// Close() must have been been called first.
2021-11-02 18:03:26 +00:00
func (u *multipartUploadWriter) Complete() (int64, error) {
completedParts := make([]types.CompletedPart, 0, 64)
2021-11-08 01:54:43 +00:00
var uploadedBytes int64
// Write() launches multiple goroutines to upload the parts asynchronously.
// We need a waitgroup to ensure that all parts are complete, and the channel
// has been closed, before we continue.
uploadDone := make(chan struct{})
2021-11-08 01:54:43 +00:00
go func() {
u.wg.Wait()
close(u.uploadResults)
uploadDone <- struct{}{}
2021-11-08 01:54:43 +00:00
}()
outer:
for {
select {
case uploadResult, ok := <-u.uploadResults:
if !ok {
break outer
}
// if len(completedParts) == 3 {
// return 0, errors.New("nope")
// }
2021-11-08 01:54:43 +00:00
if uploadResult.err != nil {
return 0, uploadResult.err
}
log.Println("APPENDING PART, len now", len(completedParts))
2021-11-08 01:54:43 +00:00
completedParts = append(completedParts, uploadResult.completedPart)
uploadedBytes += uploadResult.size
case <-uploadDone:
2021-11-08 01:54:43 +00:00
break outer
case <-u.ctx.Done():
return 0, u.ctx.Err()
}
}
if len(completedParts) == 0 {
return 0, errors.New("no parts available to upload")
2021-10-27 19:34:59 +00:00
}
log.Printf("parts - %+v, bucket - %s, key - %s, id - %s", completedParts, u.bucket, u.key, u.uploadID)
log.Printf("len(parts) = %d, cap(parts) = %d", len(completedParts), cap(completedParts))
2021-10-27 19:34:59 +00:00
input := s3.CompleteMultipartUploadInput{
2021-11-08 01:54:43 +00:00
Bucket: aws.String(u.bucket),
Key: aws.String(u.key),
UploadId: aws.String(u.uploadID),
MultipartUpload: &types.CompletedMultipartUpload{Parts: completedParts},
}
_, err := u.s3.CompleteMultipartUpload(u.ctx, &input)
if err != nil {
return 0, fmt.Errorf("error completing upload: %v", err)
}
log.Printf("completed upload, key = %s, bytesUploaded = %d", u.key, uploadedBytes)
return uploadedBytes, nil
}
// Abort aborts the upload process, cancelling the upload on S3. It accepts a
// separate context to the associated writer in case it is called during
// cleanup after the original context was killed.
func (u *multipartUploadWriter) Abort(ctx context.Context) error {
input := s3.AbortMultipartUploadInput{
2021-10-27 19:34:59 +00:00
Bucket: aws.String(u.bucket),
Key: aws.String(u.key),
UploadId: aws.String(u.uploadID),
}
2021-11-08 01:54:43 +00:00
_, err := u.s3.AbortMultipartUpload(ctx, &input)
2021-10-27 19:34:59 +00:00
if err != nil {
2021-11-08 01:54:43 +00:00
return fmt.Errorf("error aborting upload: %v", err)
2021-10-27 19:34:59 +00:00
}
2021-11-08 01:54:43 +00:00
log.Printf("aborted upload, key = %s", u.key)
return nil
2021-10-27 19:34:59 +00:00
}