
Commit

Merge branch 'master' into feature-PutObjectTagging
johannesboyne authored Feb 17, 2024
2 parents 4d9f6f2 + c55a48f commit 5b1d549
Showing 22 changed files with 640 additions and 227 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -199,4 +199,4 @@ A big thank you to all the [contributors](https://github.com/johannesboyne/gofak
especially [Blake @shabbyrobe](https://github.com/shabbyrobe) who pushed this
little project to the next level!

**Help wanred**
**Help wanted**
41 changes: 41 additions & 0 deletions backend.go
@@ -1,7 +1,9 @@
package gofakes3

import (
"encoding/hex"
"io"
"time"

"github.com/aws/aws-sdk-go/aws/awserr"
)
@@ -231,6 +233,8 @@ type Backend interface {
PutObject(bucketName, key string, meta map[string]string, tags map[string]string, input io.Reader, size int64) (PutObjectResult, error)

DeleteMulti(bucketName string, objects ...string) (MultiDeleteResult, error)

CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (CopyObjectResult, error)
}

// VersionedBackend may be optionally implemented by a Backend in order to support
@@ -310,6 +314,43 @@ type VersionedBackend interface {
ListBucketVersions(bucketName string, prefix *Prefix, page *ListBucketVersionsPage) (*ListBucketVersionsResult, error)
}

// MultipartBackend may be optionally implemented by a Backend in order to
// support S3 multipart uploads.
// If you don't implement MultipartBackend, GoFakeS3 will fall back to an
// in-memory implementation which holds all parts in memory until the upload
// gets finalised and pushed to the backend.
type MultipartBackend interface {
CreateMultipartUpload(bucket, object string, meta map[string]string) (UploadID, error)
UploadPart(bucket, object string, id UploadID, partNumber int, contentLength int64, input io.Reader) (etag string, err error)

ListMultipartUploads(bucket string, marker *UploadListMarker, prefix Prefix, limit int64) (*ListMultipartUploadsResult, error)
ListParts(bucket, object string, uploadID UploadID, marker int, limit int64) (*ListMultipartUploadPartsResult, error)

AbortMultipartUpload(bucket, object string, id UploadID) error
CompleteMultipartUpload(bucket, object string, id UploadID, input *CompleteMultipartUploadRequest) (versionID VersionID, etag string, err error)
}

// CopyObject is a helper function useful for quickly implementing CopyObject on
// a backend that already supports GetObject and PutObject. This isn't very
// efficient so only use this if performance isn't important.
func CopyObject(db Backend, srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result CopyObjectResult, err error) {
c, err := db.GetObject(srcBucket, srcKey, nil)
if err != nil {
return
}
defer c.Contents.Close()

_, err = db.PutObject(dstBucket, dstKey, meta, nil, c.Contents, c.Size)
if err != nil {
return
}

return CopyObjectResult{
ETag: `"` + hex.EncodeToString(c.Hash) + `"`,
LastModified: NewContentTime(time.Now()),
}, nil
}

func MergeMetadata(db Backend, bucketName string, objectName string, meta map[string]string) error {
// get potential existing object to potentially carry metadata over
existingObj, err := db.GetObject(bucketName, objectName, nil)
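
The two additions above are deliberately opt-in: GoFakeS3 uses MultipartBackend and CopyObject only when the backend provides them, otherwise it falls back to in-memory multipart handling or to the slow GetObject/PutObject copy performed by the gofakes3.CopyObject helper. A minimal sketch of how a backend can opt in, based only on the interfaces shown in this diff; the wrapper type and function names are illustrative, not part of the library:

```go
package example

import "github.com/johannesboyne/gofakes3"

// copyingBackend is an illustrative wrapper: it embeds any existing Backend
// and gains CopyObject support by delegating to the gofakes3.CopyObject
// helper shown above.
type copyingBackend struct {
	gofakes3.Backend
}

func (b *copyingBackend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (gofakes3.CopyObjectResult, error) {
	return gofakes3.CopyObject(b, srcBucket, srcKey, dstBucket, dstKey, meta)
}

// supportsMultipart reports whether a Backend also implements the optional
// MultipartBackend interface; GoFakeS3 performs an equivalent check before
// falling back to its in-memory multipart implementation.
func supportsMultipart(b gofakes3.Backend) bool {
	_, ok := b.(gofakes3.MultipartBackend)
	return ok
}
```
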
2 changes: 1 addition & 1 deletion backend/s3afero/backend_test.go
@@ -298,7 +298,7 @@ func TestMultiCreateBucket(t *testing.T) {
}
defer os.RemoveAll(tmp)

fs, err := FsPath(tmp)
fs, err := FsPath(tmp, 0)
if err != nil {
t.Fatal(err)
}
33 changes: 25 additions & 8 deletions backend/s3afero/multi.go
Expand Up @@ -31,6 +31,7 @@ type MultiBucketBackend struct {
bucketFs afero.Fs
metaStore *metaStore
dirMode os.FileMode
flags FsFlags

// FIXME(bw): values in here should not be used beyond the configuration
// step; maybe this can be cleaned up later using a builder struct or
@@ -47,19 +48,28 @@ func MultiBucket(fs afero.Fs, opts ...MultiOption) (*MultiBucketBackend, error)
return nil, err
}

b := &MultiBucketBackend{
baseFs: fs,
bucketFs: afero.NewBasePathFs(fs, "buckets"),
dirMode: 0700,
}
b := &MultiBucketBackend{}
for _, opt := range opts {
if err := opt(b); err != nil {
return nil, err
}
}

bucketsFs, err := NewBasePathFs(fs, "buckets", FsPathCreateAll)
if err != nil {
return nil, err
}

b.baseFs = fs
b.bucketFs = bucketsFs
b.dirMode = 0700

if b.configOnly.metaFs == nil {
b.configOnly.metaFs = afero.NewBasePathFs(fs, "metadata")
metaFs, err := NewBasePathFs(fs, "metadata", FsPathCreateAll)
if err != nil {
return nil, err
}
b.configOnly.metaFs = metaFs
}
b.metaStore = newMetaStore(b.configOnly.metaFs, modTimeFsCalc(fs))

@@ -141,7 +151,7 @@ func (db *MultiBucketBackend) getBucketWithFilePrefixLocked(bucket string, prefi
}

if entry.IsDir() {
response.AddPrefix(path.Join(prefixPath, prefixPart))
response.AddPrefix(path.Join(prefixPath, prefixPart, entry.Name()) + "/")

} else {
size := entry.Size()
@@ -292,6 +302,8 @@ func (db *MultiBucketBackend) HeadObject(bucketName, objectName string) (*gofake
return nil, gofakes3.KeyNotFound(objectName)
} else if err != nil {
return nil, err
} else if stat.IsDir() {
return nil, gofakes3.KeyNotFound(objectName)
}

size, mtime := stat.Size(), stat.ModTime()
@@ -331,7 +343,6 @@ func (db *MultiBucketBackend) GetObject(bucketName, objectName string, rangeRequ
} else if err != nil {
return nil, err
}

defer func() {
// If an error occurs, the caller may not have access to Object.Body in order to close it:
if obj == nil && rerr != nil {
@@ -342,6 +353,8 @@ func (db *MultiBucketBackend) GetObject(bucketName, objectName string, rangeRequ
stat, err := f.Stat()
if err != nil {
return nil, err
} else if stat.IsDir() {
return nil, gofakes3.KeyNotFound(objectName)
}

size, mtime := stat.Size(), stat.ModTime()
@@ -456,6 +469,10 @@ func (db *MultiBucketBackend) PutObject(
return result, nil
}

func (db *MultiBucketBackend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
return gofakes3.CopyObject(db, srcBucket, srcKey, dstBucket, dstKey, meta)
}

func (db *MultiBucketBackend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
db.lock.Lock()
defer db.lock.Unlock()
7 changes: 7 additions & 0 deletions backend/s3afero/option.go
@@ -16,4 +16,11 @@ func MultiWithMetaFs(fs afero.Fs) MultiOption {
}
}

func MultiFsFlags(flags FsFlags) MultiOption {
return func(b *MultiBucketBackend) error {
b.flags = flags
return nil
}
}

type SingleOption func(b *SingleBucketBackend) error
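
MultiFsFlags joins MultiWithMetaFs as an option for the multi-bucket backend. A hedged usage sketch wiring the option into a server; it assumes an FsFlags value such as FsPathCreateAll (used elsewhere in this diff) is what callers are expected to pass, and the data directory is illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/johannesboyne/gofakes3"
	"github.com/johannesboyne/gofakes3/backend/s3afero"
	"github.com/spf13/afero"
)

func main() {
	// Store buckets and metadata under ./s3data. FsPathCreateAll is assumed
	// to be the flag intended for creating missing directories.
	fs := afero.NewBasePathFs(afero.NewOsFs(), "./s3data")
	backend, err := s3afero.MultiBucket(fs, s3afero.MultiFsFlags(s3afero.FsPathCreateAll))
	if err != nil {
		log.Fatal(err)
	}

	// Serve the fake S3 API on localhost.
	faker := gofakes3.New(backend)
	log.Fatal(http.ListenAndServe(":9000", faker.Server()))
}
```
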
75 changes: 57 additions & 18 deletions backend/s3afero/single.go
@@ -3,7 +3,7 @@ package s3afero
import (
"crypto/md5"
"encoding/hex"
"fmt"
"errors"
"io"
"log"
"os"
@@ -131,13 +131,12 @@ func (db *SingleBucketBackend) getBucketWithFilePrefixLocked(bucket string, pref
}

if entry.IsDir() {
response.AddPrefix(path.Join(prefixPath, prefixPart))
response.AddPrefix(path.Join(prefixPath, entry.Name()) + "/")

} else {
size := entry.Size()
mtime := entry.ModTime()

meta, err := db.metaStore.loadMeta(bucket, objectPath, size, mtime)
meta, err := db.ensureMeta(bucket, objectPath, size, mtime)
if err != nil {
return nil, err
}
@@ -157,31 +156,25 @@ func (db *SingleBucketBackend) getBucketWithFilePrefixLocked(bucket string, pref
func (db *SingleBucketBackend) getBucketWithArbitraryPrefixLocked(bucket string, prefix *gofakes3.Prefix) (*gofakes3.ObjectList, error) {
response := gofakes3.NewObjectList()

if err := afero.Walk(db.fs, filepath.FromSlash(bucket), func(path string, info os.FileInfo, err error) error {
if err := afero.Walk(db.fs, filepath.FromSlash("."), func(path string, info os.FileInfo, err error) error {
if err != nil || info.IsDir() {
return err
}

objectPath := filepath.ToSlash(path)
parts := strings.SplitN(objectPath, "/", 2)
if len(parts) != 2 {
panic(fmt.Errorf("unexpected path %q", path)) // should never happen
}
objectName := parts[1]

if !prefix.Match(objectName, nil) {
if !prefix.Match(objectPath, nil) {
return nil
}

size := info.Size()
mtime := info.ModTime()
meta, err := db.metaStore.loadMeta(bucket, objectName, size, mtime)
meta, err := db.ensureMeta(bucket, objectPath, size, mtime)
if err != nil {
return err
}

response.Add(&gofakes3.Content{
Key: objectName,
Key: objectPath,
LastModified: gofakes3.NewContentTime(mtime),
ETag: `"` + hex.EncodeToString(meta.Hash) + `"`,
Size: size,
@@ -196,6 +189,46 @@ func (db *SingleBucketBackend) getBucketWithArbitraryPrefixLocked(bucket string,
return response, nil
}

func (db *SingleBucketBackend) ensureMeta(
bucket string,
objectPath string,
size int64,
mtime time.Time,
) (meta *Metadata, err error) {
existingMeta, err := db.metaStore.loadMeta(bucket, objectPath, size, mtime)
if errors.Is(err, os.ErrNotExist) {
f, err := db.fs.Open(filepath.FromSlash(objectPath))
if err != nil {
return nil, err
}
defer f.Close()

hasher := md5.New()
if _, err := io.Copy(hasher, f); err != nil {
return nil, err
}

hash := hasher.Sum(nil)

return &Metadata{
objectPath,
mtime,
size,
hash,
map[string]string{},
}, nil

} else if err != nil {
return nil, err

} else {
return existingMeta, nil
}
}

func (db *SingleBucketBackend) HeadObject(bucketName, objectName string) (*gofakes3.Object, error) {
if bucketName != db.name {
return nil, gofakes3.BucketNotFound(bucketName)
@@ -209,11 +242,12 @@ func (db *SingleBucketBackend) HeadObject(bucketName, objectName string) (*gofak
return nil, gofakes3.KeyNotFound(objectName)
} else if err != nil {
return nil, err
} else if stat.IsDir() {
return nil, gofakes3.KeyNotFound(objectName)
}

size, mtime := stat.Size(), stat.ModTime()

meta, err := db.metaStore.loadMeta(bucketName, objectName, size, mtime)
meta, err := db.ensureMeta(bucketName, objectName, size, mtime)
if err != nil {
return nil, err
}
@@ -242,7 +276,6 @@ func (db *SingleBucketBackend) GetObject(bucketName, objectName string, rangeReq
} else if err != nil {
return nil, err
}

defer func() {
// If an error occurs, the caller may not have access to Object.Body in order to close it:
if err != nil && obj == nil {
@@ -253,6 +286,8 @@ func (db *SingleBucketBackend) GetObject(bucketName, objectName string, rangeReq
stat, err := f.Stat()
if err != nil {
return nil, err
} else if stat.IsDir() {
return nil, gofakes3.KeyNotFound(objectName)
}

size, mtime := stat.Size(), stat.ModTime()
@@ -270,7 +305,7 @@ func (db *SingleBucketBackend) GetObject(bucketName, objectName string, rangeReq
rdr = limitReadCloser(rdr, f.Close, rnge.Length)
}

meta, err := db.metaStore.loadMeta(bucketName, objectName, size, mtime)
meta, err := db.ensureMeta(bucketName, objectName, size, mtime)
if err != nil {
return nil, err
}
@@ -387,6 +422,10 @@ func (db *SingleBucketBackend) DeleteMulti(bucketName string, objects ...string)
return result, nil
}

func (db *SingleBucketBackend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
return gofakes3.CopyObject(db, srcBucket, srcKey, dstBucket, dstKey, meta)
}

func (db *SingleBucketBackend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
if bucketName != db.name {
return result, gofakes3.BucketNotFound(bucketName)
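
With CopyObject wired through the Backend interface and the s3afero backends, the new behaviour can be exercised from an ordinary S3 client. A sketch using aws-sdk-go (v1) against an httptest server; the bucket names, keys, and credentials are illustrative, and the setup assumes the in-memory s3mem backend that gofakes3 ships with:

```go
package main

import (
	"log"
	"net/http/httptest"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/johannesboyne/gofakes3"
	"github.com/johannesboyne/gofakes3/backend/s3mem"
)

func main() {
	// Fake S3 server backed by the in-memory backend.
	backend := s3mem.New()
	faker := gofakes3.New(backend)
	ts := httptest.NewServer(faker.Server())
	defer ts.Close()

	sess, err := session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("KEY", "SECRET", ""),
		Endpoint:         aws.String(ts.URL),
		Region:           aws.String("eu-central-1"),
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Copy src-bucket/src-key to dst-bucket/dst-key; both buckets and the
	// source object are assumed to exist already.
	_, err = svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dst-bucket"),
		Key:        aws.String("dst-key"),
		CopySource: aws.String("src-bucket/src-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

A backend that cannot copy natively can still satisfy the interface by delegating to the gofakes3.CopyObject helper added in backend.go above.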
