dashboard / erock/pico / chore(pobj.fs): feature parity with minio #66

accepted · opened on 2025-04-18T17:16:07Z by erock
Help
check out the latest patchset:
ssh pr.pico.sh print pr-66 | git am -3
check out any patchset in a patch request:
ssh pr.pico.sh print ps-X | git am -3
add changes to a patch request:
git format-patch main --stdout | ssh pr.pico.sh pr add 66
add a review to a patch request:
git format-patch main --stdout | ssh pr.pico.sh pr add --review 66
accept PR:
ssh pr.pico.sh pr accept 66
close PR:
ssh pr.pico.sh pr close 66

Logs

erock created pr with ps-133 on 2025-04-18T17:16:07Z
erock changed pr name on 2025-04-18T17:18:11Z {"name":"chore(pobj.fs): feature parity with minio"}
erock added ps-134 on 2025-04-21T18:44:44Z
erock changed status on 2025-04-22T22:17:41Z {"status":"accepted"}

Patchsets

ps-133 by erock on 2025-04-18T17:16:07Z
Range Diff rd-134
1: a3f9ef2 = 1: a3f9ef2 chore: rm UserMetadata
2: ccd89b0 = 2: ccd89b0 refactor(shared): mime type pkg
3: 8621e35 = 3: 8621e35 chore(pobj.fs): provide metadata
-: ------- > 4: 831b150 chore(storage.fs): add tests
ps-134 by erock on 2025-04-21T18:44:44Z

Range-diff rd-134

old         new         patch  title
#1 a3f9ef2  #1 a3f9ef2  equal  chore: rm UserMetadata
#2 ccd89b0  #2 ccd89b0  equal  refactor(shared): mime type pkg
#3 8621e35  #3 8621e35  equal  chore(pobj.fs): provide metadata
#0 (none)   #4 831b150  added  chore(storage.fs): add tests

old:pkg/pobj/storage/fs.go new:pkg/pobj/storage/fs.go
 		return bucket, nil
 	}
 
-	dir := filepath.Join(s.Dir, bucket.Path)
+	dir := filepath.Join(s.Dir, name)
 	s.Logger.Info("bucket not found, creating", "dir", dir, "err", err)
 	err = os.MkdirAll(dir, os.ModePerm)
 	if err != nil {
 	return uint64(dsize), err
 }
 
+// DeleteBucket will delete the bucket and all of its contents, even if files still exist inside it.
+// This is different from the minio impl, which requires all files to be deleted first.
 func (s *StorageFS) DeleteBucket(bucket Bucket) error {
 	return os.RemoveAll(bucket.Path)
 }
 		return err
 	}
 
-	// try to remove dir if it is empty
+	// traverse up the folder tree and remove all empty folders
 	dir := filepath.Dir(loc)
-	_ = os.Remove(dir)
+	for dir != "" {
+		err = os.Remove(dir)
+		if err != nil {
+			break
+		}
+		fp := strings.Split(dir, "/")
+		dir = "/" + filepath.Join(fp[:len(fp)-1]...)
+	}
 
 	return nil
 }
 
 func (s *StorageFS) ListBuckets() ([]string, error) {
-	return []string{}, fmt.Errorf("not implemented")
+	entries, err := os.ReadDir(s.Dir)
+	if err != nil {
+		return []string{}, err
+	}
+
+	buckets := []string{}
+	for _, e := range entries {
+		if !e.IsDir() {
+			continue
+		}
+		buckets = append(buckets, e.Name())
+	}
+	return buckets, nil
 }
 
 func (s *StorageFS) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
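
The DeleteObject hunk above walks upward from the deleted object's directory, calling os.Remove on each parent until the call fails, which happens as soon as a directory still has contents. A minimal standalone sketch of the same idea, with a hypothetical removeEmptyParents helper and filepath.Dir used for the upward step, might look like this:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeEmptyParents mirrors the cleanup loop in DeleteObject: starting at
// dir, it removes each directory and moves to its parent until os.Remove
// fails (the directory is not empty) or the stop directory is reached.
func removeEmptyParents(dir, stop string) {
	for dir != "" && dir != stop {
		if err := os.Remove(dir); err != nil {
			// a non-empty parent ends the walk without touching sibling files
			break
		}
		dir = filepath.Dir(dir)
	}
}

func main() {
	root, _ := os.MkdirTemp("", "cleanup-demo-")
	defer os.RemoveAll(root)

	nested := filepath.Join(root, "here", "yes", "we")
	_ = os.MkdirAll(nested, os.ModePerm)

	removeEmptyParents(nested, root)

	_, err := os.Stat(filepath.Join(root, "here"))
	fmt.Println(os.IsNotExist(err)) // true: every empty parent was removed
}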

new:pkg/shared/storage/fs_test.go
+package storage
+
+import (
+	"io"
+	"io/fs"
+	"log/slog"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/picosh/pico/pkg/send/utils"
+)
+
+func TestFsAdapter(t *testing.T) {
+	logger := slog.Default()
+	f, err := os.MkdirTemp("", "fs-tests-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(f)
+
+	st, err := NewStorageFS(logger, f)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bucketName := "main"
+	// create bucket
+	bucket, err := st.UpsertBucket(bucketName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure bucket exists
+	file, err := os.Stat(bucket.Path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !file.IsDir() {
+		t.Fatal("bucket must be directory")
+	}
+
+	bucketCheck, err := st.GetBucket(bucketName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bucketCheck.Path != bucket.Path || bucketCheck.Name != bucket.Name {
+		t.Fatal("upsert and get bucket incongruent")
+	}
+
+	modTime := time.Now()
+
+	str := "here is a test file"
+	reader := strings.NewReader(str)
+	actualPath, size, err := st.PutObject(bucket, "./nice/test.txt", reader, &utils.FileEntry{
+		Mtime: modTime.Unix(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if size != int64(len(str)) {
+		t.Fatalf("size, actual: %d, expected: %d", size, int64(len(str)))
+	}
+	expectedPath := filepath.Join(bucket.Path, "nice", "test.txt")
+	if actualPath != expectedPath {
+		t.Fatalf("path, actual: %s, expected: %s", actualPath, expectedPath)
+	}
+
+	// ensure file exists
+	_, err = os.Stat(expectedPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// get file
+	r, info, err := st.GetObject(bucket, "nice/test.txt")
+	if err != nil {
+		t.Fatal(err)
+	}
+	buf := new(strings.Builder)
+	_, err = io.Copy(buf, r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	actualStr := buf.String()
+	if actualStr != str {
+		t.Fatalf("contents, actual: %s, expected: %s", actualStr, str)
+	}
+	if info.Size != size {
+		t.Fatalf("size, actual: %d, expected: %d", info.Size, size)
+	}
+
+	str = "a deeply nested test file"
+	reader = strings.NewReader(str)
+	_, _, err = st.PutObject(bucket, "./here/we/go/again.txt", reader, &utils.FileEntry{
+		Mtime: modTime.Unix(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// list objects
+	objs, err := st.ListObjects(bucket, "/", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedObjs := []fs.FileInfo{
+		&utils.VirtualFile{
+			FName:  "main",
+			FIsDir: true,
+			FSize:  80,
+		},
+		&utils.VirtualFile{
+			FName:  "here",
+			FIsDir: true,
+			FSize:  60,
+		},
+		&utils.VirtualFile{
+			FName:  "we",
+			FIsDir: true,
+			FSize:  60,
+		},
+		&utils.VirtualFile{
+			FName:  "go",
+			FIsDir: true,
+			FSize:  60,
+		},
+		&utils.VirtualFile{FName: "again.txt", FSize: 25},
+		&utils.VirtualFile{
+			FName:  "nice",
+			FIsDir: true,
+			FSize:  60,
+		},
+		&utils.VirtualFile{FName: "test.txt", FSize: 19},
+	}
+	ignore := cmpopts.IgnoreFields(utils.VirtualFile{}, "FModTime")
+	if cmp.Equal(objs, expectedObjs, ignore) == false {
+		//nolint
+		t.Fatal(cmp.Diff(objs, expectedObjs, ignore))
+	}
+
+	// list buckets
+	aBucket, _ := st.UpsertBucket("another")
+	_, _ = st.UpsertBucket("and-another")
+	buckets, err := st.ListBuckets()
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedBuckets := []string{"and-another", "another", "main"}
+	if cmp.Equal(buckets, expectedBuckets) == false {
+		//nolint
+		t.Fatal(cmp.Diff(buckets, expectedBuckets))
+	}
+
+	// delete bucket
+	err = st.DeleteBucket(aBucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure bucket was actually deleted
+	_, err = os.Stat(aBucket.Path)
+	if !os.IsNotExist(err) {
+		t.Fatal("directory should have been deleted")
+	}
+
+	err = st.DeleteObject(bucket, "nice/test.txt")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure file was actually deleted
+	_, err = os.Stat(filepath.Join(bucket.Path, "nice/test.txt"))
+	if !os.IsNotExist(err) {
+		t.Fatal("file should have been deleted")
+	}
+
+	// ensure containing folder was also deleted
+	_, err = os.Stat(filepath.Join(bucket.Path, "nice"))
+	if !os.IsNotExist(err) {
+		t.Fatal("containing folder should have been deleted")
+	}
+
+	str = "a deeply nested test file"
+	reader = strings.NewReader(str)
+	_, _, err = st.PutObject(bucket, "./here/yes/we/can.txt", reader, &utils.FileEntry{
+		Mtime: modTime.Unix(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// delete deeply nested file and all parent folders that are now empty
+	err = st.DeleteObject(bucket, "here/yes/we/can.txt")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = os.Stat(filepath.Join(bucket.Path, "here"))
+	if os.IsNotExist(err) {
+		t.Fatal("this folder had multiple files and should not have been deleted")
+	}
+	_, err = os.Stat(filepath.Join(bucket.Path, "here/yes"))
+	if !os.IsNotExist(err) {
+		t.Fatal("containing folder should have been deleted")
+	}
+
+	// delete bucket even with file contents
+	err = st.DeleteBucket(bucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
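
The last assertions in the test pin down the behaviour described in the new DeleteBucket comment: the fs adapter removes a bucket even when objects still live inside it, unlike the minio adapter. If that guarantee ever needs its own regression check, a small follow-up test in the same package could look like the sketch below; it only reuses names already exercised above (NewStorageFS, UpsertBucket, PutObject, DeleteBucket, utils.FileEntry), and the test name itself is hypothetical.

package storage

import (
	"log/slog"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/picosh/pico/pkg/send/utils"
)

// TestFsDeleteNonEmptyBucket is a sketch of an isolated regression test for
// DeleteBucket: unlike the minio adapter, the fs adapter removes the bucket
// along with any objects still stored inside it.
func TestFsDeleteNonEmptyBucket(t *testing.T) {
	dir, err := os.MkdirTemp("", "fs-delete-bucket-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	st, err := NewStorageFS(slog.Default(), dir)
	if err != nil {
		t.Fatal(err)
	}

	bucket, err := st.UpsertBucket("main")
	if err != nil {
		t.Fatal(err)
	}

	// leave an object in the bucket on purpose
	_, _, err = st.PutObject(bucket, "./nested/file.txt", strings.NewReader("data"), &utils.FileEntry{
		Mtime: time.Now().Unix(),
	})
	if err != nil {
		t.Fatal(err)
	}

	// DeleteBucket should succeed even though the bucket is not empty
	if err := st.DeleteBucket(bucket); err != nil {
		t.Fatal(err)
	}
	if _, err := os.Stat(bucket.Path); !os.IsNotExist(err) {
		t.Fatal("bucket should have been removed along with its contents")
	}
}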