dashboard / erock/pico / feat(pgs): lru cache for object info and special files #59 rss

accepted · opened on 2025-04-06T03:01:44Z by erock
Help
check out the latest patchset:
ssh pr.pico.sh print pr-59 | git am -3
check out any patchset in a patch request:
ssh pr.pico.sh print ps-X | git am -3
add changes to a patch request:
git format-patch main --stdout | ssh pr.pico.sh pr add 59
add a review to a patch request:
git format-patch main --stdout | ssh pr.pico.sh pr add --review 59
accept PR:
ssh pr.pico.sh pr accept 59
close PR:
ssh pr.pico.sh pr close 59

Logs

erock created pr with ps-119 on 2025-04-06T03:01:44Z
erock added ps-120 on 2025-04-06T03:03:49Z
erock added ps-122 on 2025-04-06T19:08:31Z
erock added ps-123 on 2025-04-06T19:41:38Z
erock changed status on 2025-04-06T22:13:51Z {"status":"accepted"}

Patchsets

ps-119 by erock on 2025-04-06T03:01:44Z
Range Diff ↕ rd-120
1: 2cf56f0 ! 1: 26daea4 feat(pgs): lru cache for object info and special files
2: caace51 ! 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
ps-120 by erock on 2025-04-06T03:03:49Z
Range Diff ↕ rd-122
1: 26daea4 = 1: 26daea4 feat(pgs): lru cache for object info and special files
2: b004b64 = 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
-: ------- > 3: 59f5618 refactor(pgs): store lru cache on web router
ps-122 by erock on 2025-04-06T19:08:31Z
Range Diff ↕ rd-123
1: 26daea4 = 1: 26daea4 feat(pgs): lru cache for object info and special files
2: b004b64 = 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
3: 59f5618 = 3: 59f5618 refactor(pgs): store lru cache on web router
-: ------- > 4: ee12290 refactor(pgs): update minio lru and remove object info cache
ps-123 by erock on 2025-04-06T19:41:38Z

feat(pgs): lru cache for object info and special files

go.mod link
+1 -0
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
diff --git a/go.mod b/go.mod
index 505a3d9..cd671f2 100644
--- a/go.mod
+++ b/go.mod
@@ -35,6 +35,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/feeds v1.2.0
 	github.com/gorilla/websocket v1.5.3
+	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/lib/pq v1.10.9
 	github.com/matryer/is v1.4.1
go.sum link
+2 -0
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
diff --git a/go.sum b/go.sum
index 78b7d75..ce59014 100644
--- a/go.sum
+++ b/go.sum
@@ -451,6 +451,8 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
 github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
pkg/apps/pgs/uploader.go link
+5 -5
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
diff --git a/pkg/apps/pgs/uploader.go b/pkg/apps/pgs/uploader.go
index 1857cca..8691b52 100644
--- a/pkg/apps/pgs/uploader.go
+++ b/pkg/apps/pgs/uploader.go
@@ -395,7 +395,7 @@ func (h *UploadAssetHandler) Write(s *pssh.SSHServerConnSession, entry *sendutil
 	)
 
 	specialFileMax := featureFlag.Data.SpecialFileMax
-	if isSpecialFile(entry) {
+	if isSpecialFile(entry.Filepath) {
 		sizeRemaining = min(sizeRemaining, specialFileMax)
 	}
 
@@ -441,9 +441,9 @@ func (h *UploadAssetHandler) Write(s *pssh.SSHServerConnSession, entry *sendutil
 	return str, err
 }
 
-func isSpecialFile(entry *sendutils.FileEntry) bool {
-	fname := filepath.Base(entry.Filepath)
-	return fname == "_headers" || fname == "_redirects"
+func isSpecialFile(entry string) bool {
+	fname := filepath.Base(entry)
+	return fname == "_headers" || fname == "_redirects" || fname == "_pgs_ignore"
 }
 
 func (h *UploadAssetHandler) Delete(s *pssh.SSHServerConnSession, entry *sendutils.FileEntry) error {
@@ -525,7 +525,7 @@ func (h *UploadAssetHandler) validateAsset(data *FileData) (bool, error) {
 	}
 
 	// special files we use for custom routing
-	if fname == "_pgs_ignore" || fname == "_redirects" || fname == "_headers" {
+	if isSpecialFile(fname) {
 		return true, nil
 	}
 
pkg/apps/pgs/web.go link
+2 -1
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
diff --git a/pkg/apps/pgs/web.go b/pkg/apps/pgs/web.go
index 7fca712..845190e 100644
--- a/pkg/apps/pgs/web.go
+++ b/pkg/apps/pgs/web.go
@@ -9,6 +9,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"path/filepath"
 	"regexp"
 	"strings"
 	"time"
@@ -426,7 +427,7 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
 		"host", r.Host,
 	)
 
-	if fname == "_headers" || fname == "_redirects" || fname == "_pgs_ignore" {
+	if isSpecialFile(fname) {
 		logger.Info("special file names are not allowed to be served over http")
 		http.Error(w, "404 not found", http.StatusNotFound)
 		return
pkg/apps/pgs/web_asset_handler.go link
+64 -41
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
diff --git a/pkg/apps/pgs/web_asset_handler.go b/pkg/apps/pgs/web_asset_handler.go
index 44b0532..d134936 100644
--- a/pkg/apps/pgs/web_asset_handler.go
+++ b/pkg/apps/pgs/web_asset_handler.go
@@ -14,10 +14,17 @@ import (
 	"net/http/httputil"
 	_ "net/http/pprof"
 
+	"github.com/hashicorp/golang-lru/v2/expirable"
+	"github.com/picosh/pico/pkg/cache"
 	sst "github.com/picosh/pico/pkg/pobj/storage"
 	"github.com/picosh/pico/pkg/shared/storage"
 )
 
+var (
+	redirectsCache = expirable.NewLRU[string, []*RedirectRule](2048, nil, cache.CacheTimeout)
+	headersCache   = expirable.NewLRU[string, []*HeaderRule](2048, nil, cache.CacheTimeout)
+)
+
 type ApiAssetHandler struct {
 	*WebRouter
 	Logger *slog.Logger
@@ -41,28 +48,36 @@ func hasProtocol(url string) bool {
 func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	logger := h.Logger
 	var redirects []*RedirectRule
-	redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
-	if err == nil {
-		defer redirectFp.Close()
-		if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize {
-			errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize)
-			logger.Error(errMsg)
-			http.Error(w, errMsg, http.StatusInternalServerError)
-			return
-		}
-		buf := new(strings.Builder)
-		lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize)
-		_, err := io.Copy(buf, lr)
-		if err != nil {
-			logger.Error("io copy", "err", err.Error())
-			http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
-			return
-		}
 
-		redirects, err = parseRedirectText(buf.String())
-		if err != nil {
-			logger.Error("could not parse redirect text", "err", err.Error())
+	redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects")
+	if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found {
+		redirects = cachedRedirects
+	} else {
+		redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
+		if err == nil {
+			defer redirectFp.Close()
+			if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize {
+				errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize)
+				logger.Error(errMsg)
+				http.Error(w, errMsg, http.StatusInternalServerError)
+				return
+			}
+			buf := new(strings.Builder)
+			lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize)
+			_, err := io.Copy(buf, lr)
+			if err != nil {
+				logger.Error("io copy", "err", err.Error())
+				http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
+				return
+			}
+
+			redirects, err = parseRedirectText(buf.String())
+			if err != nil {
+				logger.Error("could not parse redirect text", "err", err.Error())
+			}
 		}
+
+		redirectsCache.Add(redirectsCacheKey, redirects)
 	}
 
 	routes := calcRoutes(h.ProjectDir, h.Filepath, redirects)
@@ -163,28 +178,36 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	defer contents.Close()
 
 	var headers []*HeaderRule
-	headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
-	if err == nil {
-		defer headersFp.Close()
-		if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize {
-			errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Cfg.MaxSpecialFileSize)
-			logger.Error(errMsg)
-			http.Error(w, errMsg, http.StatusInternalServerError)
-			return
-		}
-		buf := new(strings.Builder)
-		lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize)
-		_, err := io.Copy(buf, lr)
-		if err != nil {
-			logger.Error("io copy", "err", err.Error())
-			http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
-			return
-		}
 
-		headers, err = parseHeaderText(buf.String())
-		if err != nil {
-			logger.Error("could not parse header text", "err", err.Error())
+	headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers")
+	if cachedHeaders, found := headersCache.Get(headersCacheKey); found {
+		headers = cachedHeaders
+	} else {
+		headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
+		if err == nil {
+			defer headersFp.Close()
+			if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize {
+				errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Cfg.MaxSpecialFileSize)
+				logger.Error(errMsg)
+				http.Error(w, errMsg, http.StatusInternalServerError)
+				return
+			}
+			buf := new(strings.Builder)
+			lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize)
+			_, err := io.Copy(buf, lr)
+			if err != nil {
+				logger.Error("io copy", "err", err.Error())
+				http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
+				return
+			}
+
+			headers, err = parseHeaderText(buf.String())
+			if err != nil {
+				logger.Error("could not parse header text", "err", err.Error())
+			}
 		}
+
+		headersCache.Add(headersCacheKey, headers)
 	}
 
 	userHeaders := []*HeaderLine{}
@@ -236,7 +259,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	w.WriteHeader(status)
-	_, err = io.Copy(w, contents)
+	_, err := io.Copy(w, contents)
 
 	if err != nil {
 		logger.Error("io copy", "err", err.Error())
pkg/cache/cache.go link
+21 -0
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
new file mode 100644
index 0000000..44de9e3
--- /dev/null
+++ b/pkg/cache/cache.go
@@ -0,0 +1,21 @@
+package cache
+
+import (
+	"log/slog"
+	"time"
+
+	"github.com/picosh/utils"
+)
+
+var CacheTimeout time.Duration
+
+func init() {
+	cacheDuration := utils.GetEnv("STORAGE_MINIO_CACHE_DURATION", "1m")
+	duration, err := time.ParseDuration(cacheDuration)
+	if err != nil {
+		slog.Error("Invalid STORAGE_MINIO_CACHE_DURATION value, using default 1m", "error", err)
+		duration = 1 * time.Minute
+	}
+
+	CacheTimeout = duration
+}
pkg/pobj/storage/minio.go link
+58 -18
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
diff --git a/pkg/pobj/storage/minio.go b/pkg/pobj/storage/minio.go
index e72a099..1f5e29b 100644
--- a/pkg/pobj/storage/minio.go
+++ b/pkg/pobj/storage/minio.go
@@ -7,13 +7,16 @@ import (
 	"io"
 	"net/url"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/hashicorp/golang-lru/v2/expirable"
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/picosh/pico/pkg/cache"
 	"github.com/picosh/pico/pkg/send/utils"
 )
 
@@ -22,8 +25,23 @@ type StorageMinio struct {
 	Admin  *madmin.AdminClient
 }
 
-var _ ObjectStorage = &StorageMinio{}
-var _ ObjectStorage = (*StorageMinio)(nil)
+type CachedBucket struct {
+	Bucket
+	Error error
+}
+
+type CachedObjectInfo struct {
+	*ObjectInfo
+	Error error
+}
+
+var (
+	_ ObjectStorage = &StorageMinio{}
+	_ ObjectStorage = (*StorageMinio)(nil)
+
+	bucketCache     = expirable.NewLRU[string, CachedBucket](2048, nil, cache.CacheTimeout)
+	objectInfoCache = expirable.NewLRU[string, CachedObjectInfo](2048, nil, cache.CacheTimeout)
+)
 
 func NewStorageMinio(address, user, pass string) (*StorageMinio, error) {
 	endpoint, err := url.Parse(address)
@@ -59,6 +77,10 @@ func NewStorageMinio(address, user, pass string) (*StorageMinio, error) {
 }
 
 func (s *StorageMinio) GetBucket(name string) (Bucket, error) {
+	if cachedBucket, found := bucketCache.Get(name); found {
+		return cachedBucket.Bucket, cachedBucket.Error
+	}
+
 	bucket := Bucket{
 		Name: name,
 	}
@@ -68,9 +90,13 @@ func (s *StorageMinio) GetBucket(name string) (Bucket, error) {
 		if err == nil {
 			err = errors.New("bucket does not exist")
 		}
+
+		bucketCache.Add(name, CachedBucket{bucket, err})
 		return bucket, err
 	}
 
+	bucketCache.Add(name, CachedBucket{bucket, nil})
+
 	return bucket, nil
 }
 
@@ -160,29 +186,43 @@ func (s *StorageMinio) GetObject(bucket Bucket, fpath string) (utils.ReadAndRead
 		ETag:         "",
 	}
 
-	info, err := s.Client.StatObject(context.Background(), bucket.Name, fpath, minio.StatObjectOptions{})
-	if err != nil {
-		return nil, objInfo, err
-	}
+	cacheKey := filepath.Join(bucket.Name, fpath)
+
+	cachedInfo, found := objectInfoCache.Get(cacheKey)
+	if found {
+		objInfo = cachedInfo.ObjectInfo
 
-	objInfo.LastModified = info.LastModified
-	objInfo.ETag = info.ETag
-	objInfo.Metadata = info.Metadata
-	objInfo.UserMetadata = info.UserMetadata
-	objInfo.Size = info.Size
+		if cachedInfo.Error != nil {
+			return nil, objInfo, cachedInfo.Error
+		}
+	} else {
+		info, err := s.Client.StatObject(context.Background(), bucket.Name, fpath, minio.StatObjectOptions{})
+		if err != nil {
+			objectInfoCache.Add(cacheKey, CachedObjectInfo{objInfo, err})
+			return nil, objInfo, err
+		}
+
+		objInfo.LastModified = info.LastModified
+		objInfo.ETag = info.ETag
+		objInfo.Metadata = info.Metadata
+		objInfo.UserMetadata = info.UserMetadata
+		objInfo.Size = info.Size
+
+		if mtime, ok := info.UserMetadata["Mtime"]; ok {
+			mtimeUnix, err := strconv.Atoi(mtime)
+			if err == nil {
+				objInfo.LastModified = time.Unix(int64(mtimeUnix), 0)
+			}
+		}
+
+		objectInfoCache.Add(cacheKey, CachedObjectInfo{objInfo, nil})
+	}
 
 	obj, err := s.Client.GetObject(context.Background(), bucket.Name, fpath, minio.GetObjectOptions{})
 	if err != nil {
 		return nil, objInfo, err
 	}
 
-	if mtime, ok := info.UserMetadata["Mtime"]; ok {
-		mtimeUnix, err := strconv.Atoi(mtime)
-		if err == nil {
-			objInfo.LastModified = time.Unix(int64(mtimeUnix), 0)
-		}
-	}
-
 	return obj, objInfo, nil
 }
 
pkg/shared/storage/proxy.go link
+5 -0
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
diff --git a/pkg/shared/storage/proxy.go b/pkg/shared/storage/proxy.go
index 428c91f..a3a2c51 100644
--- a/pkg/shared/storage/proxy.go
+++ b/pkg/shared/storage/proxy.go
@@ -166,6 +166,7 @@ type ImgProcessOpts struct {
 	Ratio   *Ratio
 	Rotate  int
 	Ext     string
+	NoRaw   bool
 }
 
 func (img *ImgProcessOpts) String() string {
@@ -204,6 +205,10 @@ func (img *ImgProcessOpts) String() string {
 		processOpts = fmt.Sprintf("%s/ext:%s", processOpts, img.Ext)
 	}
 
+	if processOpts == "" && !img.NoRaw {
+		processOpts = fmt.Sprintf("%s/raw:true", processOpts)
+	}
+
 	return processOpts
 }
 

chore(pgs): use http cache clear event to rm lru cache for special files

pkg/apps/pgs/web.go link
+12 -2
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
diff --git a/pkg/apps/pgs/web.go b/pkg/apps/pgs/web.go
index 845190e..d7032fe 100644
--- a/pkg/apps/pgs/web.go
+++ b/pkg/apps/pgs/web.go
@@ -75,7 +75,7 @@ func StartApiServer(cfg *PgsConfig) {
 		routes:  routes,
 	}
 
-	go routes.cacheMgmt(ctx, httpCache)
+	go routes.cacheMgmt(ctx, httpCache, cfg.CacheClearingQueue)
 
 	portStr := fmt.Sprintf(":%s", cfg.WebPort)
 	cfg.Logger.Info(
@@ -260,7 +260,7 @@ func (web *WebRouter) checkHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNotFound)
 }
 
-func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.SouinBaseHandler) {
+func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.SouinBaseHandler, notify chan string) {
 	storer := httpCache.Storers[0]
 	drain := createSubCacheDrain(ctx, web.Cfg.Logger)
 
@@ -270,6 +270,7 @@ func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.Souin
 		for scanner.Scan() {
 			surrogateKey := strings.TrimSpace(scanner.Text())
 			web.Cfg.Logger.Info("received cache-drain item", "surrogateKey", surrogateKey)
+			notify <- surrogateKey
 
 			if surrogateKey == "*" {
 				storer.DeleteMany(".+")
@@ -509,6 +510,15 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
 		}
 	}
 
+	go func() {
+		for key := range web.Cfg.CacheClearingQueue {
+			rKey := filepath.Join(key, "_redirects")
+			redirectsCache.Remove(rKey)
+			hKey := filepath.Join(key, "_headers")
+			headersCache.Remove(hKey)
+		}
+	}()
+
 	asset := &ApiAssetHandler{
 		WebRouter: web,
 		Logger:    logger,
pkg/apps/pgs/web_asset_handler.go link
+2 -2
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
diff --git a/pkg/apps/pgs/web_asset_handler.go b/pkg/apps/pgs/web_asset_handler.go
index d134936..1948c8a 100644
--- a/pkg/apps/pgs/web_asset_handler.go
+++ b/pkg/apps/pgs/web_asset_handler.go
@@ -49,7 +49,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	logger := h.Logger
 	var redirects []*RedirectRule
 
-	redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects")
+	redirectsCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_redirects")
 	if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found {
 		redirects = cachedRedirects
 	} else {
@@ -179,7 +179,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	var headers []*HeaderRule
 
-	headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers")
+	headersCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_headers")
 	if cachedHeaders, found := headersCache.Get(headersCacheKey); found {
 		headers = cachedHeaders
 	} else {