Logs
Patchsets
Range Diff ↕ rd-120
2: caace51 ! 1: 26daea4 feat(pgs): lru cache for object info and special files
1: 2cf56f0 ! 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
Range Diff ↕ rd-122
1: 26daea4 = 1: 26daea4 feat(pgs): lru cache for object info and special files
2: b004b64 = 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
-: ------- > 3: 59f5618 refactor(pgs): store lru cache on web router
Range Diff ↕ rd-123
1: 26daea4 = 1: 26daea4 feat(pgs): lru cache for object info and special files
2: b004b64 = 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
3: 59f5618 = 3: 59f5618 refactor(pgs): store lru cache on web router
-: ------- > 4: ee12290 refactor(pgs): update minio lru and remove object info cache
Range Diff rd-120
- title
- feat(pgs): lru cache for object info and special files
- description
-
Patch changed - old: #2 caace51 → new: #1 26daea4
- title
- chore(pgs): use http cache clear event to rm lru cache for special files
- description
-
Patch changed - old: #1 2cf56f0 → new: #2 b004b64
2: caace51 ! 1: 26daea4 feat(pgs): lru cache for object info and special files
pkg/apps/pgs/web.go
pkg/apps/pgs/web.go
routes: routes, } - go routes.cacheMgmt(ctx, httpCache) + go routes.cacheMgmt(ctx, httpCache, cfg.CacheClearingQueue) portStr := fmt.Sprintf(":%s", cfg.WebPort) cfg.Logger.Info( w.WriteHeader(http.StatusNotFound) } -func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.SouinBaseHandler) { +func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.SouinBaseHandler, notify chan string) { storer := httpCache.Storers[0] drain := createSubCacheDrain(ctx, web.Cfg.Logger) for scanner.Scan() { surrogateKey := strings.TrimSpace(scanner.Text()) web.Cfg.Logger.Info("received cache-drain item", "surrogateKey", surrogateKey) + notify <- surrogateKey if surrogateKey == "*" { storer.DeleteMany(".+") } } + go func() { + for key := range web.Cfg.CacheClearingQueue { + rKey := filepath.Join(key, "_redirects") + redirectsCache.Remove(rKey) + hKey := filepath.Join(key, "_headers") + headersCache.Remove(hKey) + } + }() + asset := &ApiAssetHandler{ WebRouter: web, Logger: logger,
pkg/apps/pgs/web.go
pkg/apps/pgs/web.go
"net/http" "net/url" "os" + "path/filepath" "regexp" "strings" "time" "host", r.Host, ) - if fname == "_headers" || fname == "_redirects" || fname == "_pgs_ignore" { + if isSpecialFile(fname) { logger.Info("special file names are not allowed to be served over http") http.Error(w, "404 not found", http.StatusNotFound) return
pkg/apps/pgs/web_asset_handler.go
pkg/apps/pgs/web_asset_handler.go
logger := h.Logger var redirects []*RedirectRule - redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects") + redirectsCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_redirects") if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found { redirects = cachedRedirects } else { var headers []*HeaderRule - headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers") + headersCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_headers") if cachedHeaders, found := headersCache.Get(headersCacheKey); found { headers = cachedHeaders } else {
pkg/apps/pgs/web_asset_handler.go
pkg/apps/pgs/web_asset_handler.go
"net/http/httputil" _ "net/http/pprof" + "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/picosh/pico/pkg/cache" sst "github.com/picosh/pico/pkg/pobj/storage" "github.com/picosh/pico/pkg/shared/storage" ) +var ( + redirectsCache = expirable.NewLRU[string, []*RedirectRule](2048, nil, cache.CacheTimeout) + headersCache = expirable.NewLRU[string, []*HeaderRule](2048, nil, cache.CacheTimeout) +) + type ApiAssetHandler struct { *WebRouter Logger *slog.Logger func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { logger := h.Logger var redirects []*RedirectRule - redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects")) - if err == nil { - defer redirectFp.Close() - if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize { - errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize) - logger.Error(errMsg) - http.Error(w, errMsg, http.StatusInternalServerError) - return - } - buf := new(strings.Builder) - lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize) - _, err := io.Copy(buf, lr) - if err != nil { - logger.Error("io copy", "err", err.Error()) - http.Error(w, "cannot read _redirects file", http.StatusInternalServerError) - return - } - redirects, err = parseRedirectText(buf.String()) - if err != nil { - logger.Error("could not parse redirect text", "err", err.Error()) + redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects") + if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found { + redirects = cachedRedirects + } else { + redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects")) + if err == nil { + defer redirectFp.Close() + if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize { + errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize) + 
logger.Error(errMsg) + http.Error(w, errMsg, http.StatusInternalServerError) + return + } + buf := new(strings.Builder) + lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize) + _, err := io.Copy(buf, lr) + if err != nil { + logger.Error("io copy", "err", err.Error()) + http.Error(w, "cannot read _redirects file", http.StatusInternalServerError) + return + } + + redirects, err = parseRedirectText(buf.String()) + if err != nil { + logger.Error("could not parse redirect text", "err", err.Error()) + } } + + redirectsCache.Add(redirectsCacheKey, redirects) } routes := calcRoutes(h.ProjectDir, h.Filepath, redirects) defer contents.Close() var headers []*HeaderRule - headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers")) - if err == nil { - defer headersFp.Close() - if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize { - errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Cfg.MaxSpecialFileSize) - logger.Error(errMsg) - http.Error(w, errMsg, http.StatusInternalServerError) - return - } - buf := new(strings.Builder) - lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize) - _, err := io.Copy(buf, lr) - if err != nil { - logger.Error("io copy", "err", err.Error()) - http.Error(w, "cannot read _headers file", http.StatusInternalServerError) - return - } - headers, err = parseHeaderText(buf.String()) - if err != nil { - logger.Error("could not parse header text", "err", err.Error()) + headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers") + if cachedHeaders, found := headersCache.Get(headersCacheKey); found { + headers = cachedHeaders + } else { + headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers")) + if err == nil { + defer headersFp.Close() + if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize { + errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, 
h.Cfg.MaxSpecialFileSize) + logger.Error(errMsg) + http.Error(w, errMsg, http.StatusInternalServerError) + return + } + buf := new(strings.Builder) + lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize) + _, err := io.Copy(buf, lr) + if err != nil { + logger.Error("io copy", "err", err.Error()) + http.Error(w, "cannot read _headers file", http.StatusInternalServerError) + return + } + + headers, err = parseHeaderText(buf.String()) + if err != nil { + logger.Error("could not parse header text", "err", err.Error()) + } } + + headersCache.Add(headersCacheKey, headers) } userHeaders := []*HeaderLine{} return } w.WriteHeader(status) - _, err = io.Copy(w, contents) + _, err := io.Copy(w, contents) if err != nil { logger.Error("io copy", "err", err.Error())
go.mod
go.mod
github.com/google/uuid v1.6.0 github.com/gorilla/feeds v1.2.0 github.com/gorilla/websocket v1.5.3 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/jmoiron/sqlx v1.4.0 github.com/lib/pq v1.10.9 github.com/matryer/is v1.4.1
go.sum
go.sum
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
pkg/apps/pgs/uploader.go
pkg/apps/pgs/uploader.go
) specialFileMax := featureFlag.Data.SpecialFileMax - if isSpecialFile(entry) { + if isSpecialFile(entry.Filepath) { sizeRemaining = min(sizeRemaining, specialFileMax) } return str, err } -func isSpecialFile(entry *sendutils.FileEntry) bool { - fname := filepath.Base(entry.Filepath) - return fname == "_headers" || fname == "_redirects" +func isSpecialFile(entry string) bool { + fname := filepath.Base(entry) + return fname == "_headers" || fname == "_redirects" || fname == "_pgs_ignore" } func (h *UploadAssetHandler) Delete(s *pssh.SSHServerConnSession, entry *sendutils.FileEntry) error { } // special files we use for custom routing - if fname == "_pgs_ignore" || fname == "_redirects" || fname == "_headers" { + if isSpecialFile(fname) { return true, nil }
pkg/pobj/storage/minio.go
pkg/pobj/storage/minio.go
"io" "net/url" "os" + "path/filepath" "strconv" "strings" "time" + "github.com/hashicorp/golang-lru/v2/expirable" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/picosh/pico/pkg/cache" "github.com/picosh/pico/pkg/send/utils" ) Admin *madmin.AdminClient } -var _ ObjectStorage = &StorageMinio{} -var _ ObjectStorage = (*StorageMinio)(nil) +type CachedBucket struct { + Bucket + Error error +} + +type CachedObjectInfo struct { + *ObjectInfo + Error error +} + +var ( + _ ObjectStorage = &StorageMinio{} + _ ObjectStorage = (*StorageMinio)(nil) + + bucketCache = expirable.NewLRU[string, CachedBucket](2048, nil, cache.CacheTimeout) + objectInfoCache = expirable.NewLRU[string, CachedObjectInfo](2048, nil, cache.CacheTimeout) +) func NewStorageMinio(address, user, pass string) (*StorageMinio, error) { endpoint, err := url.Parse(address) } func (s *StorageMinio) GetBucket(name string) (Bucket, error) { + if cachedBucket, found := bucketCache.Get(name); found { + return cachedBucket.Bucket, cachedBucket.Error + } + bucket := Bucket{ Name: name, } if err == nil { err = errors.New("bucket does not exist") } + + bucketCache.Add(name, CachedBucket{bucket, err}) return bucket, err } + bucketCache.Add(name, CachedBucket{bucket, nil}) + return bucket, nil } ETag: "", } - info, err := s.Client.StatObject(context.Background(), bucket.Name, fpath, minio.StatObjectOptions{}) - if err != nil { - return nil, objInfo, err - } + cacheKey := filepath.Join(bucket.Name, fpath) + + cachedInfo, found := objectInfoCache.Get(cacheKey) + if found { + objInfo = cachedInfo.ObjectInfo - objInfo.LastModified = info.LastModified - objInfo.ETag = info.ETag - objInfo.Metadata = info.Metadata - objInfo.UserMetadata = info.UserMetadata - objInfo.Size = info.Size + if cachedInfo.Error != nil { + return nil, objInfo, cachedInfo.Error + } + } else { + info, err := s.Client.StatObject(context.Background(), bucket.Name, fpath, 
minio.StatObjectOptions{}) + if err != nil { + objectInfoCache.Add(cacheKey, CachedObjectInfo{objInfo, err}) + return nil, objInfo, err + } + + objInfo.LastModified = info.LastModified + objInfo.ETag = info.ETag + objInfo.Metadata = info.Metadata + objInfo.UserMetadata = info.UserMetadata + objInfo.Size = info.Size + + if mtime, ok := info.UserMetadata["Mtime"]; ok { + mtimeUnix, err := strconv.Atoi(mtime) + if err == nil { + objInfo.LastModified = time.Unix(int64(mtimeUnix), 0) + } + } + + objectInfoCache.Add(cacheKey, CachedObjectInfo{objInfo, nil}) + } obj, err := s.Client.GetObject(context.Background(), bucket.Name, fpath, minio.GetObjectOptions{}) if err != nil { return nil, objInfo, err } - if mtime, ok := info.UserMetadata["Mtime"]; ok { - mtimeUnix, err := strconv.Atoi(mtime) - if err == nil { - objInfo.LastModified = time.Unix(int64(mtimeUnix), 0) - } - } - return obj, objInfo, nil }
pkg/shared/storage/proxy.go
pkg/shared/storage/proxy.go
Ratio *Ratio Rotate int Ext string + NoRaw bool } func (img *ImgProcessOpts) String() string { processOpts = fmt.Sprintf("%s/ext:%s", processOpts, img.Ext) } + if processOpts == "" && !img.NoRaw { + processOpts = fmt.Sprintf("%s/raw:true", processOpts) + } + return processOpts }
1: 2cf56f0 ! 2: b004b64 chore(pgs): use http cache clear event to rm lru cache for special files
go.mod
go.mod
github.com/google/uuid v1.6.0 github.com/gorilla/feeds v1.2.0 github.com/gorilla/websocket v1.5.3 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/jmoiron/sqlx v1.4.0 github.com/lib/pq v1.10.9 github.com/matryer/is v1.4.1