pkg/apps/pgs/web_asset_handler.go
  logger := h.Logger
  var redirects []*RedirectRule
- redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects")
+ redirectsCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_redirects")
  if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found {
    redirects = cachedRedirects
  } else {

  var headers []*HeaderRule
- headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers")
+ headersCacheKey := filepath.Join(getSurrogateKey(h.UserID, h.ProjectDir), "_headers")
  if cachedHeaders, found := headersCache.Get(headersCacheKey); found {
    headers = cachedHeaders
  } else {
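
Keying both caches by the project's surrogate key instead of the bucket name gives every entry for a given user and project a shared prefix, which makes it possible to drop just that project's cached rules when it is redeployed. A minimal sketch of what such an invalidation helper could look like, assuming it lives in the same package (with strings imported) and that getSurrogateKey returns a plain string prefix; the helper itself is hypothetical and not part of this change:

// Hypothetical helper: evict every cached rule set for one project,
// e.g. after a re-deploy. Keys and Remove are real methods on
// expirable.LRU from hashicorp/golang-lru/v2.
func purgeProjectCaches(userID, projectDir string) {
	prefix := getSurrogateKey(userID, projectDir)
	for _, key := range redirectsCache.Keys() {
		if strings.HasPrefix(key, prefix) {
			redirectsCache.Remove(key)
		}
	}
	for _, key := range headersCache.Keys() {
		if strings.HasPrefix(key, prefix) {
			headersCache.Remove(key)
		}
	}
}

The earlier version of this change, below, is where the caches and the bucket-name keys were first introduced.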
pkg/apps/pgs/web_asset_handler.go
"net/http/httputil"
_ "net/http/pprof"
+ "github.com/hashicorp/golang-lru/v2/expirable"
+ "github.com/picosh/pico/pkg/cache"
sst "github.com/picosh/pico/pkg/pobj/storage"
"github.com/picosh/pico/pkg/shared/storage"
)
+var (
+ redirectsCache = expirable.NewLRU[string, []*RedirectRule](2048, nil, cache.CacheTimeout)
+ headersCache = expirable.NewLRU[string, []*HeaderRule](2048, nil, cache.CacheTimeout)
+)
+
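These are package-level caches from hashicorp's golang-lru/v2 expirable package: each holds up to 2048 entries, evicts the least-recently-used entry beyond that, and additionally expires entries after cache.CacheTimeout, the repo's shared TTL constant. A minimal, self-contained sketch of those semantics; the five-minute TTL here is an illustrative stand-in, not the actual value of cache.CacheTimeout:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	// 2048-entry LRU; nil eviction callback; entries expire after the TTL.
	c := expirable.NewLRU[string, []string](2048, nil, 5*time.Minute)

	c.Add("bucket/project/_redirects", []string{"/old /new 301"})
	if rules, ok := c.Get("bucket/project/_redirects"); ok {
		fmt.Println(rules) // served from memory until LRU eviction or expiry
	}
}

The handler then consults these caches before falling back to object storage: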
type ApiAssetHandler struct {
  *WebRouter
  Logger *slog.Logger
func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  logger := h.Logger
  var redirects []*RedirectRule
- redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
- if err == nil {
-   defer redirectFp.Close()
-   if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize {
-     errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize)
-     logger.Error(errMsg)
-     http.Error(w, errMsg, http.StatusInternalServerError)
-     return
-   }
-   buf := new(strings.Builder)
-   lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize)
-   _, err := io.Copy(buf, lr)
-   if err != nil {
-     logger.Error("io copy", "err", err.Error())
-     http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
-     return
-   }
-   redirects, err = parseRedirectText(buf.String())
-   if err != nil {
-     logger.Error("could not parse redirect text", "err", err.Error())
+ redirectsCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_redirects")
+ if cachedRedirects, found := redirectsCache.Get(redirectsCacheKey); found {
+   redirects = cachedRedirects
+ } else {
+   redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
+   if err == nil {
+     defer redirectFp.Close()
+     if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize {
+       errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Cfg.MaxSpecialFileSize)
+       logger.Error(errMsg)
+       http.Error(w, errMsg, http.StatusInternalServerError)
+       return
+     }
+     buf := new(strings.Builder)
+     lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize)
+     _, err := io.Copy(buf, lr)
+     if err != nil {
+       logger.Error("io copy", "err", err.Error())
+       http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
+       return
+     }
+
+     redirects, err = parseRedirectText(buf.String())
+     if err != nil {
+       logger.Error("could not parse redirect text", "err", err.Error())
+     }
    }
+
+   redirectsCache.Add(redirectsCacheKey, redirects)
  }
  routes := calcRoutes(h.ProjectDir, h.Filepath, redirects)
  defer contents.Close()
  var headers []*HeaderRule
- headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
- if err == nil {
-   defer headersFp.Close()
-   if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize {
-     errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Cfg.MaxSpecialFileSize)
-     logger.Error(errMsg)
-     http.Error(w, errMsg, http.StatusInternalServerError)
-     return
-   }
-   buf := new(strings.Builder)
-   lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize)
-   _, err := io.Copy(buf, lr)
-   if err != nil {
-     logger.Error("io copy", "err", err.Error())
-     http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
-     return
-   }
-   headers, err = parseHeaderText(buf.String())
-   if err != nil {
-     logger.Error("could not parse header text", "err", err.Error())
+ headersCacheKey := filepath.Join(h.Bucket.Name, h.ProjectDir, "_headers")
+ if cachedHeaders, found := headersCache.Get(headersCacheKey); found {
+   headers = cachedHeaders
+ } else {
+   headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
+   if err == nil {
+     defer headersFp.Close()
+     if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize {
+       errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Cfg.MaxSpecialFileSize)
+       logger.Error(errMsg)
+       http.Error(w, errMsg, http.StatusInternalServerError)
+       return
+     }
+     buf := new(strings.Builder)
+     lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize)
+     _, err := io.Copy(buf, lr)
+     if err != nil {
+       logger.Error("io copy", "err", err.Error())
+       http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
+       return
+     }
+
+     headers, err = parseHeaderText(buf.String())
+     if err != nil {
+       logger.Error("could not parse header text", "err", err.Error())
+     }
    }
+
+   headersCache.Add(headersCacheKey, headers)
  }
  userHeaders := []*HeaderLine{}
    return
  }
  w.WriteHeader(status)
- _, err = io.Copy(w, contents)
+ _, err := io.Copy(w, contents)
  if err != nil {
    logger.Error("io copy", "err", err.Error())
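
Two details worth calling out at the end of the hunk. First, the final io.Copy switches from = to := because err is no longer declared at function scope once the GetObject calls moved inside the else branches. Second, redirectsCache.Add and headersCache.Add run even when GetObject fails, so a project with no _redirects or _headers file caches a nil slice; that negative hit is what spares the object store a lookup on every request. A small self-contained demo of the negative-caching behavior (the RedirectRule fields and the key are illustrative stand-ins):

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

// Stand-in for the pgs RedirectRule type; fields are illustrative only.
type RedirectRule struct{ From, To string }

func main() {
	// A TTL of 0 disables expiry, which is fine for a demo.
	c := expirable.NewLRU[string, []*RedirectRule](8, nil, 0)

	// Simulate a project with no _redirects file: the handler caches the
	// nil slice, so subsequent requests skip the object-store lookup.
	var rules []*RedirectRule
	c.Add("user/project/_redirects", rules)

	cached, found := c.Get("user/project/_redirects")
	fmt.Println(found, cached == nil) // true true
}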