WIP implementation of caching #151

Closed · wants to merge 2 commits
19 changes: 14 additions & 5 deletions db/db.go
@@ -224,13 +224,14 @@ type FeatureFlag struct {
Data FeatureFlagData `json:"data"`
}

func NewFeatureFlag(userID, name string, storageMax uint64, fileMax int64) *FeatureFlag {
func NewFeatureFlag(userID, name string, storageMax uint64, fileMax int64, specialFileMax int64) *FeatureFlag {
return &FeatureFlag{
UserID: userID,
Name: name,
Data: FeatureFlagData{
StorageMax: storageMax,
FileMax: fileMax,
StorageMax: storageMax,
FileMax: fileMax,
SpecialFileMax: specialFileMax,
},
}
}
@@ -249,6 +250,13 @@ func (ff *FeatureFlag) FindFileMax(defaultSize int64) int64 {
return ff.Data.FileMax
}

func (ff *FeatureFlag) FindSpecialFileMax(defaultSize int64) int64 {
if ff.Data.SpecialFileMax == 0 {
return defaultSize
}
return ff.Data.SpecialFileMax
}

func (ff *FeatureFlag) IsValid() bool {
if ff.ExpiresAt.IsZero() {
return false
@@ -257,8 +265,9 @@ func (ff *FeatureFlag) IsValid() bool {
}

type FeatureFlagData struct {
StorageMax uint64 `json:"storage_max"`
FileMax int64 `json:"file_max"`
StorageMax uint64 `json:"storage_max"`
FileMax int64 `json:"file_max"`
SpecialFileMax int64 `json:"special_file_max"`
}

// Make the Attrs struct implement the driver.Valuer interface. This method
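A standalone sketch of the fallback behavior the new `FindSpecialFileMax` is going for, mirroring the existing `FindFileMax` pattern (import path taken from this repo's go.mod; the byte values are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/picosh/pico/db"
)

func main() {
	// SpecialFileMax is left at its zero value, so FindSpecialFileMax
	// falls back to the caller-supplied default.
	ff := db.NewFeatureFlag("user-id", "plus", 10_000_000, 0, 0)
	fmt.Println(ff.FindSpecialFileMax(5_000)) // 5000

	// An explicit value on the flag wins over the default.
	ff.Data.SpecialFileMax = 1_000
	fmt.Println(ff.FindSpecialFileMax(5_000)) // 1000
}
```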
2 changes: 1 addition & 1 deletion dev.md
@@ -16,7 +16,7 @@ echo dotenv > .envrc && direnv allow
Boot up database (or bring your own)

```bash
docker compose up -f docker-compose.yml -f docker-compose.override.yml --profile db -d
docker compose -f docker-compose.yml -f docker-compose.override.yml --profile db up -d
```

Create db and migrate
13 changes: 12 additions & 1 deletion filehandlers/assets/handler.go
@@ -363,18 +363,24 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
"sizeRemaining", sizeRemaining,
)

specialFileMax := featureFlag.Data.SpecialFileMax
if isSpecialFile(entry) {
sizeRemaining = min(sizeRemaining, specialFileMax)
}

fsize, err := h.writeAsset(
utils.NewMaxBytesReader(data.Reader, int64(sizeRemaining)),
data,
)
if err != nil {
logger.Error("could not write asset", "err", err.Error())
cerr := fmt.Errorf(
"%s: storage size %.2fmb, storage max %.2fmb, file max %.2fmb",
"%s: storage size %.2fmb, storage max %.2fmb, file max %.2fmb, special file max %.2fmb",
err,
utils.BytesToMB(int(curStorageSize)),
utils.BytesToMB(int(storageMax)),
utils.BytesToMB(int(fileMax)),
utils.BytesToMB(int(specialFileMax)),
)
return "", cerr
}
@@ -400,6 +406,11 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
return str, nil
}

func isSpecialFile(entry *sendutils.FileEntry) bool {
fname := filepath.Base(entry.Filepath)
return fname == "_headers" || fname == "_redirects"
}

func (h *UploadAssetHandler) Delete(s ssh.Session, entry *sendutils.FileEntry) error {
user, err := shared.GetUser(s.Context())
if err != nil {
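Worth noting: the new clamp only tightens the write limit for `_headers` and `_redirects`; every other upload keeps the storage-based remainder. A standalone sketch of that decision logic (sizes are illustrative; `min` is the Go 1.21 builtin the diff itself uses):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// isSpecialFile mirrors the helper in the diff: only _headers and
// _redirects get the tighter cap.
func isSpecialFile(fpath string) bool {
	fname := filepath.Base(fpath)
	return fname == "_headers" || fname == "_redirects"
}

func main() {
	sizeRemaining := int64(50 * 1024 * 1024) // storage left for this user
	specialFileMax := int64(5 * 1024)        // cap for special files

	for _, f := range []string{"index.html", "_redirects"} {
		limit := sizeRemaining
		if isSpecialFile(f) {
			limit = min(limit, specialFileMax)
		}
		fmt.Printf("%s -> write limit %d bytes\n", f, limit)
	}
}
```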
1 change: 1 addition & 0 deletions go.mod
@@ -29,6 +29,7 @@ require (
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/gorilla/feeds v1.1.2
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/lib/pq v1.10.9
github.com/microcosm-cc/bluemonday v1.0.26
github.com/minio/minio-go/v7 v7.0.77
2 changes: 2 additions & 0 deletions go.sum
@@ -187,6 +187,8 @@ github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/feeds v1.1.2 h1:pxzZ5PD3RJdhFH2FsJJ4x6PqMqbgFk1+Vez4XWBW8Iw=
github.com/gorilla/feeds v1.1.2/go.mod h1:WMib8uJP3BbY+X8Szd1rA5Pzhdfh+HCCAYT2z7Fza6Y=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
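The one new dependency is HashiCorp's generics-based LRU, pulled in for the `expirable` variant that pgs/api.go uses below to cache `CachedContext` values keyed by subdomain. A minimal standalone sketch of that API (capacity and TTL are illustrative; the PR reads them from config):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	// 128 entries, no eviction callback, 30-second TTL.
	cache := expirable.NewLRU[string, any](128, nil, 30*time.Second)

	cache.Add("fakeuser-myproject", "cached context goes here")
	if v, ok := cache.Get("fakeuser-myproject"); ok {
		fmt.Println("cache hit:", v)
	}
	// Once the TTL elapses the entry is evicted and Get reports a miss.
}
```

An expiring LRU means stale user/project data heals itself after the TTL, with no explicit invalidation hooks needed.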
162 changes: 109 additions & 53 deletions pgs/api.go
@@ -16,6 +16,7 @@
_ "net/http/pprof"

"github.com/gorilla/feeds"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/picosh/pico/db"
"github.com/picosh/pico/db/postgres"
"github.com/picosh/pico/shared"
@@ -39,6 +40,16 @@
HasPicoPlus bool
}

// CachedContext holds slow-to-fetch data for a particular project
// which gets saved in the LRU cache.
type CachedContext struct {
User *db.User
Project *db.Project
Routes []*HttpReply
Headers []*HeaderRule
HasPicoPlus bool
}

func checkHandler(w http.ResponseWriter, r *http.Request) {
dbpool := shared.GetDB(r)
cfg := shared.GetCfg(r)
@@ -167,26 +178,8 @@
return isFullUrl
}

func (h *AssetHandler) handle(logger *slog.Logger, w http.ResponseWriter, r *http.Request) {
var redirects []*RedirectRule
redirectFp, _, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
if err == nil {
defer redirectFp.Close()
buf := new(strings.Builder)
_, err := io.Copy(buf, redirectFp)
if err != nil {
logger.Error("io copy", "err", err.Error())
http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
return
}

redirects, err = parseRedirectText(buf.String())
if err != nil {
logger.Error("could not parse redirect text", "err", err.Error())
}
}

routes := calcRoutes(h.ProjectDir, h.Filepath, redirects)
func (h *AssetHandler) handle(logger *slog.Logger, w http.ResponseWriter, r *http.Request, cache CachedContext) {
routes := cache.Routes

var contents io.ReadCloser
contentType := ""
@@ -279,7 +272,7 @@
)
// track 404s
ch := shared.GetAnalyticsQueue(r)
view, err := shared.AnalyticsVisitFromRequest(r, h.UserID, h.Cfg.Secret)
view, err := shared.AnalyticsVisitFromRequest(r, h.UserID, h.Cfg.Secret) // TODO: this checks analytics flag from DB
if err == nil {
view.ProjectID = h.ProjectID
view.Status = http.StatusNotFound
@@ -298,23 +291,7 @@
contentType = storage.GetMimeType(assetFilepath)
}

var headers []*HeaderRule
headersFp, _, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
if err == nil {
defer headersFp.Close()
buf := new(strings.Builder)
_, err := io.Copy(buf, headersFp)
if err != nil {
logger.Error("io copy", "err", err.Error())
http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
return
}

headers, err = parseHeaderText(buf.String())
if err != nil {
logger.Error("could not parse header text", "err", err.Error())
}
}
headers := cache.Headers

userHeaders := []*HeaderLine{}
for _, headerRule := range headers {
@@ -367,7 +344,7 @@
)

w.WriteHeader(status)
_, err = io.Copy(w, contents)
_, err := io.Copy(w, contents)

if err != nil {
logger.Error("io copy", "err", err.Error())
@@ -399,6 +376,7 @@
dbpool := shared.GetDB(r)
st := shared.GetStorage(r)
ologger := shared.GetLogger(r)
cache := shared.GetCache(r)

logger := ologger.With(
"subdomain", subdomain,
@@ -419,16 +397,16 @@

logger = logger.With(
"project", props.ProjectName,
"user", props.Username,

[Check failure · GitHub Actions / test · pgs/api.go line 400: not enough arguments in call to populateCache]
)

user, err := dbpool.FindUserForName(props.Username)
cachedData, err := populateCache(r, logger, fromImgs)
if err != nil {
logger.Info("user not found")
http.Error(w, "user not found", http.StatusNotFound)
return
// TODO throw the correct error code
}
cache.Add(subdomain, cachedData)

user := cachedData.User
logger = logger.With(
"userId", user.ID,
)
@@ -443,13 +421,7 @@
bucket, err = st.GetBucket(shared.GetImgsBucketName(user.ID))
} else {
bucket, err = st.GetBucket(shared.GetAssetBucketName(user.ID))
project, err := dbpool.FindProjectByName(user.ID, props.ProjectName)
if err != nil {
logger.Info("project not found")
http.Error(w, "project not found", http.StatusNotFound)
return
}

project := cachedData.Project
logger = logger.With(
"projectId", project.ID,
"project", project.Name,
@@ -475,8 +447,6 @@
return
}

hasPicoPlus := dbpool.HasFeatureForUser(user.ID, "plus")

asset := &AssetHandler{
Username: props.Username,
UserID: user.ID,
@@ -490,14 +460,97 @@
Bucket: bucket,
ImgProcessOpts: opts,
ProjectID: projectID,
HasPicoPlus: hasPicoPlus,
HasPicoPlus: cachedData.HasPicoPlus,
// Or maybe just put cache in here and remove other stuff?

[Check failure · GitHub Actions / test · pgs/api.go line 464: not enough arguments in call to asset.handle]
}

asset.handle(logger, w, r)
}

type HasPerm = func(proj *db.Project) bool

// populateCache is called for any request where there is no existing cache.
// Any expensive operations should be performed here and saved to CachedContext.
func populateCache(r *http.Request, logger *slog.Logger, fromImgs bool, fname string) (CachedContext, error) {
dbpool := shared.GetDB(r)
st := shared.GetStorage(r)

[Check failure · GitHub Actions / test · pgs/api.go line 476: undefined: props]
cfg := shared.GetCfg(r)

user, err := dbpool.FindUserForName(props.Username)
if err != nil {
return CachedContext{}, err // 404

[Check failure · GitHub Actions / test · pgs/api.go line 481: undefined: props]
}

project, err := dbpool.FindProjectByName(user.ID, props.ProjectName)
if err != nil {
return CachedContext{}, err // 404
}

[Check failure · GitHub Actions / test · pgs/api.go line 488: undefined: bucket]
// imgs has a different bucket directory
if fromImgs {

[Check failure · GitHub Actions / test · pgs/api.go line 490: undefined: bucket]
bucket, err = st.GetBucket(shared.GetImgsBucketName(user.ID)) // Probably these buckets should just be created once, or dynamically from Bucket structs
} else {
bucket, err = st.GetBucket(shared.GetAssetBucketName(user.ID))
}

[Check failure · GitHub Actions / test · pgs/api.go line 494: undefined: bucket]

var redirects []*RedirectRule
redirectFp, redirectInfo, err := st.GetObject(bucket, filepath.Join(project.ProjectDir, "_redirects"))
if err == nil {
defer redirectFp.Close()
if redirectInfo != nil && redirectInfo.Size > cfg.MaxSpecialFileSize {
errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, cfg.MaxSpecialFileSize)
return CachedContext{}, err // 500

[Check failure · GitHub Actions / test · pgs/api.go line 502: undefined: h]
}
buf := new(strings.Builder)
lr := io.LimitReader(redirectFp, h.Cfg.MaxSpecialFileSize)
_, err := io.Copy(buf, lr)

[Check failure · GitHub Actions / test · pgs/api.go line 506: undefined: w]
if err != nil {
logger.Error("io copy", "err", err.Error())
http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
return CachedContext{}, err // 500
}

redirects, err = parseRedirectText(buf.String())
if err != nil {
logger.Error("could not parse redirect text", "err", err.Error())
}
}

routes := calcRoutes(project.ProjectDir, fname, redirects)

var headers []*HeaderRule
headersFp, headersInfo, err := st.GetObject(bucket, filepath.Join(project.ProjectDir, "_headers"))
if err == nil {
defer headersFp.Close()
if headersInfo != nil && headersInfo.Size > cfg.MaxSpecialFileSize {
errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, cfg.MaxSpecialFileSize)
return CachedContext{}, err // 500
}
buf := new(strings.Builder)
lr := io.LimitReader(headersFp, h.Cfg.MaxSpecialFileSize)
_, err := io.Copy(buf, lr)
if err != nil {
logger.Error("io copy", "err", err.Error())
http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
return CachedContext{}, err // 500
}

headers, err = parseHeaderText(buf.String())
if err != nil {
logger.Error("could not parse header text", "err", err.Error())
// continue
}
}

return CachedContext{
User: user,
Project: project,
Routes: routes,
Headers: headers,
HasPicoPlus: dbpool.HasFeatureForUser(user.ID, "plus"),
}, nil
}
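Two observations on `populateCache` as it stands: the oversize branch formats an `errMsg` that is never used and returns `err`, which is nil at that point (so the caller would not see a failure), and the read is still capped by `io.LimitReader` as a second line of defense against a misreported object size. A standalone sketch of that guard-then-limit pattern, with stdlib stand-ins for the storage client:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readCappedFile mirrors the pattern in populateCache: reject files
// whose reported size exceeds maxSize, then read through a LimitReader
// so even a misreported size cannot exceed the cap.
func readCappedFile(r io.Reader, reportedSize, maxSize int64) (string, error) {
	if reportedSize > maxSize {
		return "", fmt.Errorf("special file is too large (%d > %d)", reportedSize, maxSize)
	}
	buf := new(strings.Builder)
	if _, err := io.Copy(buf, io.LimitReader(r, maxSize)); err != nil {
		return "", fmt.Errorf("cannot read special file: %w", err)
	}
	return buf.String(), nil
}

func main() {
	text := "/old /new 301\n"
	out, err := readCappedFile(strings.NewReader(text), int64(len(text)), 5*1024)
	fmt.Printf("%q %v\n", out, err)
}
```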

func ImgAssetRequest(hasPerm HasPerm) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
logger := shared.GetLogger(r)
@@ -558,6 +611,8 @@
dbpool := postgres.NewDB(cfg.DbURL, cfg.Logger)
defer dbpool.Close()

cache := expirable.NewLRU[string, any](cfg.CacheSize, nil, time.Second*time.Duration(cfg.CacheExpireSeconds))

var st storage.StorageServe
var err error
if cfg.MinioURL == "" {
@@ -578,6 +633,7 @@
Dbpool: dbpool,
Storage: st,
AnalyticsQueue: ch,
Cache: cache,
}
handler := shared.CreateServe(mainRoutes, createSubdomainRoutes(publicPerm), apiConfig)
router := http.HandlerFunc(handler)
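On the wiring: the LRU is built at server startup, handed to handlers via `ApiConfig`, and fetched per request with `shared.GetCache(r)`. Those `shared` helpers are not part of this diff, so the context-based plumbing below is an assumption about how they might work, not the PR's actual code:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

type cacheKey struct{}

// withCache is a hypothetical middleware that stores the LRU on the
// request context, the way shared.GetCache presumably reads it back.
func withCache(c *expirable.LRU[string, any], next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), cacheKey{}, c)))
	})
}

func getCache(r *http.Request) *expirable.LRU[string, any] {
	return r.Context().Value(cacheKey{}).(*expirable.LRU[string, any])
}

func main() {
	cache := expirable.NewLRU[string, any](256, nil, 60*time.Second)
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if v, ok := getCache(r).Get(r.Host); ok {
			fmt.Fprintf(w, "hit: %v", v)
			return
		}
		getCache(r).Add(r.Host, "expensive lookup result")
		fmt.Fprint(w, "miss, populated")
	})
	_ = http.ListenAndServe(":8080", withCache(cache, mux))
}
```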
2 changes: 1 addition & 1 deletion pgs/cli.go
@@ -302,7 +302,7 @@ func (c *Cmd) statsSites() error {
func (c *Cmd) stats(cfgMaxSize uint64) error {
ff, err := c.Dbpool.FindFeatureForUser(c.User.ID, "plus")
if err != nil {
ff = db.NewFeatureFlag(c.User.ID, "plus", cfgMaxSize, 0)
ff = db.NewFeatureFlag(c.User.ID, "plus", cfgMaxSize, 0, 0)
}
// this is jank
ff.Data.StorageMax = ff.FindStorageMax(cfgMaxSize)