- commit
- d812172
- parent
- bb9229a
- author
- Eric Bower
- date
- 2024-01-25 20:21:46 +0000 UTC
pico pro (#69)
M
Makefile
+2,
-1
1@@ -109,10 +109,11 @@ migrate:
2 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20230326_add_feed_items.sql
3 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20230707_add_projects_table.sql
4 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20230921_add_tokens_table.sql
5+ $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20240120_add_payment_history.sql
6 .PHONY: migrate
7
8 latest:
9- $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20230921_add_tokens_table.sql
10+ $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20240120_add_payment_history.sql
11 .PHONY: latest
12
13 psql:
M
db/db.go
+50,
-1
1@@ -79,7 +79,6 @@ func (p *FeedItemData) Scan(value interface{}) error {
2 if !ok {
3 return errors.New("type assertion to []byte failed")
4 }
5-
6 return json.Unmarshal(b, &p)
7 }
8
9@@ -148,6 +147,55 @@ type Token struct {
10 ExpiresAt *time.Time
11 }
12
13+type FeatureFlag struct {
14+ ID string
15+ UserID string
16+ PaymentHistoryID string
17+ Name string
18+ CreatedAt *time.Time
19+ ExpiresAt *time.Time
20+ Data FeatureFlagData
21+}
22+
23+func NewFeatureFlag(userID, name string, storageMax uint64, fileMax int64) *FeatureFlag {
24+ return &FeatureFlag{
25+ UserID: userID,
26+ Name: name,
27+ Data: FeatureFlagData{
28+ StorageMax: storageMax,
29+ FileMax: fileMax,
30+ },
31+ }
32+}
33+
34+func (ff *FeatureFlag) IsValid() bool {
35+ if ff.ExpiresAt.IsZero() {
36+ return false
37+ }
38+ return ff.ExpiresAt.After(time.Now())
39+}
40+
41+type FeatureFlagData struct {
42+ StorageMax uint64 `json:"storage_max"`
43+ FileMax int64 `json:"file_max"`
44+}
45+
46+// Make the FeatureFlagData struct implement the driver.Valuer interface. This
47+// method simply returns the JSON-encoded representation of the struct.
48+func (p FeatureFlagData) Value() (driver.Value, error) {
49+ return json.Marshal(p)
50+}
51+
52+// Make the FeatureFlagData struct implement the sql.Scanner interface. This
53+// method simply decodes a JSON-encoded value into the struct fields.
54+func (p *FeatureFlagData) Scan(value interface{}) error {
55+ b, ok := value.([]byte)
56+ if !ok {
57+ return errors.New("type assertion to []byte failed")
58+ }
59+ return json.Unmarshal(b, &p)
60+}
61+
62 type ErrMultiplePublicKeys struct{}
63
64 func (m *ErrMultiplePublicKeys) Error() string {
65@@ -216,6 +264,7 @@ type DB interface {
66
67 AddViewCount(postID string) (int, error)
68
69+ FindFeatureForUser(userID string, feature string) (*FeatureFlag, error)
70 HasFeatureForUser(userID string, feature string) bool
71 FindTotalSizeForUser(userID string) (int, error)
72
+26,
-4
1@@ -158,7 +158,7 @@ const (
2 sqlSelectTotalPostsAfterDate = `SELECT count(id) FROM posts WHERE created_at >= $1 AND cur_space = $2`
3 sqlSelectUsersWithPost = `SELECT count(app_users.id) FROM app_users WHERE EXISTS (SELECT 1 FROM posts WHERE user_id = app_users.id AND cur_space = $1);`
4
5- sqlSelectFeatureForUser = `SELECT id FROM feature_flags WHERE user_id = $1 AND name = $2`
6+ sqlSelectFeatureForUser = `SELECT id, user_id, payment_history_id, name, data, created_at, expires_at FROM feature_flags WHERE user_id = $1 AND name = $2 ORDER BY expires_at DESC LIMIT 1`
7 sqlSelectSizeForUser = `SELECT COALESCE(sum(file_size), 0) FROM posts WHERE user_id = $1`
8
9 sqlSelectPostIdByAliasSlug = `SELECT post_id FROM post_aliases WHERE slug = $1`
10@@ -1138,13 +1138,35 @@ func (me *PsqlDB) FindTagsForPost(postID string) ([]string, error) {
11 return tags, nil
12 }
13
14+func (me *PsqlDB) FindFeatureForUser(userID string, feature string) (*db.FeatureFlag, error) {
15+ ff := &db.FeatureFlag{}
16+ // payment history is allowed to be null
17+ // https://devtidbits.com/2020/08/03/go-sql-error-converting-null-to-string-is-unsupported/
18+ var paymentHistoryID sql.NullString
19+ err := me.Db.QueryRow(sqlSelectFeatureForUser, userID, feature).Scan(
20+ &ff.ID,
21+ &ff.UserID,
22+ &paymentHistoryID,
23+ &ff.Name,
24+ &ff.Data,
25+ &ff.CreatedAt,
26+ &ff.ExpiresAt,
27+ )
28+ if err != nil {
29+ return nil, err
30+ }
31+
32+ ff.PaymentHistoryID = paymentHistoryID.String
33+
34+ return ff, nil
35+}
36+
37 func (me *PsqlDB) HasFeatureForUser(userID string, feature string) bool {
38- var id string
39- err := me.Db.QueryRow(sqlSelectFeatureForUser, userID, feature).Scan(&id)
40+ ff, err := me.FindFeatureForUser(userID, feature)
41 if err != nil {
42 return false
43 }
44- return id != ""
45+ return ff.IsValid()
46 }
47
48 func (me *PsqlDB) FindTotalSizeForUser(userID string) (int, error) {
+0,
-95
1@@ -1,95 +0,0 @@
2-package uploadassets
3-
4-import (
5- "bytes"
6- "fmt"
7- "path/filepath"
8- "strings"
9-
10- "github.com/picosh/pico/shared"
11- "github.com/picosh/send/send/utils"
12-)
13-
14-func (h *UploadAssetHandler) validateAsset(data *FileData) (bool, error) {
15- if data.BucketQuota >= uint64(h.Cfg.MaxSize) {
16- return false, fmt.Errorf(
17- "ERROR: user (%s) has exceeded (%d bytes) max (%d bytes)",
18- data.User.Name,
19- data.BucketQuota,
20- h.Cfg.MaxSize,
21- )
22- }
23-
24- projectName := shared.GetProjectName(data.FileEntry)
25- if projectName == "" || projectName == "/" || projectName == "." {
26- return false, fmt.Errorf("ERROR: invalid project name, you must copy files to a non-root folder (e.g. pgs.sh:/project-name)")
27- }
28-
29- fname := filepath.Base(data.Filepath)
30- if int(data.Size) > h.Cfg.MaxAssetSize {
31- return false, fmt.Errorf("ERROR: file (%s) has exceeded maximum file size (%d bytes)", fname, h.Cfg.MaxAssetSize)
32- }
33-
34- // ".well-known" is a special case
35- if strings.Contains(fname, "/.well-known/") {
36- if shared.IsTextFile(string(data.Text)) {
37- return true, nil
38- } else {
39- return false, fmt.Errorf("(%s) not a utf-8 text file", data.Filepath)
40- }
41- }
42-
43- // special file we use for custom routing
44- if fname == "_redirects" {
45- return true, nil
46- }
47-
48- if !shared.IsExtAllowed(fname, h.Cfg.AllowedExt) {
49- extStr := strings.Join(h.Cfg.AllowedExt, ",")
50- err := fmt.Errorf(
51- "ERROR: (%s) invalid file, format must be (%s), skipping",
52- fname,
53- extStr,
54- )
55- return false, err
56- }
57-
58- return true, nil
59-}
60-
61-func (h *UploadAssetHandler) writeAsset(data *FileData) error {
62- valid, err := h.validateAsset(data)
63- if !valid {
64- return err
65- }
66-
67- assetFilename := shared.GetAssetFileName(data.FileEntry)
68-
69- if data.Size == 0 {
70- err = h.Storage.DeleteFile(data.Bucket, assetFilename)
71- if err != nil {
72- return err
73- }
74- } else {
75- reader := bytes.NewReader(data.Text)
76-
77- h.Cfg.Logger.Infof(
78- "(%s) uploading to (bucket: %s) (%s)",
79- data.User.Name,
80- data.Bucket.Name,
81- assetFilename,
82- )
83-
84- _, err := h.Storage.PutFile(
85- data.Bucket,
86- assetFilename,
87- utils.NopReaderAtCloser(reader),
88- data.FileEntry,
89- )
90- if err != nil {
91- return err
92- }
93- }
94-
95- return nil
96-}
+156,
-19
1@@ -1,6 +1,7 @@
2 package uploadassets
3
4 import (
5+ "bytes"
6 "encoding/binary"
7 "fmt"
8 "io"
9@@ -19,8 +20,9 @@ import (
10 )
11
12 type ctxUserKey struct{}
13+type ctxFeatureFlagKey struct{}
14 type ctxBucketKey struct{}
15-type ctxBucketQuotaKey struct{}
16+type ctxStorageSizeKey struct{}
17 type ctxProjectKey struct{}
18
19 func getProject(s ssh.Session) *db.Project {
20@@ -40,8 +42,23 @@ func getBucket(s ssh.Session) (storage.Bucket, error) {
21 return bucket, nil
22 }
23
24-func getBucketQuota(s ssh.Session) uint64 {
25- return s.Context().Value(ctxBucketQuotaKey{}).(uint64)
26+func getFeatureFlag(s ssh.Session) (*db.FeatureFlag, error) {
27+ ff := s.Context().Value(ctxFeatureFlagKey{}).(*db.FeatureFlag)
28+ if ff.Name == "" {
29+ return ff, fmt.Errorf("feature flag not set on `ssh.Context()` for connection")
30+ }
31+ return ff, nil
32+}
33+
34+func getStorageSize(s ssh.Session) uint64 {
35+ return s.Context().Value(ctxStorageSizeKey{}).(uint64)
36+}
37+
38+func incrementStorageSize(s ssh.Session, fileSize int64) uint64 {
39+ curSize := getStorageSize(s)
40+ nextStorageSize := curSize + uint64(fileSize)
41+ s.Context().SetValue(ctxStorageSizeKey{}, nextStorageSize)
42+ return nextStorageSize
43 }
44
45 func getUser(s ssh.Session) (*db.User, error) {
46@@ -54,10 +71,12 @@ func getUser(s ssh.Session) (*db.User, error) {
47
48 type FileData struct {
49 *utils.FileEntry
50- Text []byte
51- User *db.User
52- Bucket storage.Bucket
53- BucketQuota uint64
54+ Text []byte
55+ User *db.User
56+ Bucket storage.Bucket
57+ StorageSize uint64
58+ FeatureFlag *db.FeatureFlag
59+ DeltaFileSize int64
60 }
61
62 type UploadAssetHandler struct {
63@@ -170,9 +189,18 @@ func (h *UploadAssetHandler) Validate(s ssh.Session) error {
64 return fmt.Errorf("must have username set")
65 }
66
67- if !h.DBPool.HasFeatureForUser(user.ID, "pgs") {
68- return fmt.Errorf("you do not have access to this service")
69+ ff, err := h.DBPool.FindFeatureForUser(user.ID, "pgs")
70+ // pgs.sh has a free tier so users might not have a feature flag
71+ // in which case we set sane defaults
72+ if err != nil {
73+ ff = db.NewFeatureFlag(
74+ user.ID,
75+ "pgs",
76+ h.Cfg.MaxSize,
77+ h.Cfg.MaxAssetSize,
78+ )
79 }
80+ s.Context().SetValue(ctxFeatureFlagKey{}, ff)
81
82 assetBucket := shared.GetAssetBucketName(user.ID)
83 bucket, err := h.Storage.UpsertBucket(assetBucket)
84@@ -181,12 +209,12 @@ func (h *UploadAssetHandler) Validate(s ssh.Session) error {
85 }
86 s.Context().SetValue(ctxBucketKey{}, bucket)
87
88- totalFileSize, err := h.Storage.GetBucketQuota(bucket)
89+ totalStorageSize, err := h.Storage.GetBucketQuota(bucket)
90 if err != nil {
91 return err
92 }
93- s.Context().SetValue(ctxBucketQuotaKey{}, totalFileSize)
94- h.Cfg.Logger.Infof("(%s) bucket size is current (%d bytes)", user.Name, totalFileSize)
95+ s.Context().SetValue(ctxStorageSizeKey{}, totalStorageSize)
96+ h.Cfg.Logger.Infof("(%s) bucket size is current (%d bytes)", user.Name, totalStorageSize)
97
98 s.Context().SetValue(ctxUserKey{}, user)
99 h.Cfg.Logger.Infof("(%s) attempting to upload files to (%s)", user.Name, h.Cfg.Space)
100@@ -243,19 +271,32 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *utils.FileEntry) (strin
101 s.Context().SetValue(ctxProjectKey{}, project)
102 }
103
104- bucketQuota := getBucketQuota(s)
105+ storageSize := getStorageSize(s)
106+ featureFlag, err := getFeatureFlag(s)
107+ if err != nil {
108+ return "", err
109+ }
110+ // calculate the file size difference between the same file already
111+ // stored and the updated file being uploaded
112+ assetFilename := shared.GetAssetFileName(entry)
113+ curFileSize, _ := h.Storage.GetFileSize(bucket, assetFilename)
114+ deltaFileSize := curFileSize - entry.Size
115+
116 data := &FileData{
117- FileEntry: entry,
118- User: user,
119- Text: origText,
120- Bucket: bucket,
121- BucketQuota: bucketQuota,
122+ FileEntry: entry,
123+ User: user,
124+ Text: origText,
125+ Bucket: bucket,
126+ StorageSize: storageSize,
127+ FeatureFlag: featureFlag,
128+ DeltaFileSize: deltaFileSize,
129 }
130 err = h.writeAsset(data)
131 if err != nil {
132 h.Cfg.Logger.Error(err)
133 return "", err
134 }
135+ nextStorageSize := incrementStorageSize(s, deltaFileSize)
136
137 url := h.Cfg.AssetURL(
138 user.Name,
139@@ -263,5 +304,101 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *utils.FileEntry) (strin
140 strings.Replace(data.Filepath, "/"+projectName+"/", "", 1),
141 )
142
143- return url, nil
144+ maxSize := int(featureFlag.Data.StorageMax)
145+ str := fmt.Sprintf(
146+ "%s (space: %.2f/%.2fGB, %.2f%%)",
147+ url,
148+ shared.BytesToGB(int(nextStorageSize)),
149+ shared.BytesToGB(maxSize),
150+ (float32(nextStorageSize)/float32(maxSize))*100,
151+ )
152+
153+ return str, nil
154+}
155+
156+func (h *UploadAssetHandler) validateAsset(data *FileData) (bool, error) {
157+ storageMax := data.FeatureFlag.Data.StorageMax
158+ if data.StorageSize+uint64(data.DeltaFileSize) >= storageMax {
159+ return false, fmt.Errorf(
160+ "ERROR: user (%s) has exceeded (%d bytes) max (%d bytes)",
161+ data.User.Name,
162+ data.StorageSize,
163+ storageMax,
164+ )
165+ }
166+
167+ projectName := shared.GetProjectName(data.FileEntry)
168+ if projectName == "" || projectName == "/" || projectName == "." {
169+ return false, fmt.Errorf("ERROR: invalid project name, you must copy files to a non-root folder (e.g. pgs.sh:/project-name)")
170+ }
171+
172+ fileSize := data.Size
173+ fname := filepath.Base(data.Filepath)
174+ fileMax := data.FeatureFlag.Data.FileMax
175+ if fileSize > fileMax {
176+ return false, fmt.Errorf("ERROR: file (%s) has exceeded maximum file size (%d bytes)", fname, fileMax)
177+ }
178+
179+ // ".well-known" is a special case
180+ if strings.Contains(fname, "/.well-known/") {
181+ if shared.IsTextFile(string(data.Text)) {
182+ return true, nil
183+ } else {
184+ return false, fmt.Errorf("(%s) not a utf-8 text file", data.Filepath)
185+ }
186+ }
187+
188+ // special file we use for custom routing
189+ if fname == "_redirects" {
190+ return true, nil
191+ }
192+
193+ if !shared.IsExtAllowed(fname, h.Cfg.AllowedExt) {
194+ extStr := strings.Join(h.Cfg.AllowedExt, ",")
195+ err := fmt.Errorf(
196+ "ERROR: (%s) invalid file, format must be (%s), skipping",
197+ fname,
198+ extStr,
199+ )
200+ return false, err
201+ }
202+
203+ return true, nil
204+}
205+
206+func (h *UploadAssetHandler) writeAsset(data *FileData) error {
207+ valid, err := h.validateAsset(data)
208+ if !valid {
209+ return err
210+ }
211+
212+ assetFilename := shared.GetAssetFileName(data.FileEntry)
213+
214+ if data.Size == 0 {
215+ err = h.Storage.DeleteFile(data.Bucket, assetFilename)
216+ if err != nil {
217+ return err
218+ }
219+ } else {
220+ reader := bytes.NewReader(data.Text)
221+
222+ h.Cfg.Logger.Infof(
223+ "(%s) uploading to (bucket: %s) (%s)",
224+ data.User.Name,
225+ data.Bucket.Name,
226+ assetFilename,
227+ )
228+
229+ _, err := h.Storage.PutFile(
230+ data.Bucket,
231+ assetFilename,
232+ utils.NopReaderAtCloser(reader),
233+ data.FileEntry,
234+ )
235+ if err != nil {
236+ return err
237+ }
238+ }
239+
240+ return nil
241 }
+0,
-4
1@@ -24,10 +24,6 @@ func NewImgsAPI(dbpool db.DB, st storage.ObjectStorage) *ImgsAPI {
2 }
3 }
4
5-func (img *ImgsAPI) HasAccess(userID string) bool {
6- return img.Db.HasFeatureForUser(userID, "imgs")
7-}
8-
9 func (img *ImgsAPI) Upload(s ssh.Session, file *utils.FileEntry) (string, error) {
10 handler := NewUploadImgHandler(img.Db, img.Cfg, img.St)
11 err := handler.Validate(s)
+34,
-8
1@@ -21,10 +21,8 @@ import (
2 "go.uber.org/zap"
3 )
4
5-var maxSize = 1 * shared.GB
6-var maxImgSize = 10 * shared.MB
7-
8 type ctxUserKey struct{}
9+type ctxFeatureFlagKey struct{}
10
11 func getUser(s ssh.Session) (*db.User, error) {
12 user := s.Context().Value(ctxUserKey{}).(*db.User)
13@@ -34,6 +32,14 @@ func getUser(s ssh.Session) (*db.User, error) {
14 return user, nil
15 }
16
17+func getFeatureFlag(s ssh.Session) (*db.FeatureFlag, error) {
18+ ff := s.Context().Value(ctxFeatureFlagKey{}).(*db.FeatureFlag)
19+ if ff.Name == "" {
20+ return ff, fmt.Errorf("feature flag not set on `ssh.Context()` for connection")
21+ }
22+ return ff, nil
23+}
24+
25 type PostMetaData struct {
26 *db.Post
27 OrigText []byte
28@@ -41,6 +47,7 @@ type PostMetaData struct {
29 Tags []string
30 User *db.User
31 *utils.FileEntry
32+ FeatureFlag *db.FeatureFlag
33 }
34
35 type UploadImgHandler struct {
36@@ -178,6 +185,19 @@ func (h *UploadImgHandler) Validate(s ssh.Session) error {
37 return fmt.Errorf("must have username set")
38 }
39
40+ ff, _ := h.DBPool.FindFeatureForUser(user.ID, "imgs")
41+ // imgs.sh has a free tier so users might not have a feature flag
42+ // in which case we set sane defaults
43+ if ff == nil {
44+ ff = db.NewFeatureFlag(
45+ user.ID,
46+ "imgs",
47+ h.Cfg.MaxSize,
48+ h.Cfg.MaxAssetSize,
49+ )
50+ }
51+ s.Context().SetValue(ctxFeatureFlagKey{}, ff)
52+
53 s.Context().SetValue(ctxUserKey{}, user)
54 h.Cfg.Logger.Infof("(%s) attempting to upload files to (%s)", user.Name, h.Cfg.Space)
55 return nil
56@@ -245,12 +265,17 @@ func (h *UploadImgHandler) Write(s ssh.Session, entry *utils.FileEntry) (string,
57 h.Cfg.Logger.Infof("(%s) unable to find image (%s), continuing", nextPost.Filename, err)
58 }
59
60+ featureFlag, err := getFeatureFlag(s)
61+ if err != nil {
62+ return "", err
63+ }
64 metadata := PostMetaData{
65- OrigText: text,
66- Post: &nextPost,
67- User: user,
68- FileEntry: entry,
69- Cur: post,
70+ OrigText: text,
71+ Post: &nextPost,
72+ User: user,
73+ FileEntry: entry,
74+ Cur: post,
75+ FeatureFlag: featureFlag,
76 }
77
78 if post != nil {
79@@ -275,6 +300,7 @@ func (h *UploadImgHandler) Write(s ssh.Session, entry *utils.FileEntry) (string,
80 user.Name,
81 metadata.Slug,
82 )
83+ maxSize := int(featureFlag.Data.StorageMax)
84 str := fmt.Sprintf(
85 "%s (space: %.2f/%.2fGB, %.2f%%)",
86 url,
+6,
-4
1@@ -18,12 +18,14 @@ func (h *UploadImgHandler) validateImg(data *PostMetaData) (bool, error) {
2 return false, err
3 }
4
5- if data.FileSize > maxImgSize {
6- return false, fmt.Errorf("ERROR: file (%s) has exceeded maximum file size (%d bytes)", data.Filename, maxImgSize)
7+ fileMax := data.FeatureFlag.Data.FileMax
8+ if int64(data.FileSize) > fileMax {
9+ return false, fmt.Errorf("ERROR: file (%s) has exceeded maximum file size (%d bytes)", data.Filename, fileMax)
10 }
11
12- if totalFileSize+data.FileSize > maxSize {
13- return false, fmt.Errorf("ERROR: user (%s) has exceeded (%d bytes) max (%d bytes)", data.User.Name, totalFileSize, maxSize)
14+ storageMax := data.FeatureFlag.Data.StorageMax
15+ if uint64(totalFileSize+data.FileSize) > storageMax {
16+ return false, fmt.Errorf("ERROR: user (%s) has exceeded (%d bytes) max (%d bytes)", data.User.Name, totalFileSize, storageMax)
17 }
18
19 if !shared.IsExtAllowed(data.Filepath, h.Cfg.AllowedExt) {
+5,
-0
1@@ -24,6 +24,9 @@ func (i *ImgsLinkify) Create(fname string) string {
2 return i.Cfg.ImgFullURL(i.Username, fname)
3 }
4
5+var maxSize = uint64(500 * shared.MB)
6+var maxImgSize = int64(10 * shared.MB)
7+
8 func NewConfigSite() *shared.ConfigSite {
9 debug := shared.GetEnv("IMGS_DEBUG", "0")
10 domain := shared.GetEnv("IMGS_DOMAIN", "prose.sh")
11@@ -64,6 +67,8 @@ func NewConfigSite() *shared.ConfigSite {
12 AllowedExt: []string{".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg"},
13 Logger: shared.CreateLogger(debug == "1"),
14 AllowRegister: allowRegister == "1",
15+ MaxSize: maxSize,
16+ MaxAssetSize: maxImgSize,
17 },
18 }
19
+9,
-3
1@@ -143,7 +143,13 @@ func (c *Cmd) help() {
2 c.output(getHelpText(c.User.Name, "project-a"))
3 }
4
5-func (c *Cmd) stats(maxSize int) error {
6+func (c *Cmd) stats(cfgMaxSize uint64) error {
7+ ff, err := c.Dbpool.FindFeatureForUser(c.User.ID, "pgs")
8+ if err != nil {
9+ ff = db.NewFeatureFlag(c.User.ID, "pgs", cfgMaxSize, 0)
10+ }
11+ storageMax := ff.Data.StorageMax
12+
13 bucketName := shared.GetAssetBucketName(c.User.ID)
14 bucket, err := c.Store.UpsertBucket(bucketName)
15 if err != nil {
16@@ -165,8 +171,8 @@ func (c *Cmd) stats(maxSize int) error {
17 str += fmt.Sprintf(
18 "space:\t\t%.4f/%.4fGB, %.4f%%\n",
19 shared.BytesToGB(int(totalFileSize)),
20- shared.BytesToGB(maxSize),
21- (float32(totalFileSize)/float32(maxSize))*100,
22+ shared.BytesToGB(int(storageMax)),
23+ (float32(totalFileSize)/float32(storageMax))*100,
24 )
25 str += fmt.Sprintf("projects:\t%d", len(projects))
26 c.output(str)
+2,
-2
1@@ -5,8 +5,8 @@ import (
2 "github.com/picosh/pico/wish/cms/config"
3 )
4
5-var maxSize = 1 * shared.GB
6-var maxAssetSize = 50 * shared.MB
7+var maxSize = uint64(15 * shared.MB)
8+var maxAssetSize = int64(5 * shared.MB)
9
10 func NewConfigSite() *shared.ConfigSite {
11 debug := shared.GetEnv("PGS_DEBUG", "0")
1@@ -84,6 +84,15 @@ func (s *StorageFS) DeleteBucket(bucket Bucket) error {
2 return os.RemoveAll(bucket.Path)
3 }
4
5+func (s *StorageFS) GetFileSize(bucket Bucket, fpath string) (int64, error) {
6+ fi, err := os.Stat(filepath.Join(bucket.Path, fpath))
7+ if err != nil {
8+ return 0, err
9+ }
10+ size := fi.Size()
11+ return size, nil
12+}
13+
14 func (s *StorageFS) GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error) {
15 dat, err := os.Open(filepath.Join(bucket.Path, fpath))
16 if err != nil {
1@@ -152,6 +152,14 @@ func (s *StorageMinio) DeleteBucket(bucket Bucket) error {
2 return s.Client.RemoveBucket(context.TODO(), bucket.Name)
3 }
4
5+func (s *StorageMinio) GetFileSize(bucket Bucket, fpath string) (int64, error) {
6+ info, err := s.Client.StatObject(context.Background(), bucket.Name, fpath, minio.StatObjectOptions{})
7+ if err != nil {
8+ return 0, err
9+ }
10+ return info.Size, nil
11+}
12+
13 func (s *StorageMinio) GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error) {
14 modTime := time.Time{}
15
1@@ -21,6 +21,7 @@ type ObjectStorage interface {
2
3 DeleteBucket(bucket Bucket) error
4 GetBucketQuota(bucket Bucket) (uint64, error)
5+ GetFileSize(bucket Bucket, fpath string) (int64, error)
6 GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error)
7 ServeFile(bucket Bucket, fpath string, ratio *Ratio, original bool, useProxy bool) (io.ReadCloser, string, error)
8 PutFile(bucket Bucket, fpath string, contents utils.ReaderAtCloser, entry *utils.FileEntry) (string, error)
1@@ -0,0 +1,24 @@
2+CREATE TABLE IF NOT EXISTS payment_history (
3+ id uuid NOT NULL DEFAULT uuid_generate_v4(),
4+ user_id uuid,
5+ amount bigint NOT NULL,
6+ payment_type character varying(50),
7+ data jsonb NOT NULL DEFAULT '{"notes": ""}'::jsonb,
8+ created_at timestamp without time zone NOT NULL DEFAULT NOW(),
9+ CONSTRAINT payment_history_aliases_pkey PRIMARY KEY (id),
10+ CONSTRAINT fk_payment_history_users
11+ FOREIGN KEY(user_id)
12+ REFERENCES app_users(id)
13+);
14+
15+ALTER TABLE feature_flags DROP CONSTRAINT user_features_unique_name;
16+ALTER TABLE feature_flags ADD COLUMN expires_at timestamp without time zone
17+ NOT NULL DEFAULT NOW() + '1 year'::interval;
18+ALTER TABLE feature_flags ADD COLUMN data jsonb
19+ NOT NULL DEFAULT '{}'::jsonb;
20+ALTER TABLE feature_flags ADD COLUMN payment_history_id uuid;
21+ALTER TABLE feature_flags ADD CONSTRAINT fk_features_payment_history
22+ FOREIGN KEY(payment_history_id)
23+ REFERENCES payment_history(id)
24+ ON DELETE CASCADE
25+ ON UPDATE CASCADE;
+26,
-0
1@@ -0,0 +1,26 @@
2+-- find user id
3+SELECT id FROM app_users WHERE name = '{user}';
4+
5+-- add payment record
6+-- amount should be multiplied by 1 million and then later divided by the same
7+-- https://stackoverflow.com/a/51238749
8+INSERT INTO payment_history (user_id, payment_type, amount, data)
9+VALUES ('', 'stripe', 20 * 1000000, '{"notes": ""}'::jsonb) RETURNING id;
10+
11+-- enable pro features
12+
13+-- pgs
14+-- storage max is 10gb
15+-- file max is 50mb
16+INSERT INTO feature_flags (user_id, name, data, expires_at)
17+VALUES ('', 'pgs', '{"storage_max":10737418240, "file_max":52428800}'::jsonb, now() + '1 year'::interval);
18+
19+-- imgs
20+-- storage max is 10gb
21+-- file max is 50mb
22+INSERT INTO feature_flags (user_id, name, data, expires_at)
23+VALUES ('', 'imgs', '{"storage_max":10737418240, "file_max":52428800}'::jsonb, now() + '1 year'::interval);
24+
25+-- tuns
26+INSERT INTO feature_flags (user_id, name, expires_at)
27+VALUES ('', 'tuns', now() + '1 year'::interval);
+2,
-2
1@@ -26,8 +26,8 @@ type ConfigCms struct {
2 HiddenPosts []string
3 Logger *zap.SugaredLogger
4 AllowRegister bool
5- MaxSize int
6- MaxAssetSize int
7+ MaxSize uint64
8+ MaxAssetSize int64
9 }
10
11 func NewConfigCms() *ConfigCms {