commit: 4e0839a
parent: be23a31
author: Eric Bower
date: 2024-11-15 15:02:24 +0000 UTC
refactor(metric-drain): use caddy access logs

Previously we were sending site usage analytics from within our web app code. This worked well for our use case because we could filter, parse, and send the analytics to our pipe `metric-drain`, which would then store them in our database.

Because we want to enable HTTP caching for pgs, we won't always reach our web app code, since usage analytics will terminate at our cache layer. Instead, we want to record analytics higher in the request stack. In this case, we record site analytics from Caddy access logs.

Here's how it works:

- `pub` Caddy access logs to our pipe `container-drain`
- `auth/web` will `sub` to `container-drain`, filter, deserialize, and `pub` to `metric-drain`
- `auth/web` will `sub` to `metric-drain` and store the analytics in our database
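To make the filter-and-deserialize step concrete, here is a standalone sketch (not part of the diff below) that unmarshals a hypothetical Caddy access log entry into a struct mirroring the `CaddyAccessLog` type added in this commit. The field values are invented for illustration; in `auth/web` the decoded entry is passed to `deserializeCaddyAccessLog`, which also resolves the user, project, and post IDs from the database before the visit is published to `metric-drain`.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // caddyAccessLog mirrors the CaddyAccessLog shape added in this commit,
    // trimmed to the fields that feed db.AnalyticsVisits.
    type caddyAccessLog struct {
        Request struct {
            ClientIP string `json:"client_ip"`
            Host     string `json:"host"`
            Uri      string `json:"uri"`
            Headers  struct {
                UserAgent []string `json:"User-Agent"`
                Referer   []string `json:"Referer"`
            } `json:"headers"`
            Tls struct {
                ServerName string `json:"server_name"`
            } `json:"tls"`
        } `json:"request"`
        Status      int `json:"status"`
        RespHeaders struct {
            ContentType []string `json:"Content-Type"`
        } `json:"resp_headers"`
    }

    func main() {
        // Hypothetical access log line; real entries are read off `container-drain`
        // and only lines containing "http.log.access" are forwarded.
        line := `{"request":{"client_ip":"203.0.113.7","host":"erock-myproj.pgs.sh","uri":"/","headers":{"User-Agent":["Mozilla/5.0"],"Referer":["https://pico.sh"]},"tls":{"server_name":"erock-myproj.pgs.sh"}},"status":200,"resp_headers":{"Content-Type":["text/html; charset=utf-8"]}}`

        var access caddyAccessLog
        if err := json.Unmarshal([]byte(line), &access); err != nil {
            panic(err)
        }

        // The space ("pgs") comes from tls.server_name and the subdomain
        // ("erock-myproj") from host; both drive the user/project lookup.
        fmt.Println(access.Request.Host, access.Request.Uri, access.Status, access.RespHeaders.ContentType)
    }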
15 files changed, +222, -209
M Makefile +2, -1
1@@ -135,10 +135,11 @@ migrate:
2 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20240819_add_projects_blocked.sql
3 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20241028_add_analytics_indexes.sql
4 $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20241114_add_namespace_to_analytics.sql
5+ $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20241125_add_content_type_to_analytics.sql
6 .PHONY: migrate
7
8 latest:
9- $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20241114_add_namespace_to_analytics.sql
10+ $(DOCKER_CMD) exec -i $(DB_CONTAINER) psql -U $(PGUSER) -d $(PGDATABASE) < ./sql/migrations/20241125_add_content_type_to_analytics.sql
11 .PHONY: latest
12
13 psql:
+162, -12
1@@ -14,6 +14,7 @@ import (
2 "log/slog"
3 "net/http"
4 "net/url"
5+ "strings"
6 "time"
7
8 "github.com/gorilla/feeds"
9@@ -21,6 +22,7 @@ import (
10 "github.com/picosh/pico/db/postgres"
11 "github.com/picosh/pico/shared"
12 "github.com/picosh/utils"
13+ "github.com/picosh/utils/pipe"
14 "github.com/picosh/utils/pipe/metrics"
15 )
16
17@@ -578,6 +580,155 @@ func checkoutHandler() http.HandlerFunc {
18 }
19 }
20
21+type AccessLogReq struct {
22+ RemoteIP string `json:"remote_ip"`
23+ RemotePort string `json:"remote_port"`
24+ ClientIP string `json:"client_ip"`
25+ Method string `json:"method"`
26+ Host string `json:"host"`
27+ Uri string `json:"uri"`
28+ Headers struct {
29+ UserAgent []string `json:"User-Agent"`
30+ Referer []string `json:"Referer"`
31+ } `json:"headers"`
32+ Tls struct {
33+ ServerName string `json:"server_name"`
34+ } `json:"tls"`
35+}
36+
37+type RespHeaders struct {
38+ ContentType []string `json:"Content-Type"`
39+}
40+
41+type CaddyAccessLog struct {
42+ Request AccessLogReq `json:"request"`
43+ Status int `json:"status"`
44+ RespHeaders RespHeaders `json:"resp_headers"`
45+}
46+
47+func deserializeCaddyAccessLog(dbpool db.DB, access *CaddyAccessLog) (*db.AnalyticsVisits, error) {
48+ spaceRaw := strings.SplitN(access.Request.Tls.ServerName, ".", 2)
49+ space := spaceRaw[0]
50+ host := access.Request.Host
51+ path := access.Request.Uri
52+ subdomain := ""
53+
54+ // grab subdomain based on host
55+ if strings.HasSuffix(host, "tuns.sh") {
56+ subdomain = strings.TrimSuffix(host, ".tuns.sh")
57+ } else if strings.HasSuffix(host, "pgs.sh") {
58+ subdomain = strings.TrimSuffix(host, ".pgs.sh")
59+ } else if strings.HasSuffix(host, "prose.sh") {
60+ subdomain = strings.TrimSuffix(host, ".prose.sh")
61+ } else {
62+ subdomain = shared.GetCustomDomain(host, space)
63+ }
64+
65+ // get user and namespace details from subdomain
66+ props, err := shared.GetProjectFromSubdomain(subdomain)
67+ if err != nil {
68+ return nil, err
69+ }
70+ // get user ID
71+ user, err := dbpool.FindUserForName(props.Username)
72+ if err != nil {
73+ return nil, err
74+ }
75+
76+ projectID := ""
77+ postID := ""
78+ if space == "pgs" { // figure out project ID
79+ project, err := dbpool.FindProjectByName(user.ID, props.ProjectName)
80+ if err != nil {
81+ return nil, err
82+ }
83+ projectID = project.ID
84+ } else if space == "prose" { // figure out post ID
85+ // skip the post lookup for the blog index page
86+ if path != "" && path != "/" {
87+ post, err := dbpool.FindPostWithSlug(path, user.ID, space)
88+ if err != nil {
89+ return nil, err
90+ }
91+ postID = post.ID
92+ }
93+ }
94+
95+ return &db.AnalyticsVisits{
96+ UserID: user.ID,
97+ ProjectID: projectID,
98+ PostID: postID,
99+ Namespace: space,
100+ Host: host,
101+ Path: path,
102+ IpAddress: access.Request.ClientIP,
103+ UserAgent: strings.Join(access.Request.Headers.UserAgent, " "),
104+ Referer: strings.Join(access.Request.Headers.Referer, " "),
105+ ContentType: strings.Join(access.RespHeaders.ContentType, " "),
106+ Status: access.Status,
107+ }, nil
108+}
109+
110+ // this feels really stupid because I'm taking container-drain,
111+// filtering it, and then sending it to metric-drain. The
112+// metricDrainSub function listens on the metric-drain and saves it.
113+// So why not just call the necessary functions to save the visit?
114+// We want to be able to use pipe as a debugging tool which means we
115+// can manually sub to `metric-drain` and have a nice clean output to view.
116+func containerDrainSub(ctx context.Context, dbpool db.DB, logger *slog.Logger) {
117+ info := shared.NewPicoPipeClient()
118+ drain := pipe.NewReconnectReadWriteCloser(
119+ ctx,
120+ logger,
121+ info,
122+ "container drain",
123+ "sub container-drain -k",
124+ 100,
125+ -1,
126+ )
127+
128+ send := pipe.NewReconnectReadWriteCloser(
129+ ctx,
130+ logger,
131+ info,
132+ "from container drain to metric drain",
133+ "pub metric-drain -b=false",
134+ 100,
135+ -1,
136+ )
137+
138+ for {
139+ scanner := bufio.NewScanner(drain)
140+ for scanner.Scan() {
141+ line := scanner.Text()
142+ if strings.Contains(line, "http.log.access") {
143+ clean := strings.TrimSpace(line)
144+ visit, err := accessLogToVisit(dbpool, clean)
145+ if err != nil {
146+ logger.Debug("could not convert access log to a visit", "err", err)
147+ continue
148+ }
149+ jso, err := json.Marshal(visit)
150+ if err != nil {
151+ logger.Error("could not marshal json of a visit", "err", err)
152+ continue
153+ }
154+ _, _ = send.Write(jso)
155+ }
156+ }
157+ }
158+}
159+
160+func accessLogToVisit(dbpool db.DB, line string) (*db.AnalyticsVisits, error) {
161+ accessLog := CaddyAccessLog{}
162+ err := json.Unmarshal([]byte(line), &accessLog)
163+ if err != nil {
164+ return nil, err
165+ }
166+
167+ return deserializeCaddyAccessLog(dbpool, &accessLog)
168+}
169+
170 func metricDrainSub(ctx context.Context, dbpool db.DB, logger *slog.Logger, secret string) {
171 drain := metrics.ReconnectReadMetrics(
172 ctx,
173@@ -594,30 +745,26 @@ func metricDrainSub(ctx context.Context, dbpool db.DB, logger *slog.Logger, secr
174 visit := db.AnalyticsVisits{}
175 err := json.Unmarshal([]byte(line), &visit)
176 if err != nil {
177- logger.Error("json unmarshal", "err", err)
178+ logger.Info("could not unmarshal json", "err", err, "line", line)
179 continue
180 }
181-
182- user := slog.Any("userId", visit.UserID)
183-
184 err = shared.AnalyticsVisitFromVisit(&visit, dbpool, secret)
185 if err != nil {
186 if !errors.Is(err, shared.ErrAnalyticsDisabled) {
187- logger.Info("could not record analytics visit", "reason", err, "visit", visit, user)
188- continue
189+ logger.Info("could not record analytics visit", "reason", err)
190 }
191 }
192
193- logger.Info("inserting visit", "visit", visit, user)
194+ if visit.ContentType != "" && !strings.HasPrefix(visit.ContentType, "text/html") {
195+ continue
196+ }
197+
198+ logger.Info("inserting visit", "visit", visit)
199 err = dbpool.InsertVisit(&visit)
200 if err != nil {
201- logger.Error("could not insert visit record", "err", err, "visit", visit, user)
202+ logger.Error("could not insert visit record", "err", err)
203 }
204 }
205-
206- if scanner.Err() != nil {
207- logger.Error("scanner error", "err", scanner.Err())
208- }
209 }
210 }
211
212@@ -689,6 +836,9 @@ func StartApiServer() {
213
214 // gather metrics in the auth service
215 go metricDrainSub(ctx, db, logger, cfg.Secret)
216+ // convert container logs to access logs
217+ go containerDrainSub(ctx, db, logger)
218+
219 defer ctx.Done()
220
221 apiConfig := &shared.ApiConfig{
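The comment on containerDrainSub above notes that routing visits through pipe keeps the pipeline observable: you can manually sub to `metric-drain` and watch the JSON visits go by. As a minimal sketch (not part of this commit), a debug subscriber could reuse the same reconnecting client; the topic and arguments mirror the calls in containerDrainSub, and the standalone main wrapper is assumed for illustration.

    package main

    import (
        "bufio"
        "context"
        "fmt"
        "log/slog"
        "os"

        "github.com/picosh/pico/shared"
        "github.com/picosh/utils/pipe"
    )

    func main() {
        ctx := context.Background()
        logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

        // Same reconnecting pipe client used by containerDrainSub, pointed at
        // the metric-drain topic that the visits are published to.
        drain := pipe.NewReconnectReadWriteCloser(
            ctx,
            logger,
            shared.NewPicoPipeClient(),
            "debug metric drain",
            "sub metric-drain -k",
            100,
            -1,
        )

        // Print each JSON visit so the flow can be inspected without touching
        // the database.
        scanner := bufio.NewScanner(drain)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
    }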
M db/db.go +12, -11
1@@ -161,17 +161,18 @@ type PostAnalytics struct {
2 }
3
4 type AnalyticsVisits struct {
5- ID string `json:"id"`
6- UserID string `json:"user_id"`
7- ProjectID string `json:"project_id"`
8- PostID string `json:"post_id"`
9- Namespace string `json:"namespace"`
10- Host string `json:"host"`
11- Path string `json:"path"`
12- IpAddress string `json:"ip_address"`
13- UserAgent string `json:"user_agent"`
14- Referer string `json:"referer"`
15- Status int `json:"status"`
16+ ID string `json:"id"`
17+ UserID string `json:"user_id"`
18+ ProjectID string `json:"project_id"`
19+ PostID string `json:"post_id"`
20+ Namespace string `json:"namespace"`
21+ Host string `json:"host"`
22+ Path string `json:"path"`
23+ IpAddress string `json:"ip_address"`
24+ UserAgent string `json:"user_agent"`
25+ Referer string `json:"referer"`
26+ Status int `json:"status"`
27+ ContentType string `json:"content_type"`
28 }
29
30 type VisitInterval struct {
+2, -1
1@@ -986,7 +986,7 @@ func newNullString(s string) sql.NullString {
2
3 func (me *PsqlDB) InsertVisit(visit *db.AnalyticsVisits) error {
4 _, err := me.Db.Exec(
5- `INSERT INTO analytics_visits (user_id, project_id, post_id, namespace, host, path, ip_address, user_agent, referer, status) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10);`,
6+ `INSERT INTO analytics_visits (user_id, project_id, post_id, namespace, host, path, ip_address, user_agent, referer, status, content_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);`,
7 visit.UserID,
8 newNullString(visit.ProjectID),
9 newNullString(visit.PostID),
10@@ -997,6 +997,7 @@ func (me *PsqlDB) InsertVisit(visit *db.AnalyticsVisits) error {
11 visit.UserAgent,
12 visit.Referer,
13 visit.Status,
14+ visit.ContentType,
15 )
16 return err
17 }
+0, -2
1@@ -177,7 +177,6 @@ func ImgRequest(w http.ResponseWriter, r *http.Request) {
2 dbpool := shared.GetDB(r)
3 logger := shared.GetLogger(r)
4 username := shared.GetUsernameFromRequest(r)
5- analytics := shared.GetAnalyticsQueue(r)
6
7 user, err := dbpool.FindUserForName(username)
8 if err != nil {
9@@ -241,7 +240,6 @@ func ImgRequest(w http.ResponseWriter, r *http.Request) {
10 logger,
11 dbpool,
12 st,
13- analytics,
14 )
15 router.ServeAsset(fname, opts, true, anyPerm, w, r)
16 }
+0, -5
1@@ -59,11 +59,6 @@ type PostPageData struct {
2 Unlisted bool
3 }
4
5-type TransparencyPageData struct {
6- Site shared.SitePageData
7- Analytics *db.Analytics
8-}
9-
10 type Link struct {
11 URL string
12 Text string
+3, -7
1@@ -11,7 +11,6 @@ import (
2 "github.com/charmbracelet/promwish"
3 "github.com/charmbracelet/ssh"
4 "github.com/charmbracelet/wish"
5- "github.com/picosh/pico/db"
6 "github.com/picosh/pico/db/postgres"
7 "github.com/picosh/pico/shared"
8 "github.com/picosh/pico/shared/storage"
9@@ -81,13 +80,10 @@ func StartSshServer() {
10 st,
11 )
12
13- ch := make(chan *db.AnalyticsVisits, 100)
14- go shared.AnalyticsCollect(ch, dbpool, logger)
15 apiConfig := &shared.ApiConfig{
16- Cfg: cfg,
17- Dbpool: dbpool,
18- Storage: st,
19- AnalyticsQueue: ch,
20+ Cfg: cfg,
21+ Dbpool: dbpool,
22+ Storage: st,
23 }
24
25 webTunnel := &tunkit.WebTunnelHandler{
+1, -2
1@@ -51,7 +51,7 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
2 "pubkey", pubkeyStr,
3 )
4
5- props, err := getProjectFromSubdomain(subdomain)
6+ props, err := shared.GetProjectFromSubdomain(subdomain)
7 if err != nil {
8 log.Error(err.Error())
9 return http.HandlerFunc(shared.UnauthorizedHandler)
10@@ -121,7 +121,6 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
11 logger,
12 apiConfig.Dbpool,
13 apiConfig.Storage,
14- apiConfig.AnalyticsQueue,
15 )
16 tunnelRouter := TunnelWebRouter{routes}
17 router := http.NewServeMux()
+14, -36
1@@ -40,10 +40,7 @@ func StartApiServer() {
2 return
3 }
4
5- ch := make(chan *db.AnalyticsVisits, 100)
6- go shared.AnalyticsCollect(ch, dbpool, logger)
7-
8- routes := NewWebRouter(cfg, logger, dbpool, st, ch)
9+ routes := NewWebRouter(cfg, logger, dbpool, st)
10
11 portStr := fmt.Sprintf(":%s", cfg.Port)
12 logger.Info(
13@@ -61,22 +58,20 @@ func StartApiServer() {
14 type HasPerm = func(proj *db.Project) bool
15
16 type WebRouter struct {
17- Cfg *shared.ConfigSite
18- Logger *slog.Logger
19- Dbpool db.DB
20- Storage storage.StorageServe
21- AnalyticsQueue chan *db.AnalyticsVisits
22- RootRouter *http.ServeMux
23- UserRouter *http.ServeMux
24+ Cfg *shared.ConfigSite
25+ Logger *slog.Logger
26+ Dbpool db.DB
27+ Storage storage.StorageServe
28+ RootRouter *http.ServeMux
29+ UserRouter *http.ServeMux
30 }
31
32-func NewWebRouter(cfg *shared.ConfigSite, logger *slog.Logger, dbpool db.DB, st storage.StorageServe, analytics chan *db.AnalyticsVisits) *WebRouter {
33+func NewWebRouter(cfg *shared.ConfigSite, logger *slog.Logger, dbpool db.DB, st storage.StorageServe) *WebRouter {
34 router := &WebRouter{
35- Cfg: cfg,
36- Logger: logger,
37- Dbpool: dbpool,
38- Storage: st,
39- AnalyticsQueue: analytics,
40+ Cfg: cfg,
41+ Logger: logger,
42+ Dbpool: dbpool,
43+ Storage: st,
44 }
45 router.initRouters()
46 return router
47@@ -177,7 +172,7 @@ func (web *WebRouter) checkHandler(w http.ResponseWriter, r *http.Request) {
48
49 if !strings.Contains(hostDomain, appDomain) {
50 subdomain := shared.GetCustomDomain(hostDomain, cfg.Space)
51- props, err := getProjectFromSubdomain(subdomain)
52+ props, err := shared.GetProjectFromSubdomain(subdomain)
53 if err != nil {
54 logger.Error(
55 "could not get project from subdomain",
56@@ -333,7 +328,7 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
57 "host", r.Host,
58 )
59
60- props, err := getProjectFromSubdomain(subdomain)
61+ props, err := shared.GetProjectFromSubdomain(subdomain)
62 if err != nil {
63 logger.Info(
64 "could not determine project from subdomain",
65@@ -450,20 +445,3 @@ func (web *WebRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
66 ctx = context.WithValue(ctx, shared.CtxSubdomainKey{}, subdomain)
67 router.ServeHTTP(w, r.WithContext(ctx))
68 }
69-
70-type SubdomainProps struct {
71- ProjectName string
72- Username string
73-}
74-
75-func getProjectFromSubdomain(subdomain string) (*SubdomainProps, error) {
76- props := &SubdomainProps{}
77- strs := strings.SplitN(subdomain, "-", 2)
78- props.Username = strs[0]
79- if len(strs) == 2 {
80- props.ProjectName = strs[1]
81- } else {
82- props.ProjectName = props.Username
83- }
84- return props, nil
85-}
+0, -37
1@@ -1,7 +1,6 @@
2 package pgs
3
4 import (
5- "errors"
6 "fmt"
7 "io"
8 "log/slog"
9@@ -15,7 +14,6 @@ import (
10 "net/http/httputil"
11 _ "net/http/pprof"
12
13- "github.com/picosh/pico/shared"
14 "github.com/picosh/pico/shared/storage"
15 sst "github.com/picosh/pobj/storage"
16 )
17@@ -155,22 +153,6 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
18 "routes", strings.Join(attempts, ", "),
19 "status", http.StatusNotFound,
20 )
21- // track 404s
22- ch := h.AnalyticsQueue
23- view, err := shared.AnalyticsVisitFromRequest(r, h.Dbpool, h.UserID)
24- if err == nil {
25- view.ProjectID = h.ProjectID
26- view.Status = http.StatusNotFound
27- select {
28- case ch <- view:
29- default:
30- logger.Error("could not send analytics view to channel", "view", view)
31- }
32- } else {
33- if !errors.Is(err, shared.ErrAnalyticsDisabled) {
34- logger.Error("could not record analytics view", "err", err, "view", view)
35- }
36- }
37 http.Error(w, "404 not found", http.StatusNotFound)
38 return
39 }
40@@ -236,25 +218,6 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
41
42 finContentType := w.Header().Get("content-type")
43
44- // only track pages, not individual assets
45- if finContentType == "text/html" {
46- // track visit
47- ch := h.AnalyticsQueue
48- view, err := shared.AnalyticsVisitFromRequest(r, h.Dbpool, h.UserID)
49- if err == nil {
50- view.ProjectID = h.ProjectID
51- select {
52- case ch <- view:
53- default:
54- logger.Error("could not send analytics view to channel", "view", view)
55- }
56- } else {
57- if !errors.Is(err, shared.ErrAnalyticsDisabled) {
58- logger.Error("could not record analytics view", "err", err, "view", view)
59- }
60- }
61- }
62-
63 logger.Info(
64 "serving asset",
65 "asset", assetFilepath,
+2, -42
1@@ -8,7 +8,6 @@ import (
2 "net/http/httptest"
3 "strings"
4 "testing"
5- "time"
6
7 "github.com/picosh/pico/db"
8 "github.com/picosh/pico/db/stub"
9@@ -219,8 +218,7 @@ func TestApiBasic(t *testing.T) {
10 responseRecorder := httptest.NewRecorder()
11
12 st, _ := storage.NewStorageMemory(tc.storage)
13- ch := make(chan *db.AnalyticsVisits, 100)
14- router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st, ch)
15+ router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st)
16 router.ServeHTTP(responseRecorder, request)
17
18 if responseRecorder.Code != tc.status {
19@@ -240,43 +238,6 @@ func TestApiBasic(t *testing.T) {
20 }
21 }
22
23-func TestAnalytics(t *testing.T) {
24- bucketName := shared.GetAssetBucketName(testUserID)
25- cfg := NewConfigSite()
26- cfg.Domain = "pgs.test"
27- expectedPath := "/app"
28- request := httptest.NewRequest("GET", mkpath(expectedPath), strings.NewReader(""))
29- responseRecorder := httptest.NewRecorder()
30-
31- sto := map[string]map[string]string{
32- bucketName: {
33- "test/app.html": "hello world!",
34- },
35- }
36- st, _ := storage.NewStorageMemory(sto)
37- ch := make(chan *db.AnalyticsVisits, 100)
38- dbpool := NewPgsAnalticsDb(cfg.Logger)
39- router := NewWebRouter(cfg, cfg.Logger, dbpool, st, ch)
40-
41- go func() {
42- for analytics := range ch {
43- if analytics.Path != expectedPath {
44- t.Errorf("Want path '%s', got '%s'", expectedPath, analytics.Path)
45- }
46- close(ch)
47- }
48- }()
49-
50- router.ServeHTTP(responseRecorder, request)
51-
52- select {
53- case <-ch:
54- return
55- case <-time.After(time.Second * 1):
56- t.Error("didnt receive analytics event within time limit")
57- }
58-}
59-
60 type ImageStorageMemory struct {
61 *storage.StorageMemory
62 Opts *storage.ImgProcessOpts
63@@ -337,8 +298,7 @@ func TestImageManipulation(t *testing.T) {
64 Ratio: &storage.Ratio{},
65 },
66 }
67- ch := make(chan *db.AnalyticsVisits, 100)
68- router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st, ch)
69+ router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st)
70 router.ServeHTTP(responseRecorder, request)
71
72 if responseRecorder.Code != tc.status {
+3, -43
1@@ -2,7 +2,6 @@ package prose
2
3 import (
4 "bytes"
5- "errors"
6 "fmt"
7 "html/template"
8 "net/http"
9@@ -89,11 +88,6 @@ type PostPageData struct {
10 Diff template.HTML
11 }
12
13-type TransparencyPageData struct {
14- Site shared.SitePageData
15- Analytics *db.Analytics
16-}
17-
18 type HeaderTxt struct {
19 Title string
20 Bio string
21@@ -270,21 +264,6 @@ func blogHandler(w http.ResponseWriter, r *http.Request) {
22 postCollection = append(postCollection, p)
23 }
24
25- // track visit
26- ch := shared.GetAnalyticsQueue(r)
27- view, err := shared.AnalyticsVisitFromRequest(r, dbpool, user.ID)
28- if err == nil {
29- select {
30- case ch <- view:
31- default:
32- logger.Error("could not send analytics view to channel", "view", view)
33- }
34- } else {
35- if !errors.Is(err, shared.ErrAnalyticsDisabled) {
36- logger.Error("could not record analytics view", "err", err, "view", view)
37- }
38- }
39-
40 data := BlogPageData{
41 Site: *cfg.GetSiteData(),
42 PageTitle: headerTxt.Title,
43@@ -350,7 +329,6 @@ func postHandler(w http.ResponseWriter, r *http.Request) {
44 username := shared.GetUsernameFromRequest(r)
45 subdomain := shared.GetSubdomain(r)
46 cfg := shared.GetCfg(r)
47- ch := shared.GetAnalyticsQueue(r)
48
49 var slug string
50 if !cfg.IsSubdomains() || subdomain == "" {
51@@ -429,21 +407,6 @@ func postHandler(w http.ResponseWriter, r *http.Request) {
52 ogImageCard = parsedText.ImageCard
53 }
54
55- // track visit
56- view, err := shared.AnalyticsVisitFromRequest(r, dbpool, user.ID)
57- if err == nil {
58- view.PostID = post.ID
59- select {
60- case ch <- view:
61- default:
62- logger.Error("could not send analytics view to channel", "view", view)
63- }
64- } else {
65- if !errors.Is(err, shared.ErrAnalyticsDisabled) {
66- logger.Error("could not record analytics view", "err", err, "view", view)
67- }
68- }
69-
70 unlisted := false
71 if post.Hidden || post.PublishAt.After(time.Now()) {
72 unlisted = true
73@@ -953,13 +916,10 @@ func StartApiServer() {
74 mainRoutes := createMainRoutes(staticRoutes)
75 subdomainRoutes := createSubdomainRoutes(staticRoutes)
76
77- ch := make(chan *db.AnalyticsVisits, 100)
78- go shared.AnalyticsCollect(ch, dbpool, logger)
79 apiConfig := &shared.ApiConfig{
80- Cfg: cfg,
81- Dbpool: dbpool,
82- Storage: st,
83- AnalyticsQueue: ch,
84+ Cfg: cfg,
85+ Dbpool: dbpool,
86+ Storage: st,
87 }
88 handler := shared.CreateServe(mainRoutes, subdomainRoutes, apiConfig)
89 router := http.HandlerFunc(handler)
1@@ -13,6 +13,23 @@ import (
2 "github.com/picosh/utils"
3 )
4
5+type SubdomainProps struct {
6+ ProjectName string
7+ Username string
8+}
9+
10+func GetProjectFromSubdomain(subdomain string) (*SubdomainProps, error) {
11+ props := &SubdomainProps{}
12+ strs := strings.SplitN(subdomain, "-", 2)
13+ props.Username = strs[0]
14+ if len(strs) == 2 {
15+ props.ProjectName = strs[1]
16+ } else {
17+ props.ProjectName = props.Username
18+ }
19+ return props, nil
20+}
21+
22 func CorsHeaders(headers http.Header) {
23 headers.Add("Access-Control-Allow-Origin", "*")
24 headers.Add("Vary", "Origin")
1@@ -69,10 +69,9 @@ func CreatePProfRoutesMux(mux *http.ServeMux) {
2 }
3
4 type ApiConfig struct {
5- Cfg *ConfigSite
6- Dbpool db.DB
7- Storage storage.StorageServe
8- AnalyticsQueue chan *db.AnalyticsVisits
9+ Cfg *ConfigSite
10+ Dbpool db.DB
11+ Storage storage.StorageServe
12 }
13
14 func (hc *ApiConfig) HasPrivilegedAccess(apiToken string) bool {
15@@ -93,7 +92,6 @@ func (hc *ApiConfig) CreateCtx(prevCtx context.Context, subdomain string) contex
16 ctx = context.WithValue(ctx, ctxDBKey{}, hc.Dbpool)
17 ctx = context.WithValue(ctx, ctxStorageKey{}, hc.Storage)
18 ctx = context.WithValue(ctx, ctxCfg{}, hc.Cfg)
19- ctx = context.WithValue(ctx, ctxAnalyticsQueue{}, hc.AnalyticsQueue)
20 return ctx
21 }
22
23@@ -172,7 +170,6 @@ type ctxDBKey struct{}
24 type ctxStorageKey struct{}
25 type ctxLoggerKey struct{}
26 type ctxCfg struct{}
27-type ctxAnalyticsQueue struct{}
28
29 type CtxSubdomainKey struct{}
30 type ctxKey struct{}
31@@ -228,10 +225,6 @@ func GetCustomDomain(host string, space string) string {
32 return ""
33 }
34
35-func GetAnalyticsQueue(r *http.Request) chan *db.AnalyticsVisits {
36- return r.Context().Value(ctxAnalyticsQueue{}).(chan *db.AnalyticsVisits)
37-}
38-
39 func GetApiToken(r *http.Request) string {
40 authHeader := r.Header.Get("authorization")
41 if authHeader == "" {
1@@ -0,0 +1 @@
2+ALTER TABLE analytics_visits ADD COLUMN content_type varchar(256);