diff --git a/Makefile b/Makefile index 8712a40cc7..9bb1eb3c66 100644 --- a/Makefile +++ b/Makefile @@ -236,7 +236,7 @@ ci-test-watch: ginkgo go-test: . ./env.test.sh && $(TIMING_CMD) go test $(GOTEST_FLAGS) $(GOTEST_PKGS) -go-ci-test: GOTEST_FLAGS += -count=1 -race -shuffle=on -cover +go-ci-test: override GOTEST_FLAGS += -count=1 -race -shuffle=on -cover go-ci-test: GOTEST_PKGS = ./... go-ci-test: go-test diff --git a/alerts/client.go b/alerts/client.go index dcaafce96c..4ff7656ba8 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -3,11 +3,12 @@ package alerts import ( "context" "net/http" + "time" "github.com/kelseyhightower/envconfig" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/errors" platformlog "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" "github.com/tidepool-org/platform/platform" @@ -16,22 +17,20 @@ import ( // Client for managing alerts configs. type Client struct { - client PlatformClient - logger platformlog.Logger - tokenProvider auth.ServerSessionTokenProvider + client PlatformClient + logger platformlog.Logger } // NewClient builds a client for interacting with alerts API endpoints. // // If no logger is provided, a null logger is used. -func NewClient(client PlatformClient, tokenProvider auth.ServerSessionTokenProvider, logger platformlog.Logger) *Client { +func NewClient(client PlatformClient, logger platformlog.Logger) *Client { if logger == nil { logger = null.NewLogger() } return &Client{ - client: client, - logger: logger, - tokenProvider: tokenProvider, + client: client, + logger: logger, } } @@ -44,34 +43,69 @@ type PlatformClient interface { // request performs common operations before passing a request off to the // underlying platform.Client. -func (c *Client) request(ctx context.Context, method, url string, body any) error { +func (c *Client) request(ctx context.Context, method, url string, reqBody, resBody any) error { // Platform's client.Client expects a logger to exist in the request's // context. If it doesn't exist, request processing will panic. loggingCtx := platformlog.NewContextWithLogger(ctx, c.logger) - // Make sure the auth token is injected into the request's headers. - return c.requestWithAuth(loggingCtx, method, url, body) -} - -// requestWithAuth injects an auth token before calling platform.Client.RequestData. -// -// At time of writing, this is the only way to inject credentials into -// platform.Client. It might be nice to be able to use a mutator, but the auth -// is specifically handled by the platform.Client via the context field, and -// if left blank, platform.Client errors. -func (c *Client) requestWithAuth(ctx context.Context, method, url string, body any) error { - return c.client.RequestData(auth.NewContextWithServerSessionTokenProvider(ctx, c.tokenProvider), method, url, nil, body, nil) + return c.client.RequestData(loggingCtx, method, url, nil, reqBody, resBody) } // Upsert updates cfg if it exists or creates it if it doesn't. func (c *Client) Upsert(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodPost, url, cfg) + return c.request(ctx, http.MethodPost, url, cfg, nil) } // Delete the alerts config. 
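With the server-session-token provider removed, callers now construct the alerts client from a platform.Client that carries a service secret. A minimal sketch of that wiring outside of tests, mirroring buildTestClient in client_test.go below (the address and secret values are placeholders, not part of this change):

package example

import (
	"github.com/tidepool-org/platform/alerts"
	"github.com/tidepool-org/platform/client"
	"github.com/tidepool-org/platform/log/null"
	"github.com/tidepool-org/platform/platform"
)

// newAlertsClient builds an alerts.Client authorized as a service; the
// ServiceSecret takes over the role of the removed token provider.
func newAlertsClient(address, secret string) (*alerts.Client, error) {
	pCfg := &platform.Config{
		Config:        &client.Config{Address: address},
		ServiceSecret: secret,
	}
	pc, err := platform.NewClient(pCfg, platform.AuthorizeAsService)
	if err != nil {
		return nil, err
	}
	// A real service would pass its own logger instead of the null logger.
	return alerts.NewClient(pc, null.NewLogger()), nil
}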
func (c *Client) Delete(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodDelete, url, nil) + return c.request(ctx, http.MethodDelete, url, nil, nil) +} + +// Get a user's alerts configuration for the followed user. +func (c *Client) Get(ctx context.Context, followedUserID, userID string) (*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", userID, "alerts") + config := &Config{} + err := c.request(ctx, http.MethodGet, url, nil, config) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts config") + } + return config, nil +} + +// List the alerts configurations that follow the given user. +// +// This method should only be called via an authenticated service session. +func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", "alerts") + configs := []*Config{} + err := c.request(ctx, http.MethodGet, url, nil, &configs) + if err != nil { + c.logger.Debugf("unable to request alerts configs list: %+v %T", err, err) + return nil, errors.Wrap(err, "Unable to request alerts configs list") + } + return configs, nil +} + +// OverdueCommunications are those that haven't communicated in some time. +// +// This method should only be called via an authenticated service session. +func (c *Client) OverdueCommunications(ctx context.Context) ([]LastCommunication, error) { + url := c.client.ConstructURL("v1", "overdue_communications") + lastComms := []LastCommunication{} + err := c.request(ctx, http.MethodGet, url, nil, &lastComms) + if err != nil { + c.logger.Debugf("getting users overdue to communicate: \"%+v\" %T", err, err) + return nil, errors.Wrap(err, "Unable to list overdue communications") + } + return lastComms, nil +} + +// LastCommunication records the last time data was received from a user. +type LastCommunication struct { + UserID string `bson:"userId" json:"userId"` + DataSetID string `bson:"dataSetId" json:"dataSetId"` + LastReceivedDeviceData time.Time `bson:"lastReceivedDeviceData" json:"lastReceivedDeviceData"` } // ConfigLoader abstracts the method by which config values are loaded. diff --git a/alerts/client_test.go b/alerts/client_test.go index c5a771256f..cb647cbfa4 100644 --- a/alerts/client_test.go +++ b/alerts/client_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" @@ -16,10 +15,13 @@ import ( ) const testToken = "auth-me" +const testUserID = "test-user-id" +const testFollowedUserID = "test-followed-user-id" +const testDataSetID = "upid_000000000000" var _ = Describe("Client", func() { - var test404Server, test200Server *httptest.Server - var testAuthServer func(*string) *httptest.Server + var test404Server *httptest.Server + var test200Server func(string) *httptest.Server BeforeEach(func() { t := GinkgoT() @@ -28,72 +30,103 @@ var _ = Describe("Client", func() { test404Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) }) - test200Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - testAuthServer = func(token *string) *httptest.Server { + test200Server = func(resp string) *httptest.Server { return testServer(t, func(w http.ResponseWriter, r *http.Request) { - *token = r.Header.Get(auth.TidepoolSessionTokenHeaderKey) w.WriteHeader(http.StatusOK) + w.Write([]byte(resp)) }) } }) - Context("Delete", func() { - It("returns an error on non-200 responses", func() { + ItReturnsAnErrorOnNon200Responses := func(f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns an error on non-200 respnoses", func() { client, ctx := newAlertsClientTest(test404Server) - err := client.Delete(ctx, &Config{}) + err := f(ctx, client) Expect(err).Should(HaveOccurred()) Expect(err).To(MatchError(ContainSubstring("resource not found"))) }) + } - It("returns nil on success", func() { - client, ctx := newAlertsClientTest(test200Server) - err := client.Delete(ctx, &Config{}) - Expect(err).ShouldNot(HaveOccurred()) + ItReturnsANilErrorOnSuccess := func(resp string, f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns a nil error on success", func() { + client, ctx := newAlertsClientTest(test200Server(resp)) + err := f(ctx, client) + Expect(err).To(Succeed()) + }) + } + + Context("Delete", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) }) - It("injects an auth token", func() { - token := "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Delete(ctx, &Config{}) - Expect(token).To(Equal(testToken)) + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) }) }) Context("Upsert", func() { - It("returns an error on non-200 responses", func() { - client, ctx := newAlertsClientTest(test404Server) - err := client.Upsert(ctx, &Config{}) - Expect(err).Should(HaveOccurred()) - Expect(err).To(MatchError(ContainSubstring("resource not found"))) + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + }) + + Context("Get", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err }) - It("returns nil on success", func() { - client, ctx := newAlertsClientTest(test200Server) - err := client.Upsert(ctx, &Config{}) - 
Expect(err).ShouldNot(HaveOccurred()) + ret := `{ + "userId": "14ee703f-ca9b-4a6b-9ce3-41d886514e7f", + "followedUserId": "ce5863bc-cc0b-4177-97d7-e8de0c558820", + "uploadId": "upid_00000000000000000000000000000000" + }` + ItReturnsANilErrorOnSuccess(ret, func(ctx context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err }) + }) - It("injects an auth token", func() { - token := "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Upsert(ctx, &Config{}) - Expect(token).To(Equal(testToken)) + Context("List", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err + }) + + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err + }) + }) + + Context("OverdueCommunications", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.OverdueCommunications(ctx) + return err + }) + + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.OverdueCommunications(ctx) + return err }) }) }) func buildTestClient(s *httptest.Server) *Client { pCfg := &platform.Config{ - Config: &client.Config{ - Address: s.URL, - }, + Config: &client.Config{Address: s.URL}, + ServiceSecret: "auth-me", } - token := mockTokenProvider(testToken) pc, err := platform.NewClient(pCfg, platform.AuthorizeAsService) Expect(err).ToNot(HaveOccurred()) - client := NewClient(pc, token, null.NewLogger()) + client := NewClient(pc, null.NewLogger()) return client } @@ -101,14 +134,14 @@ func newAlertsClientTest(server *httptest.Server) (*Client, context.Context) { return buildTestClient(server), contextWithNullLogger() } -func contextWithNullLogger() context.Context { - return log.NewContextWithLogger(context.Background(), null.NewLogger()) +func contextWithNullLoggerDeluxe() (context.Context, log.Logger) { + lgr := null.NewLogger() + return log.NewContextWithLogger(context.Background(), lgr), lgr } -type mockTokenProvider string - -func (p mockTokenProvider) ServerSessionToken() (string, error) { - return string(p), nil +func contextWithNullLogger() context.Context { + ctx, _ := contextWithNullLoggerDeluxe() + return ctx } func testServer(t GinkgoTInterface, handler http.HandlerFunc) *httptest.Server { diff --git a/alerts/config.go b/alerts/config.go index 67f2b1d72c..a1cae72a3c 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -6,10 +6,17 @@ import ( "bytes" "context" "encoding/json" + "os" + "slices" "time" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" "github.com/tidepool-org/platform/structure" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/user" @@ -17,9 +24,8 @@ import ( // Config wraps Alerts to include user relationships. // -// As a wrapper type, Config provides a clear demarcation of what a user -// controls (Alerts) and what is set by the service (the other values in -// Config). 
+// As a wrapper type, Config provides a clear demarcation of what a user controls (Alerts) +// and what is set by the service (the other values in Config). type Config struct { // UserID receives the configured alerts and owns this Config. UserID string `json:"userId" bson:"userId"` @@ -31,57 +37,251 @@ type Config struct { // UploadID identifies the device dataset for which these alerts apply. UploadID string `json:"uploadId" bson:"uploadId,omitempty"` - Alerts `bson:",inline,omitempty"` + // Alerts collects the user settings for each type of alert, and tracks their statuses. + Alerts `bson:"alerts,omitempty"` + + Activity `bson:"activity,omitempty" json:"activity,omitempty"` } -// Alerts models a user's desired alerts. +// Alerts is a wrapper to collect the user-modifiable parts of a Config. type Alerts struct { UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` - NoCommunication *NoCommunicationAlert `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` + NoCommunication *NoCommunicationAlert `bson:"noCommunication,omitempty" json:"noCommunication,omitempty"` +} + +type Activity struct { + UrgentLow AlertActivity `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` + Low AlertActivity `json:"low,omitempty" bson:"low,omitempty"` + High AlertActivity `json:"high,omitempty" bson:"high,omitempty"` + NotLooping AlertActivity `json:"notLooping,omitempty" bson:"notLooping,omitempty"` + NoCommunication AlertActivity `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` } func (c Config) Validate(validator structure.Validator) { validator.String("userID", &c.UserID).Using(user.IDValidator) validator.String("followedUserID", &c.FollowedUserID).Using(user.IDValidator) validator.String("uploadID", &c.UploadID).Exists().Using(data.SetIDValidator) - c.Alerts.Validate(validator) + if c.Alerts.UrgentLow != nil { + c.Alerts.UrgentLow.Validate(validator) + } + if c.Alerts.Low != nil { + c.Alerts.Low.Validate(validator) + } + if c.Alerts.High != nil { + c.Alerts.High.Validate(validator) + } + if c.Alerts.NotLooping != nil { + c.Alerts.NotLooping.Validate(validator) + } + if c.Alerts.NoCommunication != nil { + c.Alerts.NoCommunication.Validate(validator) + } +} + +// EvaluateData alerts in the context of the provided data. +// +// While this method, or the methods it calls, can fail, there's no point in returning an +// error. Instead errors are logged before continuing. This is to ensure that any possible +// alert that should be triggered, will be triggered. 
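A sketch of the call pattern this enables (the repo and push arguments are illustrative assumptions; only the alerts-package signatures are taken from this change):

package example

import (
	"context"
	"time"

	"github.com/tidepool-org/platform/alerts"
)

// evaluateAndNotify shows one plausible way to consume EvaluateData's two
// return values: push the notification (if any), record when it was sent,
// then persist the Config whenever its Activity changed.
func evaluateAndNotify(ctx context.Context, repo alerts.Repository,
	push func(context.Context, *alerts.Notification) error,
	cfg *alerts.Config, bg []*alerts.Glucose, dd []*alerts.DosingDecision) error {

	note, needsUpsert := cfg.EvaluateData(ctx, bg, dd)
	if note != nil {
		if err := push(ctx, note); err != nil {
			return err
		}
		note.Sent(time.Now()) // updates the matching Activity's Sent time
		needsUpsert = true
	}
	if needsUpsert {
		return repo.Upsert(ctx, cfg)
	}
	return nil
}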
+func (c *Config) EvaluateData(ctx context.Context, gd []*Glucose, + dd []*DosingDecision) (*Notification, bool) { + + var n *Notification + var needsUpsert bool + + ul, low, high, nl := EvalResult{}, EvalResult{}, EvalResult{}, EvalResult{} + if c.Alerts.UrgentLow != nil && c.Alerts.UrgentLow.Enabled { + ul = c.Alerts.UrgentLow.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.UrgentLow.Update(ul.OutOfRange) + } + if c.Alerts.Low != nil && c.Alerts.Low.Enabled { + low = c.Alerts.Low.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.Low.Update(low.OutOfRange) + } + if c.Alerts.High != nil && c.Alerts.High.Enabled { + high = c.Alerts.High.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.High.Update(high.OutOfRange) + } + if c.Alerts.NotLooping != nil && c.Alerts.NotLooping.Enabled { + nl = c.Alerts.NotLooping.Evaluate(ctx, dd) + needsUpsert = needsUpsert || c.Activity.NotLooping.Update(nl.OutOfRange) + } + + if ul.OutOfRange { + if isReEval(c.Activity.UrgentLow.Sent, ul.NewestTime) { + return nil, needsUpsert + } + msg := genGlucoseThresholdMessage("below urgent low") + return c.newNotification(msg, &c.Activity.UrgentLow), needsUpsert + } + if low.OutOfRange { + if isReEval(c.Activity.Low.Sent, low.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.Low.Delay.Duration() + + if time.Since(low.Started) > delay { + repeat := c.Alerts.Low.Repeat + if !c.Activity.Low.IsSent() || mayRepeat(repeat, c.Activity.Low.Sent) { + msg := genGlucoseThresholdMessage("below low") + return c.newNotification(msg, &c.Activity.Low), needsUpsert + + } + } + return nil, needsUpsert + } + if high.OutOfRange { + if isReEval(c.Activity.High.Sent, high.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.High.Delay.Duration() + if time.Since(high.Started) > delay { + repeat := c.Alerts.High.Repeat + if !c.Activity.High.IsSent() || mayRepeat(repeat, c.Activity.High.Sent) { + msg := genGlucoseThresholdMessage("above high") + return c.newNotification(msg, &c.Activity.High), needsUpsert + } + } + } + if nl.OutOfRange { + // Because not looping doesn't use a threshold, re-evaluations aren't treated any + // differently. 
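// (The code below throttles these notifications instead: the alert's configured
// delay, or NotLoopingRepeat when that delay is zero, must have elapsed since
// Activity.NotLooping.Sent before another notification is returned.)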
+ delay := c.Alerts.NotLooping.Delay.Duration() + if delay == 0 { + delay = NotLoopingRepeat + } + if time.Since(c.Activity.NotLooping.Sent) > delay { + return c.newNotification(NotLoopingMessage, &c.Activity.NotLooping), needsUpsert + } + } + + return n, needsUpsert +} + +func mayRepeat(repeat DurationMinutes, lastSent time.Time) bool { + return repeat.Duration() > 0 && time.Since(lastSent) > repeat.Duration() +} + +func (c *Config) newNotification(msg string, act *AlertActivity) *Notification { + return &Notification{ + FollowedUserID: c.FollowedUserID, + RecipientUserID: c.UserID, + Message: msg, + Sent: func(t time.Time) { + if t.After(act.Sent) { + act.Sent = t + } + }, + } +} + +func (c Config) LoggerWithFields(lgr log.Logger) log.Logger { + return lgr.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + "dataSetID": c.UploadID, + }) +} + +func isReEval(t1, t2 time.Time) bool { + return t1.After(t2) } -func (a Alerts) Validate(validator structure.Validator) { - if a.UrgentLow != nil { - a.UrgentLow.Validate(validator) +func (c *Config) EvaluateNoCommunication(ctx context.Context, + lgr log.Logger, last time.Time) (*Notification, bool) { + + if c.Alerts.NoCommunication == nil || !c.Alerts.NoCommunication.Enabled { + return nil, false } - if a.Low != nil { - a.Low.Validate(validator) + + ctx = log.NewContextWithLogger(ctx, lgr) + nc := c.Alerts.NoCommunication.Evaluate(ctx, last) + needsUpsert := c.Activity.NoCommunication.Update(nc.OutOfRange) + delay := c.Alerts.NoCommunication.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay + } + if time.Since(nc.Started) > delay && time.Since(c.Activity.NoCommunication.Sent) > delay { + n := c.newNotification(NoCommunicationMessage, &c.Activity.NoCommunication) + return n, needsUpsert + } + return nil, needsUpsert +} + +// LongestDelay of the delays set on enabled alerts. +func (a Alerts) LongestDelay() time.Duration { + delays := []time.Duration{} + if a.Low != nil && a.Low.Enabled { + delays = append(delays, a.Low.Delay.Duration()) } - if a.High != nil { - a.High.Validate(validator) + if a.High != nil && a.High.Enabled { + delays = append(delays, a.High.Delay.Duration()) } - if a.NotLooping != nil { - a.NotLooping.Validate(validator) + if a.NotLooping != nil && a.NotLooping.Enabled { + delays = append(delays, a.NotLooping.Delay.Duration()) } - if a.NoCommunication != nil { - a.NoCommunication.Validate(validator) + if len(delays) == 0 { + return 0 } + return slices.Max(delays) } // Base describes the minimum specifics of a desired alert. type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` - // Repeat is measured in minutes. - // - // A value of 0 (the default) disables repeat notifications. - Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) - dur := b.Repeat.Duration() - validator.Duration("repeat", &dur).Using(validateRepeat) +} + +func (b Base) Evaluate(ctx context.Context, data []*Glucose) *Notification { + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") + } + return nil +} + +func (b Base) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. 
+ lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + } + return lgr +} + +type AlertActivity struct { + // Triggered records the last time this alert was triggered. + Triggered time.Time `json:"triggered" bson:"triggered"` + // Sent records the last time this alert was sent. + Sent time.Time `json:"sent" bson:"sent"` + // Resolved records the last time this alert was resolved. + Resolved time.Time `json:"resolved" bson:"resolved"` +} + +func (a AlertActivity) IsActive() bool { + return a.Triggered.After(a.Resolved) +} + +func (a AlertActivity) IsSent() bool { + return a.Sent.After(a.Triggered) +} + +func (a *AlertActivity) Update(outOfRange bool) bool { + changed := false + if outOfRange && !a.IsActive() { + a.Triggered = time.Now() + changed = true + } else if !outOfRange && a.IsActive() { + a.Resolved = time.Now() + changed = true + } + return changed } const ( @@ -110,7 +310,7 @@ type UrgentLowAlert struct { Base `bson:",inline"` // Threshold is compared the current value to determine if an alert should // be triggered. - Threshold `json:"threshold"` + Threshold `json:"threshold" bson:"threshold"` } func (a UrgentLowAlert) Validate(validator structure.Validator) { @@ -118,6 +318,112 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { a.Threshold.Validate(validator) } +type EvalResult struct { + Name string + Started time.Time + Threshold float64 + NewestTime time.Time + NewestValue float64 + Evaluator func(dv, tv float64) bool `json:"-"` + OutOfRange bool +} + +func (r EvalResult) String() string { + b, err := json.Marshal(r) + if err != nil { + return "" + } + return string(b) +} + +func (r *EvalResult) Process(ctx context.Context, t Threshold, data []*Glucose) { + for _, datum := range data { + dv, tv, err := normalizeUnits(datum, t) + if err != nil { + r.lgr(ctx).WithError(err).Info("Unable to normalize datum") + continue + } + + if datum.Time == nil { + r.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") + continue + } + + outOfRange := r.Evaluator(dv, tv) + + if r.NewestValue == 0 { + r.NewestValue = dv + r.NewestTime = *datum.Time + r.OutOfRange = outOfRange + r.Threshold = tv + r.logGlucoseEval(ctx) + } + + if !outOfRange { + break + } + + if datum.Time != nil && (r.Started.IsZero() || datum.Time.Before(r.Started)) { + r.Started = *datum.Time + } + } +} + +// Evaluate urgent low condition. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "urgent low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, + } + er.Process(ctx, a.Threshold, data) + return er +} + +func (r EvalResult) logGlucoseEval(ctx context.Context) { + fields := log.Fields{ + "isAlerting?": r.Evaluator(r.NewestValue, r.Threshold), + "threshold": r.Threshold, + "value": r.NewestValue, + } + r.lgr(ctx).WithFields(fields).Info(r.Name) +} + +func (r EvalResult) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. + lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + } + return lgr +} + +func normalizeUnits(datum *Glucose, t Threshold) (float64, float64, error) { + if datum == nil || datum.Blood.Units == nil || datum.Blood.Value == nil { + return 0, 0, errors.Newf("Unable to evaluate datum: Units or Value is nil") + } + + // Both units are the same, no need to convert either. 
+ if t.Units == *datum.Blood.Units { + return *datum.Blood.Value, t.Value, nil + } + + // The units don't match. There exists a known good function that converts to MmolL, so + // we'll convert whichever value isn't in MmolL to MmolL. + + if dataBloodGlucose.IsMmolL(t.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(datum.Blood.Value, datum.Blood.Units) + return *n, t.Value, nil + } else if dataBloodGlucose.IsMmolL(*datum.Blood.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(&t.Value, &t.Units) + return *datum.Blood.Value, *n, nil + } + + // This shouldn't happen. It indicates a new, third glucose unit is in use. + return 0, 0, errors.New("Unable to handle unit conversion, neither is MmolL") +} + // NotLoopingAlert extends Base with a delay. type NotLoopingAlert struct { Base `bson:",inline"` @@ -130,18 +436,103 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 2*time.Hour) } -// NoCommunicationAlert extends Base with a delay. +// Evaluate if the device is looping. +func (a *NotLoopingAlert) Evaluate(ctx context.Context, decisions []*DosingDecision) EvalResult { + er := EvalResult{} + for _, decision := range decisions { + if decision.Reason == nil || *decision.Reason != DosingDecisionReasonLoop { + continue + } + if decision.Time == nil { + a.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") + continue + } + if !decision.Time.IsZero() { + er.NewestTime = *decision.Time + break + } + } + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNotLoopingDelay + } + er.OutOfRange = time.Since(er.NewestTime) > delay + logNotLoopingEvaluation(a.lgr(ctx), er.OutOfRange, time.Since(er.NewestTime), delay) + + return er +} + +// DefaultNotLoopingDelay is used when the delay has a Zero value (its default). +const DefaultNotLoopingDelay = 30 * time.Minute + +func logNotLoopingEvaluation(lgr log.Logger, isAlerting bool, since, threshold time.Duration) { + fields := log.Fields{ + "isAlerting?": isAlerting, + "value": since, + "threshold": threshold, + } + lgr.WithFields(fields).Info("not looping") +} + +const NotLoopingMessage = "Loop is not able to loop" + +// DosingDecisionReasonLoop is specified in a [DosingDecision] to indicate +// that the decision is part of a loop adjustment (as opposed to bolus or something else). +const DosingDecisionReasonLoop string = "loop" + +// NotLoopingRepeat is the interval between sending notifications when not looping. +const NotLoopingRepeat = 5 * time.Minute + +// NoCommunicationAlert is configured to send notifications when no data is received. +// +// It differs fundamentally from DataAlerts in that it is polled instead of being triggered +// when data is received. type NoCommunicationAlert struct { - Base `bson:",inline"` + Base `bson:",inline"` + // Delay represents the time after which a No Communication alert should be sent. + // + // A value of 0 is the default, and is treated as five minutes. Delay DurationMinutes `json:"delay,omitempty"` } func (a NoCommunicationAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + if dur != 0 { + validator.Duration("delay", &dur). + InRange(MinimumNoCommunicationDelay, MaximumNoCommunicationDelay) + } } +// Evaluate if the time since data was last received warrants a notification. 
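Upstream of this method a poller is expected to stitch together Client.OverdueCommunications, Client.List, and EvaluateNoCommunication. A hedged sketch of that loop (the push callback and error handling are assumptions, not part of this change):

package example

import (
	"context"
	"time"

	"github.com/tidepool-org/platform/alerts"
	"github.com/tidepool-org/platform/log"
)

// pollNoCommunication lists users whose devices are overdue, then evaluates
// every alerts Config that follows each of them.
func pollNoCommunication(ctx context.Context, ac *alerts.Client, lgr log.Logger,
	push func(context.Context, *alerts.Notification) error) error {

	overdue, err := ac.OverdueCommunications(ctx)
	if err != nil {
		return err
	}
	for _, lc := range overdue {
		configs, err := ac.List(ctx, lc.UserID)
		if err != nil {
			return err
		}
		for _, cfg := range configs {
			// A real caller would also upsert cfg when the second return
			// value reports that its Activity changed.
			note, _ := cfg.EvaluateNoCommunication(ctx, lgr, lc.LastReceivedDeviceData)
			if note == nil {
				continue
			}
			if err := push(ctx, note); err != nil {
				return err
			}
			note.Sent(time.Now())
		}
	}
	return nil
}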
+func (a *NoCommunicationAlert) Evaluate(ctx context.Context, lastReceived time.Time) EvalResult { + er := EvalResult{} + + if lastReceived.IsZero() { + a.lgr(ctx).Info("Unable to evaluate no communication: time is Zero") + return er + } + + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay + } + er.OutOfRange = time.Since(lastReceived) > delay + er.Started = lastReceived + er.NewestTime = lastReceived + a.lgr(ctx).WithField("isAlerting?", er.OutOfRange).Info("no communication") + + return er +} + +const ( + DefaultNoCommunicationDelay = 5 * time.Minute + MinimumNoCommunicationDelay = 5 * time.Minute + MaximumNoCommunicationDelay = 6 * time.Hour +) + +const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" + // LowAlert extends Base with threshold and a delay. type LowAlert struct { Base `bson:",inline"` @@ -149,13 +540,35 @@ type LowAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a LowAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 2*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 2*time.Hour) a.Threshold.Validate(validator) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) +} + +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *LowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, + } + er.Process(ctx, a.Threshold, data) + return er +} + +func genGlucoseThresholdMessage(alertType string) string { + return "Glucose reading " + alertType + " threshold" } // HighAlert extends Base with a threshold and a delay. @@ -165,13 +578,31 @@ type HighAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a HighAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) a.Threshold.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 6*time.Hour) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) +} + +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *HighAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := &EvalResult{ + Name: "high", + Evaluator: func(dv, tv float64) bool { return dv > tv }, + } + er.Process(ctx, a.Threshold, data) + return *er } // DurationMinutes reads a JSON integer and converts it to a time.Duration. @@ -201,7 +632,7 @@ func (m DurationMinutes) Duration() time.Duration { return time.Duration(m) } -// ValueWithUnits binds a value to its units. +// ValueWithUnits binds a value with its units. 
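Since the delay and repeat fields above all ride on DurationMinutes, a quick self-contained check of its decoding (assuming DurationMinutes implements json.Unmarshaler, as its doc comment and the config_test.go fixtures imply):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/tidepool-org/platform/alerts"
)

func main() {
	// A bare JSON integer is read as a number of minutes, matching the
	// "delay": 10 fixtures asserted in config_test.go.
	var d alerts.DurationMinutes
	if err := json.Unmarshal([]byte(`10`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration() == 10*time.Minute) // true
}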
// // Other types can extend it to parse and validate the Units. type ValueWithUnits struct { @@ -214,31 +645,59 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { - v.String("units", &t.Units).OneOf(glucose.MgdL, glucose.MmolL) - // This is a sanity check. Client software will likely further constrain these values. The - // broadness of these values allows clients to change their own min and max values - // independently, and it sidesteps rounding and conversion conflicts between the backend and - // clients. + v.String("units", &t.Units).OneOf(dataBloodGlucose.MgdL, dataBloodGlucose.MmolL) + // This is a sanity check. Client software will likely further constrain these + // values. The broadness of these values allows clients to change their own min and max + // values independently, and it sidesteps rounding and conversion conflicts between the + // backend and clients. var max, min float64 switch t.Units { - case glucose.MgdL, glucose.Mgdl: - max = glucose.MgdLMaximum - min = glucose.MgdLMinimum + case dataBloodGlucose.MgdL, dataBloodGlucose.Mgdl: + max = dataBloodGlucose.MgdLMaximum + min = dataBloodGlucose.MgdLMinimum v.Float64("value", &t.Value).InRange(min, max) - case glucose.MmolL, glucose.Mmoll: - max = glucose.MmolLMaximum - min = glucose.MmolLMinimum + case dataBloodGlucose.MmolL, dataBloodGlucose.Mmoll: + max = dataBloodGlucose.MmolLMaximum + min = dataBloodGlucose.MmolLMinimum v.Float64("value", &t.Value).InRange(min, max) default: v.WithReference("value").ReportError(validator.ErrorValueNotValid()) } } -// Repository abstracts persistent storage for Config data. +// Repository abstracts persistent storage in the alerts collection for Config data. type Repository interface { Get(ctx context.Context, conf *Config) (*Config, error) Upsert(ctx context.Context, conf *Config) error Delete(ctx context.Context, conf *Config) error + List(ctx context.Context, userID string) ([]*Config, error) EnsureIndexes() error } + +// Notification gathers information necessary for sending an alert notification. +type Notification struct { + // Message communicates the alert to the recipient. + Message string + RecipientUserID string + FollowedUserID string + Sent func(time.Time) +} + +// LastCommunicationsRepository encapsulates queries of the [LastCommunication] records +// collection for use with alerts. +type LastCommunicationsRepository interface { + // RecordReceivedDeviceData upserts the time of last communication from a user. + RecordReceivedDeviceData(context.Context, LastCommunication) error + // OverdueCommunications lists records for those users that haven't communicated for a + // time. + OverdueCommunications(context.Context) ([]LastCommunication, error) + + EnsureIndexes() error +} + +// DosingDecision is an alias of convenience. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose is an alias of convenience. +type Glucose = glucose.Glucose diff --git a/alerts/config_test.go b/alerts/config_test.go index ec479d8fb4..8a229b1608 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -3,6 +3,7 @@ package alerts import ( "bytes" "context" + "encoding/json" "fmt" "strings" "testing" @@ -11,8 +12,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/log" logTest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/pointer" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/test" @@ -23,9 +28,10 @@ func TestSuite(t *testing.T) { } const ( - mockUserID1 = "008c7f79-6545-4466-95fb-34e3ba728d38" - mockUserID2 = "b1880201-30d5-4190-92bb-6afcf08ca15e" - mockUploadID = "4d3b1abc280511ef9f41abf13a093b64" + mockUserID1 = "11111111-7357-7357-7357-111111111111" + mockUserID2 = "22222222-7357-7357-7357-222222222222" + mockUserID3 = "33333333-7357-7357-7357-333333333333" + mockDataSetID = "73577357735773577357735773577357" ) var _ = Describe("Config", func() { @@ -45,7 +51,6 @@ var _ = Describe("Config", func() { }, "urgentLow": { "enabled": false, - "repeat": 30, "threshold": { "units": "mg/dL", "value": 47.5 @@ -62,77 +67,358 @@ var _ = Describe("Config", func() { }, "notLooping": { "enabled": true, - "repeat": 32, "delay": 4 }, "noCommunication": { "enabled": true, - "repeat": 33, "delay": 6 } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) +}`, mockUserID1, mockUserID2, mockDataSetID) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).ToNot(HaveOccurred()) - Expect(conf.UserID).To(Equal(mockUserID1)) - Expect(conf.FollowedUserID).To(Equal(mockUserID2)) - Expect(conf.UploadID).To(Equal(mockUploadID)) - Expect(conf.High.Enabled).To(Equal(false)) - Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) - Expect(conf.High.Threshold.Value).To(Equal(10.0)) - Expect(conf.High.Threshold.Units).To(Equal(glucose.MmolL)) - Expect(conf.Low.Enabled).To(Equal(true)) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) - Expect(conf.Low.Threshold.Value).To(Equal(80.0)) - Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) - Expect(conf.UrgentLow.Enabled).To(Equal(false)) - Expect(conf.UrgentLow.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) - Expect(conf.NotLooping.Enabled).To(Equal(true)) - Expect(conf.NotLooping.Repeat).To(Equal(DurationMinutes(32 * time.Minute))) - Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) - Expect(conf.NoCommunication.Enabled).To(Equal(true)) - Expect(conf.NoCommunication.Repeat).To(Equal(DurationMinutes(33 * time.Minute))) - Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) + Expect(cfg.UserID).To(Equal(mockUserID1)) + Expect(cfg.FollowedUserID).To(Equal(mockUserID2)) + Expect(cfg.UploadID).To(Equal(mockDataSetID)) + Expect(cfg.Alerts.High.Enabled).To(Equal(false)) + Expect(cfg.Alerts.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) + Expect(cfg.Alerts.High.Threshold.Value).To(Equal(10.0)) + Expect(cfg.Alerts.High.Threshold.Units).To(Equal(dataBloodGlucose.MmolL)) + 
Expect(cfg.Alerts.Low.Enabled).To(Equal(true)) + Expect(cfg.Alerts.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) + Expect(cfg.Alerts.Low.Threshold.Value).To(Equal(80.0)) + Expect(cfg.Alerts.Low.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) + Expect(cfg.Alerts.UrgentLow.Enabled).To(Equal(false)) + Expect(cfg.Alerts.UrgentLow.Threshold.Value).To(Equal(47.5)) + Expect(cfg.Alerts.UrgentLow.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) + Expect(cfg.Alerts.NotLooping.Enabled).To(Equal(true)) + Expect(cfg.Alerts.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) + // Expect(conf.Alerts.NoCommunication.Enabled).To(Equal(true)) + // Expect(conf.Alerts.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) Context("validations", func() { - testConfig := func() Config { - return Config{ - UserID: mockUserID1, - FollowedUserID: mockUserID2, - UploadID: mockUploadID, - } - } - It("requires an UploadID", func() { - c := testConfig() - c.UploadID = "" + cfg := testConfig() + cfg.UploadID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an FollowedUserID", func() { - c := testConfig() - c.FollowedUserID = "" + cfg := testConfig() + cfg.FollowedUserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an UserID", func() { - c := testConfig() - c.UserID = "" + cfg := testConfig() + cfg.UserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) }) + Context("when a notification is returned", func() { + Describe("EvaluateNoCommunication", func() { + It("injects user ids", func() { + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true + + when := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + n, _ := cfg.EvaluateNoCommunication(ctx, lgr, when) + + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) + }) + }) + }) + + Describe("EvaluateData", func() { + var okGlucose = []*Glucose{testInRangeDatum()} + var okDosing = []*DosingDecision{testDosingDecision(time.Second)} + + type evalTest struct { + Name string + Activity func(*Config) *AlertActivity + Glucose []*Glucose + Dosing []*DosingDecision + } + + tests := []evalTest{ + {"UrgentLow", func(c *Config) *AlertActivity { return &c.Activity.UrgentLow }, + []*Glucose{testUrgentLowDatum()}, nil}, + {"Low", func(c *Config) *AlertActivity { return &c.Activity.Low }, + []*Glucose{testLowDatum()}, nil}, + {"High", func(c *Config) *AlertActivity { return &c.Activity.High }, + []*Glucose{testHighDatum()}, nil}, + {"NotLooping", func(c *Config) *AlertActivity { return &c.Activity.NotLooping }, + nil, []*DosingDecision{testDosingDecision(-30 * time.Hour)}}, + } + for _, test := range tests { + Context(test.Name, func() { + It("is triggered", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + }) + + It("doesn't update its triggered time", func() { + ctx, _, cfg := newConfigTest() + 
cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + prev := test.Activity(cfg).Triggered + n, _ = cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).To(Equal(prev)) + }) + + It("is resolved", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeZero()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeTemporally("~", time.Now())) + }) + + It("doesn't update its resolved time", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + prev := test.Activity(cfg).Resolved + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(Equal(prev)) + }) + }) + } + + type logTest struct { + Name string + Msg string + Fields log.Fields + } + + logTests := []logTest{ + {"UrgentLow", "urgent low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 3.0}}, + {"Low", "low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 4.0}}, + {"High", "high", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 10.0}}, + {"NotLooping", "not looping", log.Fields{ + "isAlerting?": false, + // "value" is time-dependent, and would require a lot of work to mock. This + // should be close enough. + "threshold": DefaultNotLoopingDelay, + }}, + } + for _, test := range logTests { + It(test.Name+" logs evaluations", func() { + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NotLooping.Base.Enabled = true + glucose := []*Glucose{testInRangeDatum()} + dosing := []*DosingDecision{testDosingDecision(-1)} + cfg.EvaluateData(ctx, glucose, dosing) + + Expect(func() { + lgr.AssertLog(log.InfoLevel, test.Msg, test.Fields) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": lgr.SerializedFields, + "expected": map[string]any{"message": test.Msg, "fields": test.Fields}, + })) + }) + } + + It("injects user IDs into the returned Notification", func() { + ctx, _, cfg := newConfigTest() + mockGlucoseData := []*Glucose{testUrgentLowDatum()} + + n, _ := cfg.EvaluateData(ctx, mockGlucoseData, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) + }) + + It("ripples the needs upsert value (from urgent low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate an urgent low notification. + n, _ := cfg.EvaluateData(ctx, []*Glucose{testUrgentLowDatum()}, nil) + Expect(n).ToNot(Equal(nil)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (from low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a low notification. 
+ n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testLowDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (from high)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a high notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testHighDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + Describe("Repeat", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.Low.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.Low.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.Low.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + testData := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, testData, nil) + Expect(n).To(BeNil()) + + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, testData, nil) + Expect(n).ToNot(BeNil()) + }) + + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.High.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.High.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + }) + + Describe("Delay", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.Low.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.Low.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.High.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + + It("NotLooping is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + delay := 10 * time.Minute + lessThanDelay := delay - time.Second + cfg.Alerts.NotLooping.Delay = DurationMinutes(delay) + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + + n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := delay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n,
_ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) + }) + + It("NotLooping uses its default", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + cfg.Alerts.NotLooping.Delay = 0 + lessThanDelay := DefaultNotLoopingDelay - time.Second + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + + n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := DefaultNotLoopingDelay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n, _ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) + }) + }) + }) + + It("observes NotLoopingRepeat between notifications", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + yesterday := []*DosingDecision{testDosingDecision(-24 * time.Hour)} + + cfg.Activity.NotLooping.Sent = time.Now() + n, _ := cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).To(BeNil()) + + cfg.Activity.NotLooping.Sent = time.Now().Add(-(1 + NotLoopingRepeat)) + n, _ = cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).ToNot(BeNil()) + }) + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { @@ -157,6 +443,114 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value -1 is not between 0 and 1000")) }) }) + + Context("Evaluate", func() { + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + + er := EvalResult{} + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(func() { + er = ul.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + + er := EvalResult{} + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + + badUnits := testInRangeDatum() + badUnits.Units = nil + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testInRangeDatum() + badValue.Value = nil + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this still useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = testUrgentLow().Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) + + }) + }) + }) + + Context("NoCommunicationAlert", func() { + Context("Evaluate", func() { + + It("handles being passed a Zero time.Time value", func() { + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + + Expect(func() { + nc.Evaluate(ctx, time.Time{}) + }).ToNot(Panic()) + }) + + It("logs evaluation results", func() { + ctx, lgr, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + + Expect(func() { + nc.Evaluate(ctx, time.Now().Add(-12*time.Hour)) + }).ToNot(Panic()) + Expect(func() { + lgr.AssertLog(log.InfoLevel, "no communication", log.Fields{ + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + It("honors non-Zero Delay values", func() { + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + nc.Enabled = true + nc.Delay = DurationMinutes(10 * time.Minute) + + wontTrigger := time.Now().Add(-(nc.Delay.Duration() - time.Second)) + er := nc.Evaluate(ctx, wontTrigger) + Expect(er.OutOfRange).To(Equal(false)) + + willTrigger := time.Now().Add(-(nc.Delay.Duration() + time.Second)) + er = nc.Evaluate(ctx, willTrigger) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("validates the time at which data was last received", func() { + ctx, _, cfg := newConfigTest() + validLastReceived := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) + invalidLastReceived := time.Time{} + er := EvalResult{} + nc := cfg.Alerts.NoCommunication + + er = nc.Evaluate(ctx, validLastReceived) + Expect(er.OutOfRange).To(Equal(true)) + + er = nc.Evaluate(ctx, invalidLastReceived) + Expect(er.OutOfRange).To(Equal(false)) + }) + }) }) Context("LowAlert", func() { @@ -216,6 +610,57 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low + + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + Expect(func() { + er = low.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low + + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).ToNot(Equal(false)) + + badUnits := testUrgentLowDatum() + badUnits.Units = nil + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testUrgentLowDatum() + badValue.Value = nil + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = low.Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) + }) + }) }) Context("HighAlert", func() { @@ -268,6 +713,57 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High + + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + Expect(func() { + er = high.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High + + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{testHighDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + + badUnits := testInRangeDatum() + badUnits.Units = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testInRangeDatum() + badValue.Value = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this still useful? + badTime := testInRangeDatum() + badTime.Time = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badTime}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + }) }) Context("NoCommunicationAlert", func() { @@ -286,17 +782,18 @@ var _ = Describe("Config", func() { val = validator.New(logTest.NewLogger()) b = NoCommunicationAlert{Delay: -1} b.Validate(val) - Expect(val.Error()).To(MatchError("value -1ns is not between 0s and 6h0m0s")) + Expect(val.Error()).To(MatchError("value -1ns is not between 5m0s and 6h0m0s")) val = validator.New(logTest.NewLogger()) b = NoCommunicationAlert{Delay: DurationMinutes(time.Hour*6 + time.Second)} b.Validate(val) - Expect(val.Error()).To(MatchError("value 6h0m1s is not between 0s and 6h0m0s")) + Expect(val.Error()).To(MatchError("value 6h0m1s is not between 5m0s and 6h0m0s")) }) }) }) Context("NotLoopingAlert", func() { + Context("Delay", func() { It("accepts values between 0 and 2 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) @@ -319,37 +816,126 @@ var _ = Describe("Config", func() { b.Validate(val) Expect(val.Error()).To(MatchError("value 2h0m1s is not between 0s and 2h0m0s")) }) + }) + + Context("Evaluate", func() { + + It("uses a default delay of 30 minutes", func() { + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ + testDosingDecision(-29 * time.Minute), + } + decisionsWithAlert := []*DosingDecision{ + testDosingDecision(-30 * time.Minute), + } + nl := cfg.Alerts.NotLooping + + er := nl.Evaluate(ctx, decisionsNoAlert) + Expect(er.OutOfRange).To(Equal(false), er.String()) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("respects custom delays", func() { + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ + testDosingDecision(-14 * time.Minute), + } + decisionsWithAlert := []*DosingDecision{ + testDosingDecision(-15 * time.Minute), + } + nl := cfg.Alerts.NotLooping + nl.Delay = DurationMinutes(15 * time.Minute) + + er := nl.Evaluate(ctx, decisionsNoAlert) + 
Expect(er.OutOfRange).To(Equal(false)) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + + nl := cfg.Alerts.NotLooping + + Expect(func() { + er = nl.Evaluate(ctx, []*DosingDecision{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + Expect(func() { + er = nl.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("ignores decisions without a reason", func() { + ctx, _, cfg := newConfigTest() + nl := cfg.Alerts.NotLooping + noReason := testDosingDecision(time.Second) + noReason.Reason = nil + decisions := []*DosingDecision{ + testDosingDecision(-time.Hour), + noReason, + } + + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("ignores decisions without a time", func() { + ctx, _, cfg := newConfigTest() + nl := cfg.Alerts.NotLooping + noTime := testDosingDecision(time.Second) + noTime.Time = nil + decisions := []*DosingDecision{ + testDosingDecision(-time.Hour), + noTime, + } + + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) + }) }) }) Context("repeat", func() { + var defaultAlert = LowAlert{ + Threshold: Threshold{Value: 11, Units: dataBloodGlucose.MmolL}, + } + It("accepts values of 0 (indicating disabled)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: 0} - b.Validate(val) + l := defaultAlert + l.Repeat = 0 + l.Validate(val) Expect(val.Error()).To(Succeed()) }) It("accepts values of 15 minutes to 4 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: DurationMinutes(15 * time.Minute)} - b.Validate(val) + l := defaultAlert + l.Repeat = DurationMinutes(15 * time.Minute) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4 * time.Hour)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4 * time.Hour) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4*time.Hour + 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4*time.Hour + 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(15*time.Minute - 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(15*time.Minute - 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) }) }) @@ -361,67 +947,163 @@ var _ = Describe("Config", func() { err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(MatchError("json is malformed")) }) - It("validates repeat minutes (negative)", func() { + }) + + Context("low", func() { + It("accepts a blank repeat", func() { buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", - "urgentLow": { - "enabled": false, - "repeat": -11, + "low": { + "enabled": true, + "delay": 10, "threshold": { - "units": "%s", - "value": 47.5 + "units": "mg/dL", + "value": 80 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) +}`, mockUserID1, mockUserID2, mockDataSetID) + conf := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, conf) + Expect(err).To(Succeed()) + 
Expect(conf.Alerts.Low.Repeat).To(Equal(DurationMinutes(0))) }) - It("validates repeat minutes (string)", func() { - buf := buff(`{ + }) + It("validates repeat minutes (negative)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", - "urgentLow": { + "uploadId": "%s", + "low": { "enabled": false, - "repeat": "a", + "repeat": -11, "threshold": { "units": "%s", - "value": 1 + "value": 47.5 } } -}`, mockUserID1, mockUserID2, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("json is malformed")) - }) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) }) - - Context("low", func() { - It("accepts a blank repeat", func() { - buf := buff(`{ + It("validates repeat minutes (string)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", "low": { - "enabled": true, - "delay": 10, + "enabled": false, + "repeat": "a", "threshold": { - "units": "mg/dL", - "value": 80 + "units": "%s", + "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) - Expect(err).To(Succeed()) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("json is malformed")) + }) +}) + +var _ = Describe("Alerts", func() { + Describe("LongestDelay", func() { + It("does what it says", func() { + low := testLowAlert() + low.Delay = DurationMinutes(10 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(10 * time.Minute)) + }) + + It("ignores disabled alerts", func() { + low := testLowAlert() + low.Delay = DurationMinutes(7 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(7 * time.Minute)) + }) + + It("returns a Zero Duration when no alerts are set", func() { + a := Alerts{ + Low: nil, + High: nil, + NotLooping: nil, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(time.Duration(0))) + }) + }) + + Describe("Evaluate", func() { + + It("detects urgent low data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) + }) + + It("detects low data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below low threshold")) + }) + + It("detects high data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testHighDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + 
Expect(n.Message).To(ContainSubstring("above high threshold")) + }) + + Context("with both low and urgent low alerts detected", func() { + It("prefers urgent low", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) + }) }) }) }) -var _ = Describe("Duration", func() { +var _ = Describe("DurationMinutes", func() { It("parses 42", func() { d := DurationMinutes(0) err := d.UnmarshalJSON([]byte(`42`)) @@ -456,20 +1138,20 @@ var _ = Describe("Duration", func() { var _ = Describe("Threshold", func() { It("accepts mg/dL", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MgdL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MgdL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MgdL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MgdL)) }) It("accepts mmol/L", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MmolL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MmolL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MmolL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MmolL)) }) It("rejects lb/gal", func() { buf := buff(`{"units":"%s","value":42}`, "lb/gal") @@ -482,7 +1164,7 @@ var _ = Describe("Threshold", func() { Expect(err).Should(HaveOccurred()) }) It("is case-sensitive with respect to Units", func() { - badUnits := strings.ToUpper(glucose.MmolL) + badUnits := strings.ToUpper(dataBloodGlucose.MmolL) buf := buff(`{"units":"%s","value":42}`, badUnits) err := request.DecodeObject(context.Background(), nil, buf, &Threshold{}) Expect(err).Should(HaveOccurred()) @@ -490,7 +1172,265 @@ var _ = Describe("Threshold", func() { }) +var _ = Describe("AlertActivity", func() { + Describe("IsActive()", func() { + It("is true", func() { + triggered := time.Now() + resolved := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + resolved := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeFalse()) + }) + }) + + Describe("IsSent()", func() { + It("is true", func() { + triggered := time.Now() + sent := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: sent, + } + Expect(a.IsSent()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + notified := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: notified, + } + Expect(a.IsSent()).To(BeFalse()) + }) + }) + + Describe("normalizeUnits", func() { + Context("given the same units", func() { + It("doesn't alter them at all", func() { + d := testUrgentLowDatum() + t := Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(2.9)) + + d = testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) + t = Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MgdL, + } + dv, tv, err = 
normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in Mmol/L & threshold in mg/dL", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MmolL) + t := Threshold{ + Value: 90.0, + Units: dataBloodGlucose.MgdL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(4.99567)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in mg/dL & threshold in Mmol/L", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Value = pointer.FromAny(90.0) + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) + t := Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(4.99567)) + }) + }) + }) +}) + // buff is a helper for generating a JSON []byte representation. func buff(format string, args ...interface{}) *bytes.Buffer { return bytes.NewBufferString(fmt.Sprintf(format, args...)) } + +func testDosingDecision(d time.Duration) *DosingDecision { + return &DosingDecision{ + Base: types.Base{ + Time: pointer.FromAny(time.Now().Add(d)), + }, + Reason: pointer.FromAny(DosingDecisionReasonLoop), + } +} + +func testConfig() Config { + return Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + UploadID: mockDataSetID, + } +} + +func testUrgentLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(2.9), + }, + } +} + +func testHighDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } +} + +func testLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(3.9), + }, + } +} + +func testInRangeDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(6.0), + }, + } +} + +func testNoCommunication() *NoCommunicationAlert { + return &NoCommunicationAlert{ + Base: Base{Enabled: true}, + } +} + +func testNoCommunicationDisabled() *NoCommunicationAlert { + nc := testNoCommunication() + nc.Enabled = false + return nc +} + +func testNotLoopingDisabled() *NotLoopingAlert { + nl := testNotLooping() + nl.Enabled = false + return nl +} + +func testNotLooping() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + Delay: 0, + } +} + +func testAlertsActivity() Activity { + return Activity{} +} + +func testLowAlert() *LowAlert { + return &LowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 4, + Units: dataBloodGlucose.MmolL, + }, + } +} +func testHighAlert() *HighAlert { + return &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: dataBloodGlucose.MmolL, + }, + } +} +func testUrgentLowAlert() *UrgentLowAlert { + return &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 3, + Units: dataBloodGlucose.MmolL, + }, + } +} +func testNotLoopingAlert() *NotLoopingAlert { + return &NotLoopingAlert{ + 
Base: Base{Enabled: true}, + } +} + +func newConfigTest() (context.Context, *logTest.Logger, *Config) { + lgr := logTest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), lgr) + cfg := &Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + UploadID: mockDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + Low: testLowAlert(), + High: testHighAlert(), + NotLooping: testNotLoopingDisabled(), // NOTE: disabled + NoCommunication: testNoCommunicationDisabled(), // NOTE: disabled + }, + Activity: testAlertsActivity(), + } + return ctx, lgr, cfg +} + +func quickJSON(v any) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + return fmt.Sprintf("", v) + } + return string(b) +} diff --git a/alerts/evaluator.go b/alerts/evaluator.go new file mode 100644 index 0000000000..9a757a7833 --- /dev/null +++ b/alerts/evaluator.go @@ -0,0 +1,201 @@ +package alerts + +import ( + "cmp" + "context" + "slices" + "time" + + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/permission" +) + +// DataRepository encapsulates queries of the data collection for use with alerts. +type DataRepository interface { + // GetAlertableData queries for the data used to evaluate alerts configurations. + GetAlertableData(ctx context.Context, params GetAlertableDataParams) (*GetAlertableDataResponse, error) +} + +type GetAlertableDataParams struct { + // UserID of the user that owns the data. + UserID string + // UploadID of the device data set to query. + // + // The term DataSetID should be preferred, but UploadID already existed in some places. + UploadID string + // Start limits the data to those recorded after this time. + Start time.Time + // End limits the data to those recorded before this time. + End time.Time +} + +type GetAlertableDataResponse struct { + DosingDecisions []*dosingdecision.DosingDecision + Glucose []*glucose.Glucose +} + +type Evaluator struct { + Alerts Repository + Data DataRepository + Logger log.Logger + Permissions permission.Client + TokenProvider auth.ServerSessionTokenProvider +} + +func NewEvaluator(alerts Repository, dataRepo DataRepository, permissions permission.Client, + logger log.Logger, tokenProvider auth.ServerSessionTokenProvider) *Evaluator { + + return &Evaluator{ + Alerts: alerts, + Data: dataRepo, + Logger: logger, + Permissions: permissions, + TokenProvider: tokenProvider, + } +} + +// EvaluateData generates alert notifications in response to a user uploading data. 
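+//
+// It lists the alerts configs following the user, drops any whose follower lacks
+// permission or whose data set doesn't match, gathers the alertable data once per
+// data set, and then evaluates each config against that data, upserting configs
+// whose activity changed.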
+func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*Notification, error) { + + configs, err := e.gatherConfigs(ctx, followedUserID, dataSetID) + if err != nil { + return nil, err + } + + configsByDataSetID := e.mapConfigsByDataSetID(configs) + + notifications := []*Notification{} + for dsID, configs := range configsByDataSetID { + resp, err := e.gatherData(ctx, followedUserID, dsID, configs) + if err != nil { + return nil, err + } + for _, config := range configs { + lgr := config.LoggerWithFields(e.Logger) + lgrCtx := log.NewContextWithLogger(ctx, lgr) + notification, needsUpsert := e.genNotificationForConfig(lgrCtx, lgr, config, resp) + if notification != nil { + notifications = append(notifications, notification) + } + if needsUpsert { + err := e.Alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } + } + } + + return notifications, nil +} + +func (e *Evaluator) genNotificationForConfig(ctx context.Context, lgr log.Logger, + config *Config, resp *GetAlertableDataResponse) (*Notification, bool) { + + notification, needsUpsert := config.EvaluateData(ctx, resp.Glucose, resp.DosingDecisions) + if notification != nil { + notification.Sent = e.wrapWithUpsert(ctx, lgr, config, notification.Sent) + } + return notification, needsUpsert +} + +func (e *Evaluator) mapConfigsByDataSetID(cfgs []*Config) map[string][]*Config { + mapped := map[string][]*Config{} + for _, cfg := range cfgs { + if _, found := mapped[cfg.UploadID]; !found { + mapped[cfg.UploadID] = []*Config{} + } + mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) + } + return mapped +} + +func (e *Evaluator) gatherConfigs(ctx context.Context, followedUserID, dataSetID string) ( + []*Config, error) { + + configs, err := e.Alerts.List(ctx, followedUserID) + if err != nil { + return nil, err + } + configs = slices.DeleteFunc(configs, e.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { + return config.UploadID != dataSetID + }) + return configs, nil +} + +// authDenied builds a function for slices.DeleteFunc to remove unauthorized users' Configs. +// +// This would catch the unintended case where a follower's permission was revoked, but their +// [Config] wasn't deleted. +// +// A closure is used to inject information from the evaluator into the resulting function. 
+func (e *Evaluator) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := e.Logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, e.TokenProvider) + perms, err := e.Permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (e *Evaluator) gatherData(ctx context.Context, followedUserID, dataSetID string, + configs []*Config) (*GetAlertableDataResponse, error) { + + if len(configs) == 0 { + return nil, nil + } + + longestDelay := slices.MaxFunc(configs, func(i, j *Config) int { + return cmp.Compare(i.LongestDelay(), j.LongestDelay()) + }).LongestDelay() + longestDelay = max(5*time.Minute, longestDelay) + params := GetAlertableDataParams{ + UserID: followedUserID, + UploadID: dataSetID, + Start: time.Now().Add(-longestDelay), + } + resp, err := e.Data.GetAlertableData(ctx, params) + if err != nil { + return nil, err + } + + resp.Glucose = slices.DeleteFunc(resp.Glucose, + func(g *glucose.Glucose) bool { return g.Time == nil }) + resp.DosingDecisions = slices.DeleteFunc(resp.DosingDecisions, + func(d *dosingdecision.DosingDecision) bool { return d.Time == nil }) + + return resp, nil +} + +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. +func (e *Evaluator) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { + + return func(at time.Time) { + if original != nil { + original(at) + } + if err := e.Alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } +} diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go new file mode 100644 index 0000000000..f3655ca1ea --- /dev/null +++ b/alerts/evaluator_test.go @@ -0,0 +1,437 @@ +package alerts + +import ( + "context" + "errors" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" +) + +var _ = Describe("Evaluator", func() { + Describe("EvaluateData", func() { + It("handles data for users without any followers gracefully", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + + evaluator := NewEvaluator(alertsRepo, nil, nil, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("handles data queries that return empty results (perm denied)", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + }, + }) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("filters users without permission", func() { + // This simulates the case when permission is revoked, but the corresponding + // alerts.Config isn't yet deleted. + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: dataBloodGlucose.MmolL, + }, + }, + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: dataBloodGlucose.MmolL, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testHighDatum()}, + }, + } + perms := newMockPermissionClient() + perms.Allow(testUserID, testFollowedUserID, permission.Follow) + // This user still has a config, but has had their follow permission revoked. 
+ perms.Allow(testUserID+"-2", testFollowedUserID, permission.Read) + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + if Expect(len(notifications)).To(Equal(1)) { + Expect(notifications[0].RecipientUserID).To(Equal(testUserID)) + } + }) + + It("handles data queries that return empty results (no data)", func() { + ctx, lgr, cfg := newConfigTest() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + e := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := e.EvaluateData(ctx, mockUserID2, mockDataSetID) + + Expect(ns).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("returns notifications", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testUrgentLowDatum()}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + if Expect(notifications).To(HaveLen(1)) { + msgFound := strings.Contains(notifications[0].Message, "below urgent low") + Expect(msgFound).To(BeTrue()) + } + Expect(err).To(Succeed()) + }) + + It("queries data based on the longest delay", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + longerDelay := testHighAlert() + longerDelay.Delay = DurationMinutes(3) + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: testHighAlert(), + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: longerDelay, + }, + }, + }) + highDatum := testHighDatum() + highDatum.Blood.Base.Time = pointer.FromAny(time.Now().Add(-10 * time.Minute)) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{highDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + Expect(err).To(Succeed()) + if Expect(notifications).To(HaveLen(2)) { + msgFound := strings.Contains(notifications[0].Message, "above high") + Expect(msgFound).To(BeTrue(), notifications[0].Message) + } + }) + + It("wraps notifications so that changes are persisted when pushed", func() { + ctx, lgr, cfg := newConfigTest() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + 
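+			// One notification is expected; invoking its Sent callback should upsert
+			// the config a second time.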
evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, mockUserID2, mockDataSetID) + Expect(err).To(Succeed()) + Expect(len(ns)).To(Equal(1)) + for _, n := range ns { + Expect(n.Sent).ToNot(BeNil()) + n.Sent(time.Now()) + } + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(2)) { + activity := alertsRepo.UpsertCalls[1].Activity.UrgentLow + Expect(activity.Sent).To(BeTemporally(">", startOfTest)) + } + }) + + It("persists changes when there's no new Notification", func() { + // For example if an alert is resolved, that change should be persisted, even + // when there isn't a notification generated. + ctx, lgr := contextWithNullLoggerDeluxe() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + }, + Activity: Activity{ + UrgentLow: AlertActivity{ + Triggered: time.Now().Add(-10 * time.Minute), + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testInRangeDatum()}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + Expect(len(ns)).To(Equal(0)) + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { + activity := alertsRepo.UpsertCalls[0].Activity.UrgentLow + Expect(activity.Resolved).To(BeTemporally(">", startOfTest)) + } + }) + + Context("when the user has multiple data sets", func() { + It("ignores Configs that don't match the data set id", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + resp1 := newTestAlertsConfig(testUserID, testDataSetID) + resp2 := newTestAlertsConfig(testUserID+"2", testDataSetID+"2") + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, + []*Config{resp1, resp2}) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, + testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + if Expect(len(notifications)).To(Equal(1)) { + recipientUserID := notifications[0].RecipientUserID + Expect(recipientUserID).To(Equal(testUserID)) + } + }) + }) + }) +}) + +func newTestAlertsConfig(userID, dataSetID string) *Config { + return &Config{ + UserID: userID, + FollowedUserID: testFollowedUserID, + UploadID: dataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + }, + } +} + +type mockAlertsClient struct { + OverdueCommunicationsError error + OverdueCommunicationsResponses [][]LastCommunication + ListResponses [][]*Config + ListError error + UpsertError error + UpsertCalls []*Config +} + +func newMockAlertsClient() *mockAlertsClient { + return &mockAlertsClient{ + OverdueCommunicationsResponses: [][]LastCommunication{}, + ListResponses: [][]*Config{}, + UpsertCalls: []*Config{}, + } +} + +func (c *mockAlertsClient) Get(ctx context.Context, conf *Config) (*Config, error) { + return nil, nil +} + +func (c *mockAlertsClient) Upsert(ctx context.Context, conf *Config) error { + if conf 
== nil {
+		c.UpsertCalls = append(c.UpsertCalls, nil)
+	} else {
+		copyConf := *conf
+		c.UpsertCalls = append(c.UpsertCalls, &copyConf)
+	}
+	if c.UpsertError != nil {
+		return c.UpsertError
+	}
+	return nil
+}
+
+func (c *mockAlertsClient) Delete(ctx context.Context, conf *Config) error {
+	return nil
+}
+
+func (c *mockAlertsClient) List(ctx context.Context, userID string) ([]*Config, error) {
+	if c.ListError != nil {
+		return nil, c.ListError
+	}
+	if len(c.ListResponses) > 0 {
+		ret := c.ListResponses[0]
+		c.ListResponses = c.ListResponses[1:]
+		return ret, nil
+	}
+	return []*Config{}, nil
+}
+
+func (c *mockAlertsClient) OverdueCommunications(context.Context) (
+	[]LastCommunication, error) {
+
+	if c.OverdueCommunicationsError != nil {
+		return nil, c.OverdueCommunicationsError
+	}
+	if len(c.OverdueCommunicationsResponses) > 0 {
+		ret := c.OverdueCommunicationsResponses[0]
+		c.OverdueCommunicationsResponses = c.OverdueCommunicationsResponses[1:]
+		return ret, nil
+	}
+	return nil, nil
+}
+
+func (c *mockAlertsClient) EnsureIndexes() error {
+	return nil
+}
+
+type mockDataRepo struct {
+	AlertableData []*GetAlertableDataResponse
+}
+
+func newMockDataRepo() *mockDataRepo {
+	return &mockDataRepo{
+		AlertableData: []*GetAlertableDataResponse{},
+	}
+}
+
+func (r *mockDataRepo) GetAlertableData(ctx context.Context, params GetAlertableDataParams) (
+	*GetAlertableDataResponse, error) {
+
+	if len(r.AlertableData) > 0 {
+		ret := r.AlertableData[0]
+		r.AlertableData = r.AlertableData[1:]
+		return ret, nil
+	}
+
+	return &GetAlertableDataResponse{
+		DosingDecisions: []*dosingdecision.DosingDecision{},
+		Glucose:         []*glucose.Glucose{},
+	}, nil
+}
+
+type mockPermissionClient struct {
+	AlwaysAllow bool
+	Perms       map[string]permission.Permissions
+}
+
+func newMockPermissionClient() *mockPermissionClient {
+	return &mockPermissionClient{
+		Perms: map[string]permission.Permissions{},
+	}
+}
+
+func (c *mockPermissionClient) GetUserPermissions(ctx context.Context,
+	requestUserID string, targetUserID string) (permission.Permissions, error) {
+
+	if c.AlwaysAllow {
+		return map[string]permission.Permission{
+			permission.Follow: {},
+			permission.Read:   {},
+		}, nil
+	}
+
+	if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok {
+		return p, nil
+	} else {
+		return nil, errors.New("test error NOT FOUND")
+	}
+}
+
+func (c *mockPermissionClient) Allow(requestUserID, targetUserID string, perms ...string) {
+	key := c.Key(requestUserID, targetUserID)
+	if _, found := c.Perms[key]; !found {
+		c.Perms[key] = permission.Permissions{}
+	}
+	for _, perm := range perms {
+		c.Perms[key][perm] = permission.Permission{}
+	}
+}
+
+func (c *mockPermissionClient) Key(requesterUserID, targetUserID string) string {
+	return requesterUserID + targetUserID
+}
diff --git a/alerts/pusher.go b/alerts/pusher.go
new file mode 100644
index 0000000000..1563e4fc43
--- /dev/null
+++ b/alerts/pusher.go
@@ -0,0 +1,78 @@
+package alerts
+
+import (
+	"context"
+
+	"github.com/kelseyhightower/envconfig"
+
+	"github.com/tidepool-org/platform/devicetokens"
+	"github.com/tidepool-org/platform/errors"
+	"github.com/tidepool-org/platform/push"
+)
+
+// Pusher is a service-agnostic interface for sending push notifications.
+type Pusher interface {
+	// Push a notification to a device.
+	Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error
+}
+
+// ToPushNotification converts Notification to push.Notification.
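+//
+// Only the alert message is carried over to the push payload.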
+func ToPushNotification(notification *Notification) *push.Notification { + return &push.Notification{ + Message: notification.Message, + } +} + +type cpaPusherEnvconfig struct { + // SigningKey is the raw token signing key received from Apple (.p8 file containing + // PEM-encoded private key) + // + // https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns + SigningKey []byte `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY" required:"true"` + KeyID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID" required:"true"` + BundleID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID" required:"true"` + TeamID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID" required:"true"` +} + +// NewPusher handles the loading of care partner configuration for push notifications. +func NewPusher() (*push.APNSPusher, error) { + config, err := loadPusherViaEnvconfig() + if err != nil { + return nil, errors.Wrap(err, "unable to care partner pusher config") + } + + client, err := push.NewAPNS2Client(config.SigningKey, config.KeyID, config.TeamID) + if err != nil { + return nil, errors.Wrap(err, "unable to create care partner pusher client") + } + + return push.NewAPNSPusher(client, config.BundleID), nil +} + +func loadPusherViaEnvconfig() (*cpaPusherEnvconfig, error) { + c := &cpaPusherEnvconfig{} + if err := envconfig.Process("", c); err != nil { + return nil, errors.Wrap(err, "Unable to process APNs pusher config") + } + + // envconfig's "required" tag won't error on values that are defined but empty, so + // manually check + + if len(c.SigningKey) == 0 { + return nil, errors.New("Unable to build APNSPusherConfig: APNs signing key is blank") + } + + if c.BundleID == "" { + return nil, errors.New("Unable to build APNSPusherConfig: bundleID is blank") + } + + if c.KeyID == "" { + return nil, errors.New("Unable to build APNSPusherConfig: keyID is blank") + } + + if c.TeamID == "" { + return nil, errors.New("Unable to build APNSPusherConfig: teamID is blank") + } + + return c, nil +} diff --git a/alerts/pusher_test.go b/alerts/pusher_test.go new file mode 100644 index 0000000000..d9f7f71db0 --- /dev/null +++ b/alerts/pusher_test.go @@ -0,0 +1,75 @@ +package alerts + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("APNSPusher", func() { + Describe("NewAPNSPusherFromEnv", func() { + It("succeeds", func() { + configureEnvconfig() + pusher, err := NewPusher() + Expect(err).To(Succeed()) + Expect(pusher).ToNot(Equal(nil)) + }) + }) +}) + +var _ = Describe("LoadAPNSPusherConfigFromEnv", func() { + BeforeEach(func() { + configureEnvconfig() + }) + + It("errors if key data is empty or blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + + os.Unsetenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY") + _, err = NewPusher() + Expect(err).To(MatchError(ContainSubstring("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY missing value"))) + }) + + It("errors if key data is invalid", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", "invalid") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("AuthKey must be a valid .p8 PEM file"))) + }) + + It("errors if bundleID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("bundleID is blank"))) + }) + + It("errors if teamID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("teamID is blank"))) + }) + + It("errors if keyID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("keyID is blank"))) + }) +}) + +func configureEnvconfig() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", string(validTestKey)) + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID", "key") + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID", "team") + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID", "bundle") +} + +// validTestKey is a random private key for testing +var validTestKey = []byte(`-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDNrXT9ZRWPUAAg38Qi +Z553y7sGqOgMxUCG36eCIcRCy1QiTJBgGDxIhWvkE8Sx4N6hZANiAATrsRyRXLa0 +Tgczq8tmFomMP212HdkPF3gFEl/CkqGHUodR2EdZBW1zVcmuLjIN4zvqVVXMJm/U +eHZz9xAZ95y3irAfkMuOD/Bw88UYvhKnipOHBeS8BwqyfFQ+NRB6xYU= +-----END PRIVATE KEY----- +`) diff --git a/alerts/tasks.go b/alerts/tasks.go new file mode 100644 index 0000000000..48eb6df601 --- /dev/null +++ b/alerts/tasks.go @@ -0,0 +1,202 @@ +package alerts + +import ( + "context" + "slices" + "time" + + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/task" +) + +const CarePartnerType = "org.tidepool.carepartner" + +func NewCarePartnerTaskCreate() *task.TaskCreate { + return &task.TaskCreate{ + Name: pointer.FromAny(CarePartnerType), + Type: CarePartnerType, + AvailableTime: &time.Time{}, + Data: map[string]interface{}{}, + } +} + +type CarePartnerRunner struct { + logger log.Logger + + alerts AlertsClient + authClient auth.ServerSessionTokenProvider + deviceTokens auth.DeviceTokensClient + permissions permission.Client + pusher Pusher +} + +// AlertsClient abstracts the alerts collection for the CarePartnerRunner. +// +// One implementation is [Client]. 
+type AlertsClient interface { + List(_ context.Context, followedUserID string) ([]*Config, error) + Upsert(context.Context, *Config) error + // OverdueCommunications returns a slice of [LastCommunication] for users that haven't + // uploaded data recently. + OverdueCommunications(context.Context) ([]LastCommunication, error) +} + +func NewCarePartnerRunner(logger log.Logger, alerts AlertsClient, + deviceTokens auth.DeviceTokensClient, pusher Pusher, permissions permission.Client, + authClient auth.ServerSessionTokenProvider) (*CarePartnerRunner, error) { + + return &CarePartnerRunner{ + logger: logger, + alerts: alerts, + authClient: authClient, + deviceTokens: deviceTokens, + pusher: pusher, + permissions: permissions, + }, nil +} + +func (r *CarePartnerRunner) GetRunnerType() string { + return CarePartnerType +} + +func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { + return r.GetRunnerDurationMaximum() +} + +func (r *CarePartnerRunner) GetRunnerDeadline() time.Time { + return time.Now().Add(3 * r.GetRunnerDurationMaximum()) +} + +const RunnerDurationMaximum = 30 * time.Second + +func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { + return RunnerDurationMaximum +} + +func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { + r.logger.Info("care partner no communication check") + start := time.Now() + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, r.authClient) + if err := r.evaluateLastComms(ctx); err != nil { + r.logger.WithError(err).Warn("running care partner no communication check") + } + tsk.RepeatAvailableAfter(time.Second - time.Since(start)) +} + +func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { + overdue, err := r.alerts.OverdueCommunications(ctx) + if err != nil { + return errors.Wrap(err, "listing users without communication") + } + + for _, lastComm := range overdue { + if err := r.evaluateLastComm(ctx, lastComm); err != nil { + r.logger.WithError(err). + WithField("followedUserID", lastComm.UserID). + WithField("dataSetID", lastComm.DataSetID). + Info("Unable to evaluate no communication") + continue + } + } + + return nil +} + +func (r *CarePartnerRunner) evaluateLastComm(ctx context.Context, + lastComm LastCommunication) error { + + configs, err := r.alerts.List(ctx, lastComm.UserID) + if err != nil { + return errors.Wrap(err, "listing follower alerts configs") + } + + configs = slices.DeleteFunc(configs, r.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { + return config.UploadID != lastComm.DataSetID + }) + + notifications := []*Notification{} + for _, config := range configs { + lgr := config.LoggerWithFields(r.logger) + lastData := lastComm.LastReceivedDeviceData + notification, needsUpsert := config.EvaluateNoCommunication(ctx, lgr, lastData) + if notification != nil { + notification.Sent = r.wrapWithUpsert(ctx, lgr, config, notification.Sent) + notifications = append(notifications, notification) + } + if needsUpsert { + err := r.alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } + } + + r.pushNotifications(ctx, notifications) + + return nil +} + +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. 
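+// It mirrors (*Evaluator).wrapWithUpsert in evaluator.go.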
+func (r *CarePartnerRunner) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { + + return func(at time.Time) { + if original != nil { + original(at) + } + if err := r.alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } +} + +func (r *CarePartnerRunner) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := r.logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + perms, err := r.permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (r *CarePartnerRunner) pushNotifications(ctx context.Context, + notifications []*Notification) { + + for _, notification := range notifications { + lgr := r.logger.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := r.deviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNotification := ToPushNotification(notification) + for _, token := range tokens { + err := r.pusher.Push(ctx, token, pushNotification) + if err != nil { + lgr.WithError(err).Info("unable to push notification") + } else { + notification.Sent(time.Now()) + } + } + } +} diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go new file mode 100644 index 0000000000..d2b683fbf1 --- /dev/null +++ b/alerts/tasks_test.go @@ -0,0 +1,335 @@ +package alerts + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" + "github.com/tidepool-org/platform/task" +) + +var _ = Describe("CarePartnerRunner", func() { + Describe("Run", func() { + It("schedules its next run for 1 second", func() { + runner, test := newCarePartnerRunnerTest() + + start := time.Now() + runner.Run(test.Ctx, test.Task) + + if Expect(test.Task.AvailableTime).ToNot(BeNil()) { + Expect(*test.Task.AvailableTime).To(BeTemporally("~", start.Add(time.Second))) + } + Expect(test.Task.DeadlineTime).To(BeNil()) + Expect(test.Task.State).To(Equal(task.TaskStatePending)) + }) + + Context("continues after logging errors", func() { + It("retrieving users without communication", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.OverdueCommunicationsError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + test.Logger.AssertWarn("running care partner no communication check") + }) + + It("retrieving an alerts config", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.ListError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + Expect(func() { + test.Logger.AssertInfo("Unable to evaluate no communication", log.Fields{ + "followedUserID": mockUserID2, + }) + }).ToNot(Panic(), map[string]any{ + "got": quickJSON(test.Logger.SerializedFields), + }) + }) + + It("upsetting alerts configs", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.UpsertError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + Expect(func() { + test.Logger.AssertError("Unable to upsert changed alerts config", log.Fields{ + "userID": mockUserID1, + "followedUserID": mockUserID2, + "dataSetID": mockDataSetID, + }) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + + It("retrieving device tokens", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + Expect(func() { + test.Logger.AssertInfo("unable to retrieve device tokens", log.Fields{ + "recipientUserID": mockUserID1, + }) + }, quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + + It("pushing notifications", func() { + runner, test := newCarePartnerRunnerTest() + test.Pusher.PushErrors = append(test.Pusher.PushErrors, fmt.Errorf("test error")) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + Expect(func() { + test.Logger.AssertInfo("unable to push notification", log.Fields{ + "recipientUserID": testUserID, + }) + }, quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + }) + + It("ignores Configs that don't match the data set id", func() { + runner, test := newCarePartnerRunnerTest() + firstResp := test.Alerts.OverdueCommunicationsResponses[0] + test.Alerts.OverdueCommunicationsResponses[0] = append(firstResp, LastCommunication{ + UserID: firstResp[0].UserID, + DataSetID: "non-matching", + LastReceivedDeviceData: firstResp[0].LastReceivedDeviceData, + }) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + + It("pushes to each token", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + + runner.Run(test.Ctx, 
test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("pushes to each token, continuing if any experience an error", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + test.Pusher.PushErrors = append([]error{fmt.Errorf("test error")}, test.Pusher.PushErrors...) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("ignores Configs that don't have permission", func() { + runner, test := newCarePartnerRunnerTest() + // disable permissions, no configs should be used + test.Permissions.AlwaysAllow = false + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(0)) + + // reset, add a user *with* perms, and check that it works + runner, test = newCarePartnerRunnerTest() + test.Permissions.AlwaysAllow = false + test.Permissions.Allow(mockUserID3, mockUserID2, permission.Follow, permission.Read) + cfg := *test.Config + cfg.UserID = mockUserID3 + test.Alerts.ListResponses[0] = append(test.Alerts.ListResponses[0], &cfg) + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + + It("upserts configs that need it", func() { + runner, test := newCarePartnerRunnerTest() + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, another when the notification is sent. + Expect(len(test.Alerts.UpsertCalls)).To(Equal(2)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Triggered).ToNot(BeZero()) + Expect(act0.Sent).To(BeZero()) + act1 := test.Alerts.UpsertCalls[1].Activity.NoCommunication + Expect(act1.Sent).ToNot(BeZero()) + }) + + It("upserts configs that need it, even without a notification", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, no call from sent (no notification to send) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(1)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Resolved).To(BeTemporally("~", time.Now())) + }) + + It("doesn't re-mark itself resolved", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + act.Resolved = time.Now().Add(-time.Minute) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) + + It("doesn't re-send before delay", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + orig := time.Now().Add(-time.Minute) + act.Triggered = orig + act.Sent = orig + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) + }) +}) + +var _ = Describe("NewCarePartnerTaskCreate", func() { + It("succeeds", func() { + Expect(func() { + Expect(NewCarePartnerTaskCreate()).ToNot(Equal(nil)) + }).ToNot(Panic()) + }) +}) + 
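+// carePartnerRunnerTest bundles the mocks and fixtures shared by the CarePartnerRunner tests.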
+type carePartnerRunnerTest struct { + Alerts *mockAlertsClient + Config *Config + Ctx context.Context + Logger *logtest.Logger + Permissions *mockPermissionClient + Pusher *mockPusher + Task *task.Task + Tokens *mockDeviceTokensClient +} + +func newCarePartnerRunnerTest() (*CarePartnerRunner, *carePartnerRunnerTest) { + alerts := newMockAlertsClient() + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true + pusher := newMockPusher() + tsk := &task.Task{} + tokens := newMockDeviceTokensClient() + perms := newMockPermissionClient() + perms.AlwaysAllow = true + authClient := newMockAuthTokenProvider() + + runner, err := NewCarePartnerRunner(lgr, alerts, tokens, pusher, perms, authClient) + Expect(err).To(Succeed()) + + last := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + alerts.OverdueCommunicationsResponses = [][]LastCommunication{{ + { + UserID: mockUserID2, + DataSetID: mockDataSetID, + LastReceivedDeviceData: last, + }, + }} + alerts.ListResponses = [][]*Config{{cfg}} + tokens.GetResponses = [][]*devicetokens.DeviceToken{ + { + {Apple: &devicetokens.AppleDeviceToken{}}, + }, + } + + return runner, &carePartnerRunnerTest{ + Alerts: alerts, + Config: cfg, + Ctx: ctx, + Logger: lgr, + Permissions: perms, + Pusher: pusher, + Task: tsk, + Tokens: tokens, + } +} + +type mockDeviceTokensClient struct { + GetError error + GetResponses [][]*devicetokens.DeviceToken +} + +func newMockDeviceTokensClient() *mockDeviceTokensClient { + return &mockDeviceTokensClient{ + GetResponses: [][]*devicetokens.DeviceToken{}, + } +} + +func (c *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if c.GetError != nil { + return nil, c.GetError + } + if len(c.GetResponses) > 0 { + ret := c.GetResponses[0] + c.GetResponses = c.GetResponses[1:] + return ret, nil + } + return nil, nil +} + +type mockPusher struct { + PushCalls []pushCall + PushErrors []error +} + +type pushCall struct { + Token *devicetokens.DeviceToken + Notification *push.Notification +} + +func newMockPusher() *mockPusher { + return &mockPusher{} +} + +func (p *mockPusher) Push(_ context.Context, + token *devicetokens.DeviceToken, notification *push.Notification) error { + + p.PushCalls = append(p.PushCalls, pushCall{token, notification}) + if len(p.PushErrors) > 0 { + err := p.PushErrors[0] + p.PushErrors = p.PushErrors[1:] + return err + } + return nil +} + +type mockAuthTokenProvider struct{} + +func newMockAuthTokenProvider() *mockAuthTokenProvider { + return &mockAuthTokenProvider{} +} + +func (p *mockAuthTokenProvider) ServerSessionToken() (string, error) { + return "", nil +} diff --git a/auth/auth.go b/auth/auth.go index e728beac54..976dbd1f43 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -21,6 +22,7 @@ type Client interface { RestrictedTokenAccessor ExternalAccessor permission.Client + DeviceTokensClient } type ExternalAccessor interface { @@ -51,3 +53,9 @@ func ServerSessionTokenProviderFromContext(ctx context.Context) ServerSessionTok type contextKey string const serverSessionTokenProviderContextKey contextKey = "serverSessionTokenProvider" + +// DeviceTokensClient provides access to the tokens used to authenticate +// mobile device push notifications. 
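+//
+// The platform auth client satisfies it via its GetDeviceTokens method.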
+type DeviceTokensClient interface { + GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) +} diff --git a/auth/client/client.go b/auth/client/client.go index a4c8511a27..d29f6561ba 100644 --- a/auth/client/client.go +++ b/auth/client/client.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -308,6 +309,18 @@ func (c *Client) DeleteRestrictedToken(ctx context.Context, id string) error { return c.client.RequestData(ctx, http.MethodDelete, url, nil, nil, nil) } +// GetDeviceTokens belonging to a given user. +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + ctx = log.NewContextWithLogger(ctx, c.logger) + url := c.client.ConstructURL("v1", "users", userID, "device_tokens") + tokens := []*devicetokens.DeviceToken{} + err := c.client.RequestData(ctx, http.MethodGet, url, nil, nil, &tokens) + if err != nil { + return nil, errors.Wrap(err, "Unable to request device token data") + } + return tokens, nil +} + type ConfigLoader interface { Load(*Config) error } diff --git a/auth/client/client_test.go b/auth/client/client_test.go index fbd9a6be14..23db62b316 100644 --- a/auth/client/client_test.go +++ b/auth/client/client_test.go @@ -2,6 +2,7 @@ package client_test import ( "context" + "encoding/json" "net/http" "time" @@ -14,6 +15,7 @@ import ( "github.com/tidepool-org/platform/auth" authClient "github.com/tidepool-org/platform/auth/client" authTest "github.com/tidepool-org/platform/auth/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" errorsTest "github.com/tidepool-org/platform/errors/test" "github.com/tidepool-org/platform/log" @@ -472,6 +474,64 @@ var _ = Describe("Client", func() { }) }) }) + + Describe("GetDeviceTokens", func() { + var testUserID = "test-user-id" + var testUserIDBadResponse = "test-user-id-bad-response" + var testTokens = map[string]any{ + testUserID: []*devicetokens.DeviceToken{{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("blah"), + Environment: "sandbox", + }, + }}, + testUserIDBadResponse: []map[string]any{ + { + "Apple": "", + }, + }, + } + + It("returns a token", func() { + body, err := json.Marshal(testTokens[testUserID]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + tokens, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect([]byte(tokens[0].Apple.Token)).To(Equal([]byte("blah"))) + Expect(tokens[0].Apple.Environment).To(Equal("sandbox")) + }) + + It("returns an error when receiving malformed responses", func() { + body, err := json.Marshal(testTokens[testUserIDBadResponse]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserIDBadResponse+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + _, err = client.GetDeviceTokens(ctx, testUserIDBadResponse) + Expect(err).To(HaveOccurred()) + }) + + It("returns an error on non-200 responses", func() { + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusBadRequest, nil)), + ) + _, err := client.GetDeviceTokens(ctx, 
testUserID) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("Unable to request device token data"))) + }) + }) }) }) }) diff --git a/auth/service/api/v1/auth_service_mock.go b/auth/service/api/v1/auth_service_mock.go index 00dadd0dbf..99bc534bfa 100644 --- a/auth/service/api/v1/auth_service_mock.go +++ b/auth/service/api/v1/auth_service_mock.go @@ -10,7 +10,6 @@ import ( gomock "github.com/golang/mock/gomock" api "github.com/tidepool-org/hydrophone/client" - apple "github.com/tidepool-org/platform/apple" appvalidate "github.com/tidepool-org/platform/appvalidate" auth "github.com/tidepool-org/platform/auth" diff --git a/auth/service/api/v1/devicetokens.go b/auth/service/api/v1/devicetokens.go index c19c654343..99d6b2ede1 100644 --- a/auth/service/api/v1/devicetokens.go +++ b/auth/service/api/v1/devicetokens.go @@ -13,6 +13,7 @@ import ( func (r *Router) DeviceTokensRoutes() []*rest.Route { return []*rest.Route{ rest.Post("/v1/users/:userId/device_tokens", api.RequireUser(r.UpsertDeviceToken)), + rest.Get("/v1/users/:userId/device_tokens", api.RequireAuth(r.GetDeviceTokens)), } } @@ -39,3 +40,27 @@ func (r *Router) UpsertDeviceToken(res rest.ResponseWriter, req *rest.Request) { return } } + +func (r *Router) GetDeviceTokens(res rest.ResponseWriter, req *rest.Request) { + responder := request.MustNewResponder(res, req) + ctx := req.Request.Context() + authDetails := request.GetAuthDetails(ctx) + repo := r.AuthStore().NewDeviceTokenRepository() + userID := req.PathParam("userId") + + if userID != authDetails.UserID() && !authDetails.IsService() { + responder.Error(http.StatusForbidden, request.ErrorUnauthorized()) + return + } + + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + responder.Error(http.StatusInternalServerError, err) + return + } + tokens := make([]devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, doc.DeviceToken) + } + responder.Data(http.StatusOK, tokens) +} diff --git a/auth/service/api/v1/devicetokens_test.go b/auth/service/api/v1/devicetokens_test.go index 1033b7cc9c..223208b617 100644 --- a/auth/service/api/v1/devicetokens_test.go +++ b/auth/service/api/v1/devicetokens_test.go @@ -3,6 +3,7 @@ package v1 import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -12,14 +13,18 @@ import ( . 
"github.com/onsi/gomega" serviceTest "github.com/tidepool-org/platform/auth/service/test" + storetest "github.com/tidepool-org/platform/auth/store/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/service/test" ) var _ = Describe("Device tokens endpoints", func() { var rtr *Router + var svc *serviceTest.Service + BeforeEach(func() { - svc := serviceTest.NewService() + svc = serviceTest.NewService() var err error rtr, err = NewRouter(svc) Expect(err).ToNot(HaveOccurred()) @@ -66,6 +71,64 @@ var _ = Describe("Device tokens endpoints", func() { }) + Describe("List", func() { + It("succeeds with valid input", func() { + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + }) + + It("rejects non-service users", func() { + svcDetails := test.NewMockAuthDetails(request.MethodAccessToken, "test-user", test.TestToken2) + req := newDeviceTokensTestRequest(svcDetails, nil, "") + res := test.NewMockRestResponseWriter() + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusForbidden)) + }) + + It("may return multiple documents", func() { + repo := &storetest.DeviceTokenRepository{ + Tokens: map[string][]*devicetokens.DeviceToken{ + test.TestUserID1: { + &devicetokens.DeviceToken{}, + &devicetokens.DeviceToken{}, + }, + }, + } + + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + got := []*devicetokens.DeviceToken{} + err := json.Unmarshal(res.Body.Bytes(), &got) + Expect(err).To(Succeed()) + Expect(got).To(HaveLen(2)) + }) + + It("handles repository errors", func() { + repo := &storetest.DeviceTokenRepository{ + Error: fmt.Errorf("test error"), + } + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusInternalServerError)) + }) + }) }) func buff(template string, args ...any) *bytes.Buffer { @@ -91,5 +154,4 @@ func newDeviceTokensTestRequest(auth request.AuthDetails, body io.Reader, userID Request: httpReq, PathParams: map[string]string{"userId": userIDFromPath}, } - } diff --git a/auth/service/service/client.go b/auth/service/service/client.go index 4335eae5db..71d27ad2dd 100644 --- a/auth/service/service/client.go +++ b/auth/service/service/client.go @@ -6,6 +6,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/auth/client" authStore "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -102,6 +103,19 @@ func (c *Client) DeleteAllProviderSessions(ctx context.Context, userID string) e return repository.DeleteAllProviderSessions(ctx, userID) } +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + repo := c.authStore.NewDeviceTokenRepository() + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + return nil, err + } + tokens := make([]*devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs 
{ + tokens = append(tokens, &doc.DeviceToken) + } + return tokens, nil +} + func (c *Client) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { repository := c.authStore.NewProviderSessionRepository() return repository.GetProviderSession(ctx, id) diff --git a/auth/service/service/client_test.go b/auth/service/service/client_test.go index 9a8a94e85d..26792ca30e 100644 --- a/auth/service/service/client_test.go +++ b/auth/service/service/client_test.go @@ -1,8 +1,123 @@ package service_test import ( + "context" + "fmt" + "time" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/ghttp" + + "github.com/tidepool-org/platform/appvalidate" + "github.com/tidepool-org/platform/auth/client" + "github.com/tidepool-org/platform/auth/service/service" + "github.com/tidepool-org/platform/auth/store" + storetest "github.com/tidepool-org/platform/auth/store/test" + platformclient "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/devicetokens" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/provider" ) var _ = Describe("Client", func() { + var testUserID = "test-user-id" + var testDeviceToken1 = &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("test"), + Environment: "sandbox", + }, + } + + newTestServiceClient := func(url string, authStore store.Store) *service.Client { + var err error + extCfg := &client.ExternalConfig{ + Config: &platform.Config{ + Config: &platformclient.Config{ + Address: url, + UserAgent: "test", + }, + ServiceSecret: "", + }, + ServerSessionTokenSecret: "test token", + ServerSessionTokenTimeout: time.Minute, + } + authAs := platform.AuthorizeAsService + name := "test auth client" + logger := logtest.NewLogger() + if authStore == nil { + repo := storetest.NewDeviceTokenRepository() + repo.Tokens = map[string][]*devicetokens.DeviceToken{ + testUserID: { + testDeviceToken1, + }} + + authStore = &mockAuthStore{ + DeviceTokenRepository: repo, + } + } + providerFactory := &mockProviderFactory{} + serviceClient, err := service.NewClient(extCfg, authAs, name, logger, authStore, providerFactory) + Expect(err).To(Succeed()) + return serviceClient + } + + Describe("GetDeviceTokens", func() { + It("returns a slice of tokens", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + serviceClient := newTestServiceClient(server.URL(), nil) + + tokens, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect(tokens[0]).To(Equal(testDeviceToken1)) + }) + + It("handles errors from the underlying repo", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + repo := storetest.NewDeviceTokenRepository() + repo.Error = fmt.Errorf("test error") + authStore := &mockAuthStore{ + DeviceTokenRepository: repo, + } + serviceClient := newTestServiceClient(server.URL(), authStore) + + _, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(HaveOccurred()) + }) + }) }) + +type mockAuthStore struct { + store.DeviceTokenRepository +} + +func (s *mockAuthStore) NewAppValidateRepository() appvalidate.Repository { + return nil +} + +func (s *mockAuthStore) NewProviderSessionRepository() store.ProviderSessionRepository { + return nil +} + +func (s *mockAuthStore) NewRestrictedTokenRepository() store.RestrictedTokenRepository { + return nil +} + +func 
(s *mockAuthStore) NewDeviceTokenRepository() store.DeviceTokenRepository { + return s.DeviceTokenRepository +} + +type mockProviderFactory struct{} + +func (f *mockProviderFactory) Get(typ string, name string) (provider.Provider, error) { + return nil, nil +} diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index 4a257ca9f0..d2bfad7a41 100644 --- a/auth/store/mongo/device_tokens_repository.go +++ b/auth/store/mongo/device_tokens_repository.go @@ -16,6 +16,20 @@ import ( // MongoDB collection. type deviceTokenRepo structuredmongo.Repository +func (r *deviceTokenRepo) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + f := bson.M{"userId": userID} + cursor, err := r.Find(ctx, f, nil) + if err != nil { + return nil, err + } + defer cursor.Close(ctx) + var docs []*devicetokens.Document + if err := cursor.All(ctx, &docs); err != nil { + return nil, err + } + return docs, nil +} + // Upsert will create or update the given Config. func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document) error { // The presence of UserID and TokenID should be enforced with a mongodb @@ -24,7 +38,7 @@ func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document return errors.New("UserID is empty") } if doc.TokenKey == "" { - return errors.New("TokenID is empty") + return errors.New("TokenKey is empty") } opts := options.Update().SetUpsert(true) diff --git a/auth/store/mongo/device_tokens_repository_test.go b/auth/store/mongo/device_tokens_repository_test.go new file mode 100644 index 0000000000..6eb45b2221 --- /dev/null +++ b/auth/store/mongo/device_tokens_repository_test.go @@ -0,0 +1,76 @@ +package mongo + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/store/structured/mongo" + storeStructuredMongoTest "github.com/tidepool-org/platform/store/structured/mongo/test" +) + +const testUserID = "857ec1d7-8777-4877-a308-96a23c066524" + +var _ = Describe("deviceTokenRepo", Label("mongodb", "slow", "integration"), func() { + It("retrieves all for the given user id", func() { + test := newDeviceTokensRepoTest() + + docs, err := test.Repo.GetAllByUserID(test.Ctx, testUserID) + Expect(err).To(Succeed()) + + if Expect(docs).To(HaveLen(2)) { + for _, doc := range docs { + Expect(doc.UserID).To(Equal(testUserID)) + } + } + }) + + It("ensures indexes", func() { + test := newDeviceTokensRepoTest() + Expect(test.Repo.EnsureIndexes()).To(Succeed()) + }) +}) + +type deviceTokensRepoTest struct { + Ctx context.Context + Repo store.DeviceTokenRepository + Config *mongo.Config + Store *Store +} + +func newDeviceTokensRepoTest() *deviceTokensRepoTest { + test := &deviceTokensRepoTest{ + Ctx: context.Background(), + Config: storeStructuredMongoTest.NewConfig(), + } + store, err := NewStore(test.Config) + Expect(err).To(Succeed()) + test.Store = store + test.Repo = store.NewDeviceTokenRepository() + + testDocs := []*devicetokens.Document{ + { + UserID: testUserID, + TokenKey: "a", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: testUserID, + TokenKey: "b", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: "not" + testUserID, + TokenKey: "c", + DeviceToken: devicetokens.DeviceToken{}, + }, + } + for _, testDoc := range testDocs { + Expect(test.Repo.Upsert(test.Ctx, testDoc)).To(Succeed()) + } + + return test +} diff --git a/auth/store/mongo/store_test.go b/auth/store/mongo/store_test.go index d12b0fba0f..34eec13f1b 100644 --- a/auth/store/mongo/store_test.go +++ b/auth/store/mongo/store_test.go @@ -133,7 +133,7 @@ var _ = Describe("Store", func() { doc.UserID = "user-id" doc.TokenKey = "" err = repository.Upsert(ctx, doc) - Expect(err).To(MatchError("TokenID is empty")) + Expect(err).To(MatchError("TokenKey is empty")) }) It("updates the existing document, instead of creating a duplicate", func() { diff --git a/auth/store/test/device_token_repository.go b/auth/store/test/device_token_repository.go index 4847596895..a15f913af1 100644 --- a/auth/store/test/device_token_repository.go +++ b/auth/store/test/device_token_repository.go @@ -9,6 +9,9 @@ import ( type DeviceTokenRepository struct { *authTest.DeviceTokenAccessor + Documents []*devicetokens.Document + Tokens map[string][]*devicetokens.DeviceToken + Error error } func NewDeviceTokenRepository() *DeviceTokenRepository { @@ -21,6 +24,20 @@ func (r *DeviceTokenRepository) Expectations() { r.DeviceTokenAccessor.Expectations() } +func (r *DeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + if tokens, ok := r.Tokens[userID]; ok { + docs := make([]*devicetokens.Document, 0, len(tokens)) + for _, token := range tokens { + docs = append(docs, &devicetokens.Document{DeviceToken: *token}) + } + return docs, nil + } + return nil, nil +} + func (r *DeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { return nil } diff --git a/auth/test/client.go b/auth/test/client.go index e500f69d34..9fba8f4e5c 100644 --- a/auth/test/client.go +++ b/auth/test/client.go @@ -4,6 +4,7 @@ type Client struct { 
*ProviderSessionAccessor *RestrictedTokenAccessor *ExternalAccessor + *DeviceTokensClient } func NewClient() *Client { @@ -11,6 +12,7 @@ func NewClient() *Client { ProviderSessionAccessor: NewProviderSessionAccessor(), RestrictedTokenAccessor: NewRestrictedTokenAccessor(), ExternalAccessor: NewExternalAccessor(), + DeviceTokensClient: NewDeviceTokensClient(), } } diff --git a/auth/test/external_accessor.go b/auth/test/external_accessor.go index 1916c1cf28..a7872e4c34 100644 --- a/auth/test/external_accessor.go +++ b/auth/test/external_accessor.go @@ -3,6 +3,7 @@ package test import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -179,3 +180,11 @@ func (e *ExternalAccessor) GetUserPermissions(ctx context.Context, requestUserID } panic("GetUserPermissions no output") } + +func NewDeviceTokensClient() *DeviceTokensClient { return &DeviceTokensClient{} } + +type DeviceTokensClient struct{} + +func (c *DeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + return nil, nil +} diff --git a/auth/test/mock.go b/auth/test/mock.go index 0146c6bb9d..878d3a4544 100644 --- a/auth/test/mock.go +++ b/auth/test/mock.go @@ -9,8 +9,8 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" + devicetokens "github.com/tidepool-org/platform/devicetokens" page "github.com/tidepool-org/platform/page" permission "github.com/tidepool-org/platform/permission" request "github.com/tidepool-org/platform/request" @@ -168,6 +168,21 @@ func (mr *MockClientMockRecorder) EnsureAuthorizedUser(ctx, targetUserID, permis return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureAuthorizedUser", reflect.TypeOf((*MockClient)(nil).EnsureAuthorizedUser), ctx, targetUserID, permission) } +// GetDeviceTokens mocks base method. +func (m *MockClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. +func (mr *MockClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockClient)(nil).GetDeviceTokens), ctx, userID) +} + // GetProviderSession mocks base method. func (m *MockClient) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { m.ctrl.T.Helper() @@ -436,3 +451,41 @@ func (mr *MockServerSessionTokenProviderMockRecorder) ServerSessionToken() *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerSessionToken", reflect.TypeOf((*MockServerSessionTokenProvider)(nil).ServerSessionToken)) } + +// MockDeviceTokensClient is a mock of DeviceTokensClient interface. +type MockDeviceTokensClient struct { + ctrl *gomock.Controller + recorder *MockDeviceTokensClientMockRecorder +} + +// MockDeviceTokensClientMockRecorder is the mock recorder for MockDeviceTokensClient. +type MockDeviceTokensClientMockRecorder struct { + mock *MockDeviceTokensClient +} + +// NewMockDeviceTokensClient creates a new mock instance. 
+func NewMockDeviceTokensClient(ctrl *gomock.Controller) *MockDeviceTokensClient { + mock := &MockDeviceTokensClient{ctrl: ctrl} + mock.recorder = &MockDeviceTokensClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeviceTokensClient) EXPECT() *MockDeviceTokensClientMockRecorder { + return m.recorder +} + +// GetDeviceTokens mocks base method. +func (m *MockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. +func (mr *MockDeviceTokensClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockDeviceTokensClient)(nil).GetDeviceTokens), ctx, userID) +} diff --git a/data/blood/glucose/glucose.go b/data/blood/glucose/glucose.go index 32ca889dd9..ac1d7717a9 100644 --- a/data/blood/glucose/glucose.go +++ b/data/blood/glucose/glucose.go @@ -67,3 +67,7 @@ func NormalizeValueForUnits(value *float64, units *string) *float64 { } return value } + +func IsMmolL(units string) bool { + return units == MmolL || units == Mmoll +} diff --git a/data/events/alerts.go b/data/events/alerts.go new file mode 100644 index 0000000000..cf0da13d3f --- /dev/null +++ b/data/events/alerts.go @@ -0,0 +1,239 @@ +package events + +import ( + "context" + "os" + "strings" + "time" + + "github.com/IBM/sarama" + "go.mongodb.org/mongo-driver/bson" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" +) + +type Consumer struct { + Alerts AlertsClient + Data alerts.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + LastCommunications LastCommunicationsRecorder + TokensProvider auth.ServerSessionTokenProvider + + Logger log.Logger +} + +// DosingDecision removes a stutter to improve readability. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose removes a stutter to improve readability. +type Glucose = glucose.Glucose + +func (c *Consumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + if msg == nil { + c.logger(ctx).Info("UNEXPECTED: nil message; ignoring") + return nil + } + + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, c.TokensProvider) + + switch { + case strings.Contains(msg.Topic, ".data.alerts"): + return c.consumeAlertsConfigs(ctx, session, msg) + case strings.Contains(msg.Topic, ".data.deviceData.alerts"): + return c.consumeDeviceData(ctx, session, msg) + default: + c.logger(ctx).WithField("topic", msg.Topic). 
+			Infof("UNEXPECTED: topic; ignoring")
+	}
+
+	return nil
+}
+
+func (c *Consumer) consumeAlertsConfigs(ctx context.Context,
+	session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+
+	cfg := &alerts.Config{}
+	updatedFields, err := unmarshalMessageValue(msg.Value, cfg)
+	if err != nil {
+		return err
+	}
+	lgr := c.logger(ctx)
+	if isActivityAndActivityOnly(updatedFields) {
+		lgr.WithField("updatedFields", updatedFields).
+			Debug("alerts config is an activity update, will skip")
+		lgr.WithField("msg.Key", string(msg.Key)).Debug("marked")
+		session.MarkMessage(msg, "")
+		return nil
+	}
+
+	lgr.WithField("cfg", cfg).Info("consuming an alerts config message")
+
+	ctxLog := cfg.LoggerWithFields(c.logger(ctx))
+	ctx = log.NewContextWithLogger(ctx, ctxLog)
+
+	notes, err := c.Evaluator.EvaluateData(ctx, cfg.FollowedUserID, cfg.UploadID)
+	if err != nil {
+		format := "Unable to evaluate alerts configs triggered event for user %s"
+		return errors.Wrapf(err, format, cfg.UserID)
+	}
+	ctxLog.WithField("notes", notes).Debug("notes generated from alerts config")
+
+	c.pushNotifications(ctx, notes)
+
+	session.MarkMessage(msg, "")
+	lgr.WithField("msg.Key", string(msg.Key)).Debug("marked")
+	return nil
+}
+
+func isActivityAndActivityOnly(updatedFields []string) bool {
+	hasActivity := false
+	for _, field := range updatedFields {
+		if field == "activity" {
+			hasActivity = true
+		} else {
+			return false
+		}
+	}
+	return hasActivity
+}
+
+func (c *Consumer) consumeDeviceData(ctx context.Context,
+	session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+
+	lgr := c.logger(ctx)
+	lgr.Debug("consuming device data message")
+
+	// The actual type should be either a glucose.Glucose or a
+	// dosingdecision.DosingDecision, but they both use types.Base, and that's where the
+	// only fields we need are defined.
+	datum := &types.Base{}
+	if _, err := unmarshalMessageValue(msg.Value, datum); err != nil {
+		return err
+	}
+	if datum.UserID == nil {
+		return errors.New("Unable to retrieve alerts configs: userID is nil")
+	}
+	if datum.UploadID == nil {
+		return errors.New("Unable to retrieve alerts configs: DataSetID is nil")
+	}
+	ctx = log.NewContextWithLogger(ctx, lgr.WithField("followedUserID", *datum.UserID))
+	lastComm := alerts.LastCommunication{
+		UserID:                 *datum.UserID,
+		LastReceivedDeviceData: time.Now(),
+		DataSetID:              *datum.UploadID,
+	}
+	err := c.LastCommunications.RecordReceivedDeviceData(ctx, lastComm)
+	if err != nil {
+		lgr.WithError(err).Info("Unable to record device data received")
+	}
+	notes, err := c.Evaluator.EvaluateData(ctx, *datum.UserID, *datum.UploadID)
+	if err != nil {
+		format := "Unable to evaluate device data triggered event for user %s"
+		return errors.Wrapf(err, format, *datum.UserID)
+	}
+	for idx, note := range notes {
+		lgr.WithField("idx", idx).WithField("note", note).Debug("notes")
+	}
+
+	c.pushNotifications(ctx, notes)
+
+	session.MarkMessage(msg, "")
+	lgr.WithField("msg.Key", string(msg.Key)).Debug("marked")
+	return nil
+}
+
+func (c *Consumer) pushNotifications(ctx context.Context, notifications []*alerts.Notification) {
+	lgr := c.logger(ctx)
+
+	// Notes could be pushed into a Kafka topic to have a more durable retry,
+	// but that can be added later.
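+	// For each notification, look up the recipient's device tokens and attempt a push per
+	// token; failures are only logged, and the notification is marked Sent on success.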
+ for _, notification := range notifications { + lgr := lgr.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("Unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNote := alerts.ToPushNotification(notification) + for _, token := range tokens { + err := c.Pusher.Push(ctx, token, pushNote) + if err != nil { + lgr.WithError(err).Info("Unable to push notification") + } else { + notification.Sent(time.Now()) + } + } + } +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (c *Consumer) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. + if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + if c.Logger != nil { + return c.Logger + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +type AlertsEvaluator interface { + // EvaluateData to check if notifications should be sent in response to new data. + EvaluateData(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.Notification, error) +} + +func unmarshalMessageValue[A any](b []byte, payload *A) ([]string, error) { + wrapper := &struct { + FullDocument A `json:"fullDocument"` + UpdateDescription struct { + UpdatedFields map[string]any `json:"updatedFields"` + } `json:"updateDescription"` + }{} + if err := bson.UnmarshalExtJSON(b, false, wrapper); err != nil { + return nil, errors.Wrap(err, "Unable to unmarshal ExtJSON") + } + *payload = wrapper.FullDocument + fields := []string{} + for k := range wrapper.UpdateDescription.UpdatedFields { + fields = append(fields, k) + } + return fields, nil +} + +type AlertsClient interface { + Delete(context.Context, *alerts.Config) error + Get(context.Context, *alerts.Config) (*alerts.Config, error) + List(_ context.Context, userID string) ([]*alerts.Config, error) + Upsert(context.Context, *alerts.Config) error +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go new file mode 100644 index 0000000000..4ebcb80f25 --- /dev/null +++ b/data/events/alerts_test.go @@ -0,0 +1,536 @@ +package events + +import ( + "context" + "sync" + "time" + + "github.com/IBM/sarama" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + + "github.com/tidepool-org/platform/alerts" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + storetest "github.com/tidepool-org/platform/data/store/test" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" +) + +const ( + testUserID = "test-user-id" + testFollowedUserID = "test-followed-user-id" + testUserNoPermsID = "test-user-no-perms" + testDataSetID = "test-data-set-id" +) + +var _ = Describe("Consumer", func() { + Describe("Consume", func() { + It("ignores nil messages", func() { + ctx, _ := addLogger(context.Background()) + c := &Consumer{} + + Expect(c.Consume(ctx, nil, nil)).To(Succeed()) + }) + + It("consumes alerts config events", func() { + cfg := &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + Units: "mg/dL", + }, + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.alerts", cfg) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("records device data events", func() { + blood := newTestStaticDatumMmolL(7.2) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.LastCommunications.NumCallsFor(testFollowedUserID)).To(Equal(1)) + }) + + It("consumes device data events", func() { + blood := newTestStaticDatumMmolL(7.2) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("errors out when the datum's UserID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UserID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). + To(MatchError(ContainSubstring("userID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("errors out when the datum's UploadID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UploadID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). 
+ To(MatchError(ContainSubstring("DataSetID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("pushes notifications", func() { + blood := newTestStaticDatumMmolL(1.0) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + eval := newMockEvaluator() + eval.Evaluations[testFollowedUserID+testDataSetID] = []mockEvaluatorResponse{ + { + Notifications: []*alerts.Notification{ + { + Message: "something", + RecipientUserID: testUserID, + FollowedUserID: testFollowedUserID, + Sent: func(time.Time) {}, + }, + }, + }, + } + c.Evaluator = eval + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + }) + }) + + Describe("LastCommunicationsReporter", func() { + Describe("RecordReceivedDeviceData", func() { + It("records the metadata for the user id", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) + mockRepo := newMockLastCommunicationsRepository() + rec := NewLastCommunicationRecorder(mockRepo) + lastComm := alerts.LastCommunication{ + UserID: testFollowedUserID, + LastReceivedDeviceData: time.Now(), + DataSetID: "test", + } + err := rec.RecordReceivedDeviceData(ctx, lastComm) + Expect(err).To(Succeed()) + Expect(mockRepo.NumCallsFor(testFollowedUserID)).To(Equal(1)) + }) + }) + }) +}) + +type consumerTestDeps struct { + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + DeviceTokens *mockDeviceTokens + Evaluator *mockStaticEvaluator + Logger *logtest.Logger + Permissions *mockPermissionsClient + Pusher Pusher + LastCommunications *mockLastCommunicationsRecorder + Repo *storetest.DataRepository + Session *mockConsumerGroupSession +} + +func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { + GinkgoHelper() + ctx, logger := addLogger(context.Background()) + alertsClient := newMockAlertsConfigClient([]*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{}, + }, + }, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + augmentedDocs := augmentMockMongoDocs(docs) + cur := newMockMongoCursor(augmentedDocs) + dataRepo.GetDataRangeOutputs = []storetest.GetDataRangeOutput{ + {Error: nil, Cursor: cur}, + } + permissions := newMockPermissionsClient() + evaluator := newMockStaticEvaluator() + pusher := push.NewLogPusher(logger) + deviceTokens := newMockDeviceTokens() + lastCommunications := newMockLastCommunicationsRecorder() + + return &Consumer{ + Alerts: alertsClient, + Evaluator: evaluator, + Data: dataRepo, + DeviceTokens: deviceTokens, + Permissions: permissions, + Pusher: pusher, + LastCommunications: lastCommunications, + }, &consumerTestDeps{ + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + DeviceTokens: deviceTokens, + Evaluator: evaluator, + Pusher: pusher, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + LastCommunications: lastCommunications, + Permissions: permissions, + } +} + +// mockEvaluator implements Evaluator. 
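+//
+// Evaluations queues responses keyed by followedUserID + dataSetID, and EvaluateCalls
+// counts how many times each key has been evaluated.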
+type mockEvaluator struct { + Evaluations map[string][]mockEvaluatorResponse + EvaluateCalls map[string]int +} + +type mockEvaluatorResponse struct { + Notifications []*alerts.Notification + Error error +} + +func newMockEvaluator() *mockEvaluator { + return &mockEvaluator{ + Evaluations: map[string][]mockEvaluatorResponse{}, + EvaluateCalls: map[string]int{}, + } +} + +func (e *mockEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*alerts.Notification, error) { + + key := followedUserID + dataSetID + if _, found := e.Evaluations[key]; !found { + return nil, nil + } + resp := e.Evaluations[key][0] + if len(e.Evaluations[key]) > 1 { + e.Evaluations[key] = e.Evaluations[key][1:] + } + e.EvaluateCalls[key] += 1 + if resp.Error != nil { + return nil, resp.Error + } + return resp.Notifications, nil +} + +func (e *mockEvaluator) EvaluateCallsTotal() int { + total := 0 + for _, val := range e.EvaluateCalls { + total += val + } + return total +} + +// mockStaticEvaluator wraps mock evaluator with a static response. +// +// Useful when testing Consumer behavior, when the behavior of the Evaulator +// isn't relevant to the Consumer test. +type mockStaticEvaluator struct { + *mockEvaluator +} + +func newMockStaticEvaluator() *mockStaticEvaluator { + return &mockStaticEvaluator{newMockEvaluator()} +} + +func (e *mockStaticEvaluator) EvaluateData(ctx context.Context, + followedUserID, dataSetID string) ([]*alerts.Notification, error) { + + e.EvaluateCalls[followedUserID] += 1 + return nil, nil +} + +func newAlertsMockConsumerMessage(topic string, v any) *sarama.ConsumerMessage { + GinkgoHelper() + doc := &struct { + FullDocument any `json:"fullDocument" bson:"fullDocument"` + }{FullDocument: v} + vBytes, err := bson.MarshalExtJSON(doc, false, false) + Expect(err).To(Succeed()) + return &sarama.ConsumerMessage{ + Value: vBytes, + Topic: topic, + } +} + +func addLogger(ctx context.Context) (context.Context, *logtest.Logger) { + GinkgoHelper() + if ctx == nil { + ctx = context.Background() + } + + lgr := logtest.NewLogger() + return log.NewContextWithLogger(ctx, lgr), lgr +} + +func augmentMockMongoDocs(inDocs []interface{}) []interface{} { + defaultDoc := bson.M{ + "_userId": testFollowedUserID, + "_active": true, + "type": "upload", + "time": time.Now(), + } + outDocs := []interface{}{} + for _, inDoc := range inDocs { + newDoc := defaultDoc + switch v := (inDoc).(type) { + case map[string]interface{}: + for key, val := range v { + newDoc[key] = val + } + outDocs = append(outDocs, newDoc) + default: + outDocs = append(outDocs, inDoc) + } + } + return outDocs +} + +func newMockMongoCursor(docs []interface{}) *mongo.Cursor { + GinkgoHelper() + cur, err := mongo.NewCursorFromDocuments(docs, nil, nil) + Expect(err).To(Succeed()) + return cur +} + +type mockAlertsConfigClient struct { + Error error + Configs []*alerts.Config +} + +func newMockAlertsConfigClient(c []*alerts.Config, err error) *mockAlertsConfigClient { + if c == nil { + c = []*alerts.Config{} + } + return &mockAlertsConfigClient{ + Configs: c, + Error: err, + } +} + +func (c *mockAlertsConfigClient) Delete(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +func (c *mockAlertsConfigClient) Get(_ context.Context, _ *alerts.Config) (*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs[0], nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) List(_ context.Context, userID string) ([]*alerts.Config, error) { + if 
c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs, nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) Upsert(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +type mockConsumerGroupSession struct { + MarkCalls int +} + +func (s *mockConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MemberID() string { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) Commit() { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + s.MarkCalls++ +} + +func (s *mockConsumerGroupSession) Context() context.Context { + panic("not implemented") // TODO: Implement +} + +type mockPermissionsClient struct { + Error error + Perms map[string]permission.Permissions +} + +func newMockPermissionsClient() *mockPermissionsClient { + return &mockPermissionsClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionsClient) Key(requesterUserID, targetUserID string) string { + return requesterUserID + targetUserID +} + +func (c *mockPermissionsClient) Allow(requestUserID, perm, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + c.Perms[key][perm] = permission.Permission{} +} + +func (c *mockPermissionsClient) DenyAll(requestUserID, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + delete(c.Perms, key) +} + +func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestUserID string, targetUserID string) (permission.Permissions, error) { + if c.Error != nil { + return nil, c.Error + } + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +type mockLastCommunicationsRecorder struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockLastCommunicationsRecorder() *mockLastCommunicationsRecorder { + return &mockLastCommunicationsRecorder{ + recordCalls: map[string]int{}, + } +} + +func (r *mockLastCommunicationsRecorder) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil +} + +func (r *mockLastCommunicationsRecorder) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +type mockLastCommunicationsRepository struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockLastCommunicationsRepository() *mockLastCommunicationsRepository { + return &mockLastCommunicationsRepository{ + recordCalls: map[string]int{}, + } +} + +func (r *mockLastCommunicationsRepository) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer 
r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil +} + +func (r *mockLastCommunicationsRepository) OverdueCommunications(ctx context.Context) ( + []alerts.LastCommunication, error) { + + return nil, nil +} + +func (r *mockLastCommunicationsRepository) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +func (r *mockLastCommunicationsRepository) EnsureIndexes() error { return nil } + +type mockDeviceTokens struct { + Tokens map[string][]*devicetokens.DeviceToken +} + +func newMockDeviceTokens() *mockDeviceTokens { + return &mockDeviceTokens{ + Tokens: map[string][]*devicetokens.DeviceToken{}, + } +} + +func (t *mockDeviceTokens) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if tokens, found := t.Tokens[userID]; found { + return tokens, nil + } + return nil, nil +} + +func newTestStaticDatumMmolL(value float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + UserID: pointer.FromAny(testFollowedUserID), + Time: pointer.FromTime(time.Now()), + UploadID: pointer.FromAny(testDataSetID), + }, + Units: pointer.FromString(dataBloodGlucose.MmolL), + Value: pointer.FromFloat64(value), + }, + } +} diff --git a/data/events/events.go b/data/events/events.go index 3e41a0630d..dbe425790c 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -1,15 +1,26 @@ package events import ( + "bytes" "context" + "fmt" + "log/slog" + "os" + "strconv" + "sync" + "time" + "github.com/IBM/sarama" + "github.com/tidepool-org/go-common/asyncevents" ev "github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataStore "github.com/tidepool-org/platform/data/store" summaryStore "github.com/tidepool-org/platform/data/summary/store" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" ) type userDeletionEventsHandler struct { @@ -58,3 +69,507 @@ func (u *userDeletionEventsHandler) HandleDeleteUserEvent(payload ev.DeleteUserE } return nil } + +// AlertsEventRetryDelayMaximum is the maximum delay between consumption +// retries. +const AlertsEventRetryDelayMaximum = time.Minute + +// AlertsEventRetries is the maximum consumption attempts before giving up. +const AlertsEventRetries = 1000 + +// AlertsEventConsumptionTimeout is the maximum time to process an alerts event. +const AlertsEventConsumptionTimeout = 30 * time.Second + +// SaramaRunner interfaces between [events.Runner] and go-common's +// [asyncevents.SaramaEventsConsumer]. +// +// This means providing Initialize(), Run(), and Terminate() to satisfy events.Runner, while +// under the hood calling SaramaEventConsumer's Run(), and canceling its Context as +// appropriate. +type SaramaRunner struct { + eventsRunner SaramaEventsRunner + cancelCtx context.CancelFunc + cancelMu sync.Mutex +} + +func NewSaramaRunner(eventsRunner SaramaEventsRunner) *SaramaRunner { + return &SaramaRunner{ + eventsRunner: eventsRunner, + } +} + +// SaramaEventsRunner is implemented by go-common's [asyncevents.SaramaEventsRunner]. +type SaramaEventsRunner interface { + Run(ctx context.Context) error +} + +// SaramaRunnerConfig collects values needed to initialize a SaramaRunner. 
+// +// This provides isolation for the SaramaRunner from ConfigReporter, +// envconfig, or any of the other options in platform for reading config +// values. +type SaramaRunnerConfig struct { + Brokers []string + GroupID string + Topics []string + MessageConsumer asyncevents.SaramaMessageConsumer + + Sarama *sarama.Config +} + +func (r *SaramaRunner) Initialize() error { return nil } + +// Run adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Run() error { + if r.eventsRunner == nil { + return errors.New("Unable to run SaramaRunner, eventsRunner is nil") + } + + r.cancelMu.Lock() + ctx, err := func() (context.Context, error) { + defer r.cancelMu.Unlock() + if r.cancelCtx != nil { + return nil, errors.New("Unable to Run SaramaRunner, it's already initialized") + } + var ctx context.Context + ctx, r.cancelCtx = context.WithCancel(context.Background()) + return ctx, nil + }() + if err != nil { + return err + } + if err := r.eventsRunner.Run(ctx); err != nil { + return errors.Wrap(err, "Unable to Run SaramaRunner") + } + return nil +} + +// Terminate adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Terminate() error { + r.cancelMu.Lock() + defer r.cancelMu.Unlock() + if r.cancelCtx == nil { + return errors.New("Unable to Terminate SaramaRunner, it's not running") + } + r.cancelCtx() + return nil +} + +// CappedExponentialBinaryDelay builds delay functions that use exponential +// binary backoff with a maximum duration. +func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { + return func(tries int) time.Duration { + b := asyncevents.DelayExponentialBinary(tries) + if b > cap { + return cap + } + return b + } +} + +type AlertsEventsConsumer struct { + Consumer asyncevents.SaramaMessageConsumer + Logger log.Logger +} + +func (c *AlertsEventsConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error { + err := c.Consumer.Consume(ctx, session, message) + if err != nil { + session.MarkMessage(message, fmt.Sprintf("I have given up after error: %s", err)) + c.Logger.WithError(err).Info("Unable to consume alerts event") + return err + } + return nil +} + +// CascadingSaramaEventsRunner manages multiple sarama consumer groups to execute a +// topic-cascading retry process. +// +// The topic names are generated from Config.Topics combined with Delays. If given a single +// topic "updates", and delays: 0s, 1s, and 5s, then the following topics will be consumed: +// updates, updates-retry-1s, updates-retry-5s. The consumer of the updates-retry-5s topic +// will write failed messages to updates-dead. +// +// The inspiration for this system was drawn from +// https://www.uber.com/blog/reliable-reprocessing/ +type CascadingSaramaEventsRunner struct { + Config SaramaRunnerConfig + Delays []time.Duration + Logger log.Logger + SaramaBuilders SaramaBuilders +} + +func NewCascadingSaramaEventsRunner(config SaramaRunnerConfig, logger log.Logger, + delays []time.Duration) *CascadingSaramaEventsRunner { + + return &CascadingSaramaEventsRunner{ + Config: config, + Delays: delays, + Logger: logger, + SaramaBuilders: DefaultSaramaBuilders{}, + } +} + +// LimitedAsyncProducer restricts the [sarama.AsyncProducer] interface to ensure that its +// recipient isn't able to call Close(), thereby opening the potential for a panic when +// writing to a closed channel. 
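+//
+// A sarama.AsyncProducer already satisfies this interface, so the cascading consumer can
+// be handed the transactional subset (BeginTxn, CommitTxn, AbortTxn, Input) while the
+// owning goroutine retains responsibility for calling Close.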
+type LimitedAsyncProducer interface { + AbortTxn() error + BeginTxn() error + CommitTxn() error + Input() chan<- *sarama.ProducerMessage +} + +func (r *CascadingSaramaEventsRunner) Run(ctx context.Context) error { + if len(r.Config.Topics) == 0 { + return errors.New("no topics") + } + if len(r.Delays) == 0 { + return errors.New("no delays") + } + + producersCtx, cancel := context.WithCancel(ctx) + defer cancel() + var wg sync.WaitGroup + errs := make(chan error, len(r.Config.Topics)*len(r.Delays)) + defer func() { + r.logger(ctx).Debug("CascadingSaramaEventsRunner: waiting for consumers") + wg.Wait() + r.logger(ctx).Debug("CascadingSaramaEventsRunner: all consumers returned") + close(errs) + }() + + for _, topic := range r.Config.Topics { + for idx, delay := range r.Delays { + producerCfg := r.producerConfig(idx, delay) + // The producer is built here rather than in buildConsumer() to control when + // producer is closed. Were the producer to be closed before consumer.Run() + // returns, it would be possible for consumer to write to the producer's + // Inputs() channel, which if closed, would cause a panic. + producer, err := r.SaramaBuilders.NewAsyncProducer(r.Config.Brokers, producerCfg) + if err != nil { + return errors.Wrapf(err, "Unable to build async producer: %s", r.Config.GroupID) + } + + consumer, err := r.buildConsumer(producersCtx, idx, producer, delay, topic) + if err != nil { + return err + } + + wg.Add(1) + go func(topic string) { + defer func() { wg.Done(); producer.Close() }() + if err := consumer.Run(producersCtx); err != nil { + errs <- fmt.Errorf("topics[%q]: %s", topic, err) + } + r.logger(ctx).WithField("topic", topic). + Debug("CascadingSaramaEventsRunner: consumer go proc returning") + }(topic) + } + } + + select { + case <-ctx.Done(): + r.logger(ctx).Debug("CascadingSaramaEventsRunner: context is done") + return nil + case err := <-errs: + r.logger(ctx).WithError(err). + Debug("CascadingSaramaEventsRunner: Run(): error from consumer") + return err + } +} + +func (r *CascadingSaramaEventsRunner) producerConfig(idx int, delay time.Duration) *sarama.Config { + uniqueConfig := *r.Config.Sarama + hostID := os.Getenv("HOSTNAME") // set by default in kubernetes pods + if hostID == "" { + hostID = fmt.Sprintf("%d-%d", time.Now().UnixNano()/int64(time.Second), os.Getpid()) + } + txnID := fmt.Sprintf("%s-%s-%d-%s", r.Config.GroupID, delay.String(), idx, hostID) + uniqueConfig.Producer.Transaction.ID = txnID + uniqueConfig.Producer.Idempotent = true + uniqueConfig.Producer.RequiredAcks = sarama.WaitForAll + uniqueConfig.Net.MaxOpenRequests = 1 + uniqueConfig.Consumer.IsolationLevel = sarama.ReadCommitted + return &uniqueConfig +} + +// SaramaBuilders allows tests to inject mock objects. +type SaramaBuilders interface { + NewAsyncProducer([]string, *sarama.Config) (sarama.AsyncProducer, error) + NewConsumerGroup([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) +} + +// DefaultSaramaBuilders implements SaramaBuilders for normal, non-test use. 
+type DefaultSaramaBuilders struct{}
+
+func (DefaultSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) (
+	sarama.AsyncProducer, error) {
+
+	return sarama.NewAsyncProducer(brokers, config)
+}
+
+func (DefaultSaramaBuilders) NewConsumerGroup(brokers []string, groupID string,
+	config *sarama.Config) (sarama.ConsumerGroup, error) {
+
+	return sarama.NewConsumerGroup(brokers, groupID, config)
+}
+
+func (r *CascadingSaramaEventsRunner) buildConsumer(ctx context.Context, idx int,
+	producer LimitedAsyncProducer, delay time.Duration, baseTopic string) (
+	*asyncevents.SaramaEventsConsumer, error) {
+
+	groupID := r.Config.GroupID
+	if delay > 0 {
+		groupID += "-retry-" + delay.String()
+	}
+	group, err := r.SaramaBuilders.NewConsumerGroup(r.Config.Brokers, groupID,
+		r.Config.Sarama)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Unable to build sarama consumer group: %s", groupID)
+	}
+
+	var consumer asyncevents.SaramaMessageConsumer = r.Config.MessageConsumer
+	if len(r.Delays) > 0 {
+		nextTopic := baseTopic + "-dead"
+		if idx+1 < len(r.Delays) {
+			nextTopic = baseTopic + "-retry-" + r.Delays[idx+1].String()
+		}
+		consumer = &CascadingConsumer{
+			Consumer:  consumer,
+			NextTopic: nextTopic,
+			Producer:  producer,
+			Logger:    r.Logger,
+		}
+	}
+	if delay > 0 {
+		consumer = &NotBeforeConsumer{
+			Consumer: consumer,
+			Logger:   r.Logger,
+		}
+	}
+	aeLoggerAdapter := &asynceventsLoggerAdapter{r.Logger}
+	handler := asyncevents.NewSaramaConsumerGroupHandler(aeLoggerAdapter, consumer,
+		AlertsEventConsumptionTimeout)
+	topic := baseTopic
+	if delay > 0 {
+		topic += "-retry-" + delay.String()
+	}
+	r.logger(ctx).WithField("topic", topic).Debug("creating consumer")
+
+	return asyncevents.NewSaramaEventsConsumer(group, handler, topic), nil
+}
+
+func (r *CascadingSaramaEventsRunner) logger(ctx context.Context) log.Logger {
+	// A context logger might have more fields or ... context. So prefer that if available.
+	if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil {
+		return ctxLogger
+	}
+	if r.Logger == nil {
+		// logjson.NewLogger will only fail if an argument is missing.
+		r.Logger, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel())
+	}
+	return r.Logger
+}
+
+// NotBeforeConsumer delays consumption until a specified time.
+type NotBeforeConsumer struct {
+	Consumer asyncevents.SaramaMessageConsumer
+	Logger   log.Logger
+}
+
+func (c *NotBeforeConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession,
+	msg *sarama.ConsumerMessage) error {
+
+	notBefore, err := c.notBeforeFromMsgHeaders(msg)
+	if err != nil {
+		c.Logger.WithError(err).Info("Unable to parse kafka header not-before value")
+	}
+	delay := time.Until(notBefore)
+
+	select {
+	case <-ctx.Done():
+		if ctxErr := ctx.Err(); ctxErr != context.Canceled {
+			return ctxErr
+		}
+		return nil
+	case <-time.After(delay):
+		if !notBefore.IsZero() {
+			fields := log.Fields{"topic": msg.Topic, "not-before": notBefore, "delay": delay}
+			c.Logger.WithFields(fields).Debugf("delayed")
+		}
+		return c.Consumer.Consume(ctx, session, msg)
+	}
+}
+
+// HeaderNotBefore tells consumers not to consume a message before a certain time.
+var HeaderNotBefore = []byte("x-tidepool-not-before")
+
+// NotBeforeTimeFormat specifies the [time.Parse] format to use for HeaderNotBefore.
+var NotBeforeTimeFormat = time.RFC3339Nano
+
+// HeaderFailures counts the number of failures encountered trying to consume the message.
+var HeaderFailures = []byte("x-tidepool-failures") + +// FailuresToDelay maps the number of consumption failures to the next delay. +// +// Rather than using a failures header, the name of the topic could be used as a lookup, if +// so desired. +var FailuresToDelay = map[int]time.Duration{ + 0: 0, + 1: 1 * time.Second, + 2: 2 * time.Second, + 3: 3 * time.Second, + 4: 5 * time.Second, +} + +func (c *NotBeforeConsumer) notBeforeFromMsgHeaders(msg *sarama.ConsumerMessage) ( + time.Time, error) { + + for _, header := range msg.Headers { + if bytes.Equal(header.Key, HeaderNotBefore) { + notBefore, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return time.Time{}, fmt.Errorf("parsing not before header: %s", err) + } else { + return notBefore, nil + } + } + } + return time.Time{}, fmt.Errorf("header not found: x-tidepool-not-before") +} + +// CascadingConsumer cascades messages that failed to be consumed to another topic. +// +// It also sets an adjustable delay via the "not-before" and "failures" headers so that as +// the message moves from topic to topic, the time between processing is increased according +// to [FailuresToDelay]. +type CascadingConsumer struct { + Consumer asyncevents.SaramaMessageConsumer + NextTopic string + Producer LimitedAsyncProducer + Logger log.Logger +} + +func (c *CascadingConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, + msg *sarama.ConsumerMessage) (err error) { + + if err := c.Consumer.Consume(ctx, session, msg); err != nil { + txnErr := c.withTxn(func() error { + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != context.Canceled { + return ctxErr + } + return nil + case c.Producer.Input() <- c.cascadeMessage(msg): + fields := log.Fields{"from": msg.Topic, "to": c.NextTopic} + c.Logger.WithFields(fields).Debug("cascaded") + return nil + } + }) + if txnErr != nil { + c.Logger.WithError(txnErr).Info("Unable to complete cascading transaction") + return err + } + } + return nil +} + +// withTxn wraps a function with a transaction that is aborted if an error is returned. +func (c *CascadingConsumer) withTxn(f func() error) (err error) { + if err := c.Producer.BeginTxn(); err != nil { + return errors.Wrap(err, "Unable to begin transaction") + } + defer func(err *error) { + if err != nil && *err != nil { + if abortErr := c.Producer.AbortTxn(); abortErr != nil { + c.Logger.WithError(abortErr).Info("Unable to abort transaction") + } + return + } + if commitErr := c.Producer.CommitTxn(); commitErr != nil { + c.Logger.WithError(commitErr).Info("Unable to commit transaction") + } + }(&err) + return f() +} + +// cascadeMessage to the next topic. +func (c *CascadingConsumer) cascadeMessage(msg *sarama.ConsumerMessage) *sarama.ProducerMessage { + pHeaders := make([]sarama.RecordHeader, len(msg.Headers)) + for idx, header := range msg.Headers { + pHeaders[idx] = *header + } + return &sarama.ProducerMessage{ + Key: sarama.ByteEncoder(msg.Key), + Value: sarama.ByteEncoder(msg.Value), + Topic: c.NextTopic, + Headers: c.updateCascadeHeaders(pHeaders), + } +} + +// updateCascadeHeaders calculates not before and failures header values. +// +// Existing not before and failures headers will be dropped in place of the new ones. 
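+//
+// The incoming failures count selects the additional delay from FailuresToDelay, the
+// not-before time is advanced by that delay, and the failures header is re-written with
+// the count incremented by one.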
+func (c *CascadingConsumer) updateCascadeHeaders(headers []sarama.RecordHeader) []sarama.RecordHeader { + failures := 0 + notBefore := time.Now() + + keep := make([]sarama.RecordHeader, 0, len(headers)) + for _, header := range headers { + switch { + case bytes.Equal(header.Key, HeaderNotBefore): + continue // Drop this header, we'll add a new version below. + case bytes.Equal(header.Key, HeaderFailures): + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + if err != nil { + c.Logger.WithError(err).Info("Unable to parse consumption failures count") + } else { + failures = int(parsed) + notBefore = notBefore.Add(FailuresToDelay[failures]) + } + continue // Drop this header, we'll add a new version below. + } + keep = append(keep, header) + } + + keep = append(keep, sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + keep = append(keep, sarama.RecordHeader{ + Key: HeaderFailures, + Value: []byte(strconv.Itoa(failures + 1)), + }) + + return keep +} + +type LastCommunicationsRecorder interface { + // RecordReceivedDeviceData to support sending care partner alerts. + // + // Metadata about when we last received data for any given user is + // used to determine if alerts should be sent to the care partners + // of a given user. + RecordReceivedDeviceData(context.Context, alerts.LastCommunication) error +} + +// asynceventsLoggerAdapter adapts a [log.Logger] to [asyncevents.Logger]. +type asynceventsLoggerAdapter struct { + log.Logger +} + +var logLevels map[slog.Level]log.Level = map[slog.Level]log.Level{ + slog.LevelDebug: log.DebugLevel, + slog.LevelInfo: log.InfoLevel, + slog.LevelWarn: log.WarnLevel, + slog.LevelError: log.ErrorLevel, +} + +func (a *asynceventsLoggerAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + a.Logger.Log(logLevels[level], fmt.Sprintf(msg, args...)) +} diff --git a/data/events/events_suite_test.go b/data/events/events_suite_test.go new file mode 100644 index 0000000000..4bab08b129 --- /dev/null +++ b/data/events/events_suite_test.go @@ -0,0 +1,34 @@ +package events + +import ( + "log/slog" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} + +var _ = BeforeSuite(func() { + slog.SetDefault(devNullSlogLogger(GinkgoT())) +}) + +// Cleaner is part of testing.T and FullGinkgoTInterface +type Cleaner interface { + Cleanup(func()) +} + +func devNullSlogLogger(c Cleaner) *slog.Logger { + f, err := os.Open(os.DevNull) + Expect(err).To(Succeed()) + c.Cleanup(func() { + Expect(f.Close()).To(Succeed()) + }) + return slog.New(slog.NewTextHandler(f, nil)) +} diff --git a/data/events/events_test.go b/data/events/events_test.go new file mode 100644 index 0000000000..9e8036a54d --- /dev/null +++ b/data/events/events_test.go @@ -0,0 +1,747 @@ +package events + +import ( + "bytes" + "context" + "fmt" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/IBM/sarama" + "github.com/IBM/sarama/mocks" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log/devlog" + lognull "github.com/tidepool-org/platform/log/null" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("SaramaRunner", func() { + Context("has a lifecycle", func() { + newTestRunner := func() *SaramaRunner { + return NewSaramaRunner(&mockEventsRunner{}) + } + It("starts with Run() and stops with Terminate()", func() { + r := newTestRunner() + var runErr error + var errMu sync.Mutex + launched := make(chan struct{}, 1) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + runErr = r.Run() + launched <- struct{}{} + }() + }() + <-launched + time.Sleep(time.Millisecond) + errMu.Lock() + defer errMu.Unlock() + + Expect(r.Terminate()).To(Succeed()) + Eventually(runErr).WithTimeout(10 * time.Millisecond).Should(Succeed()) + }) + + Describe("Run()", func() { + var errMu sync.Mutex + + It("can be started only once", func() { + r := newTestRunner() + var firstRunErr, secondRunErr error + launched := make(chan struct{}, 2) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + firstRunErr = r.Run() + launched <- struct{}{} + }() + }() + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + secondRunErr = r.Run() + launched <- struct{}{} + }() + + }() + <-launched + <-launched + errMu.Lock() + defer errMu.Unlock() + + // The above doesn't _guarantee_ that Run has been called twice, + // but... it should work. + + Expect(r.Terminate()).To(Succeed()) + if firstRunErr != nil { + Expect(firstRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + Expect(secondRunErr).To(Succeed()) + } else { + Expect(firstRunErr).To(Succeed()) + Expect(secondRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + } + }) + + It("can't be Terminate()'d before it's Run()", func() { + r := newTestRunner() + Expect(r.Terminate()).To(MatchError(ContainSubstring("it's not running"))) + }) + }) + }) +}) + +var _ = DescribeTable("CappedExponentialBinaryDelay", + func(cap time.Duration, input int, output time.Duration) { + f := CappedExponentialBinaryDelay(cap) + Expect(f(input)).To(Equal(output)) + }, + Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), + Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), + Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), + Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), + Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), + Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), + Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), + Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), +) + +var _ = Describe("NotBeforeConsumer", func() { + Describe("Consume", func() { + var newTestMsg = func(notBefore time.Time) *sarama.ConsumerMessage { + headers := []*sarama.RecordHeader{} + if !notBefore.IsZero() { + headers = append(headers, &sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + } + return &sarama.ConsumerMessage{Topic: "test.topic", Headers: headers} + } + + It("delays based on the x-tidepool-not-before header", func() { + logger := newTestDevlog() + testDelay := 10 * time.Millisecond + ctx := context.Background() + start := time.Now() + notBefore := start.Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + Logger: logger, + } + + err := dc.Consume(ctx, nil, msg) + + Expect(err).To(BeNil()) + 
Expect(time.Since(start)).To(BeNumerically(">", testDelay)) + }) + + It("aborts if canceled", func() { + logger := newTestDevlog() + testDelay := 10 * time.Millisecond + abortAfter := 1 * time.Millisecond + notBefore := time.Now().Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Logger: logger, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + <-time.After(abortAfter) + }() + start := time.Now() + + err := dc.Consume(ctx, nil, msg) + + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + + }) +}) + +var _ = Describe("CascadingConsumer", func() { + Describe("Consume", func() { + var testMsg = &sarama.ConsumerMessage{ + Topic: "test.topic", + } + + Context("on failure", func() { + It("cascades topics", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + if msg.Topic != nextTopic { + return fmt.Errorf("expected topic to be %q, got %q", nextTopic, msg.Topic) + } + return nil + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + + It("increments the failures header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("3"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + failures := 0 + for _, header := range msg.Headers { + if !bytes.Equal(header.Key, HeaderFailures) { + continue + } + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + Expect(err).To(Succeed()) + failures = int(parsed) + if failures != 4 { + return fmt.Errorf("expected failures == 4, got %d", failures) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + + It("updates the not before header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("2"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + for _, header := range 
msg.Headers { + if !bytes.Equal(header.Key, HeaderNotBefore) { + continue + } + parsed, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return err + } + until := time.Until(parsed) + delta := 10 * time.Millisecond + if until < 2*time.Second-delta || until > 2*time.Second+delta { + return fmt.Errorf("expected 2 seconds' delay, got: %s", until) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("on success", func() { + It("doesn't produce a new message", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("when canceled", func() { + It("aborts", func() { + logger := newTestDevlog() + abortAfter := 1 * time.Millisecond + p := newMockSaramaAsyncProducer(nil) + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Logger: lognull.NewLogger(), + Producer: p, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + time.Sleep(abortAfter) + }() + start := time.Now() + + err := sc.Consume(ctx, nil, testMsg) + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + }) + }) +}) + +var _ = Describe("CascadingSaramaEventsRunner", func() { + It("cascades through configured delays", func() { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + testDelays := []time.Duration{0, 1, 2, 3, 5} + testLogger := newTestDevlog() + testMessageConsumer := &mockSaramaMessageConsumer{ + Delay: time.Millisecond, + Err: fmt.Errorf("test error"), + Logger: testLogger, + } + testConfig := SaramaRunnerConfig{ + Topics: []string{"test.cascading"}, + MessageConsumer: testMessageConsumer, + Sarama: mocks.NewTestConfig(), + } + producers := []*mockSaramaAsyncProducer{} + var msgsReceived atomic.Int32 + prodFunc := func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + prod := newMockSaramaAsyncProducer(func(msg *sarama.ProducerMessage) { + msgsReceived.Add(1) + if int(msgsReceived.Load()) == len(testDelays) { + // Once all messages are entered, the test is complete. Cancel the + // context to shut it all down properly. 
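// (Each of the five configured delays gets its own consumer and mock producer, and the
// always-failing consumer cascades the message once per level, with the final republish
// targeting the dead-letter topic, so len(testDelays) produced messages means the whole
// chain has been exercised.)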
+ cancel() + } + }) + producers = append(producers, prod) + return prod, nil + } + sser := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + sser.SaramaBuilders = newTestSaramaBuilders(nil, prodFunc) + + err := sser.Run(ctx) + Expect(err).To(Succeed()) + for pIdx, p := range producers { + Expect(p.isClosed()).To(BeTrue()) + Expect(p.messages).To(HaveLen(1)) + topic := p.messages[0].Topic + switch { + case pIdx+1 < len(testDelays): + Expect(topic).To(MatchRegexp(fmt.Sprintf(".*-retry-%s$", testDelays[pIdx+1]))) + default: + Expect(topic).To(MatchRegexp(".*-dead$")) + } + } + }) + + Describe("logger", func() { + It("prefers a context's logger", func() { + testLogger := logtest.NewLogger() + ctxLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := log.NewContextWithLogger(context.Background(), ctxLogger) + got := r.logger(ctx) + + Expect(got).To(Equal(ctxLogger)) + }) + + Context("without a context logger", func() { + It("uses the configured logger", func() { + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := context.Background() + got := r.logger(ctx) + + Expect(got).To(Equal(testLogger)) + }) + + Context("or any configured logger", func() { + It("doesn't panic", func() { + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := context.Background() + got := r.logger(ctx) + + Expect(func() { + got.Debug("testing") + }).ToNot(Panic()) + }) + }) + }) + }) +}) + +// testSaramaBuilders injects mocks into the CascadingSaramaEventsRunner +type testSaramaBuilders struct { + consumerGroup func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) + producer func([]string, *sarama.Config) (sarama.AsyncProducer, error) +} + +func newTestSaramaBuilders( + cgFunc func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error), + prodFunc func([]string, *sarama.Config) (sarama.AsyncProducer, error)) *testSaramaBuilders { + + if cgFunc == nil { + cgFunc = func(_ []string, groupID string, config *sarama.Config) (sarama.ConsumerGroup, error) { + logger := newTestDevlog() + return &mockSaramaConsumerGroup{ + Logger: logger, + }, nil + } + } + if prodFunc == nil { + prodFunc = func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + return mocks.NewAsyncProducer(GinkgoT(), config), nil + } + } + return &testSaramaBuilders{ + consumerGroup: cgFunc, + producer: prodFunc, + } +} + +func (b testSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) ( + sarama.AsyncProducer, error) { + + return b.producer(brokers, config) +} + +func (b testSaramaBuilders) NewConsumerGroup(brokers []string, groupID string, + config *sarama.Config) (sarama.ConsumerGroup, error) { + + return b.consumerGroup(brokers, groupID, config) +} + +type mockEventsRunner struct { + Err error +} + +func (r *mockEventsRunner) Run(ctx context.Context) error { + return r.Err +} + +type mockSaramaMessageConsumer struct { + Delay time.Duration + Err error + Logger log.Logger +} + +func (c *mockSaramaMessageConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + c.Logger.Debugf("mockSaramaMessageConsumer[%q] is 
consuming %+v", msg.Topic, msg) + defer func(err *error) { + c.Logger.Debugf("mockSaramaMessageConsumer[%q] returns %s", msg.Topic, *err) + }(&err) + + done := ctx.Done() + select { + case <-time.After(c.Delay): + // no op + case <-done: + return ctx.Err() + } + + if c.Err != nil { + return c.Err + } + return nil +} + +type mockSaramaConsumerGroup struct { + Messages chan *sarama.ConsumerMessage + ConsumeErr error + Logger log.Logger +} + +func (g *mockSaramaConsumerGroup) Consume(ctx context.Context, + topics []string, handler sarama.ConsumerGroupHandler) error { + + if g.ConsumeErr != nil { + return g.ConsumeErr + } + + g.Logger.Debugf("mockSaramaConsumerGroup%s consuming", topics) + session := &mockSaramaConsumerGroupSession{} + if g.Messages == nil { + g.Messages = make(chan *sarama.ConsumerMessage) + go func() { <-ctx.Done(); close(g.Messages) }() + go g.feedYourClaim(ctx, topics[0]) + } + claim := &mockSaramaConsumerGroupClaim{ + topic: topics[0], + messages: g.Messages, + } + + err := handler.ConsumeClaim(session, claim) + if err != nil { + return err + } + return nil +} + +func (g *mockSaramaConsumerGroup) feedYourClaim(ctx context.Context, topic string) { + msg := &sarama.ConsumerMessage{Topic: topic} + select { + case <-ctx.Done(): + return + case g.Messages <- msg: + // no op + } +} + +func (g *mockSaramaConsumerGroup) Errors() <-chan error { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Close() error { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Pause(partitions map[string][]int32) { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Resume(partitions map[string][]int32) { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) PauseAll() { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) ResumeAll() { + panic("not implemented") // implement if needed} +} + +type mockSaramaConsumerGroupSession struct{} + +func (s *mockSaramaConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MemberID() string { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) Commit() { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) Context() context.Context { + return context.Background() +} + +type mockSaramaConsumerGroupClaim struct { + messages <-chan *sarama.ConsumerMessage + topic string +} + +func (c *mockSaramaConsumerGroupClaim) Topic() string { + return c.topic +} + +func (c *mockSaramaConsumerGroupClaim) Partition() int32 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) InitialOffset() int64 { + panic("not implemented") // implement if needed +} + 
+func (c *mockSaramaConsumerGroupClaim) HighWaterMarkOffset() int64 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { + return c.messages +} + +type mockSaramaAsyncProducer struct { + input chan *sarama.ProducerMessage + messages []*sarama.ProducerMessage + mu sync.Mutex + setupCallbacksOnce sync.Once + closeOnce sync.Once + msgCallback func(*sarama.ProducerMessage) +} + +func newMockSaramaAsyncProducer(msgCallback func(*sarama.ProducerMessage)) *mockSaramaAsyncProducer { + return &mockSaramaAsyncProducer{ + input: make(chan *sarama.ProducerMessage), + messages: []*sarama.ProducerMessage{}, + msgCallback: msgCallback, + } +} + +func (p *mockSaramaAsyncProducer) AsyncClose() { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) Close() error { + p.closeOnce.Do(func() { close(p.input) }) + return nil +} + +func (p *mockSaramaAsyncProducer) setupCallbacks() { + if p.msgCallback == nil { + return + } + p.setupCallbacksOnce.Do(func() { + go func(callback func(*sarama.ProducerMessage)) { + for msg := range p.input { + p.messages = append(p.messages, msg) + go callback(msg) + } + }(p.msgCallback) + }) +} + +func (p *mockSaramaAsyncProducer) Input() chan<- *sarama.ProducerMessage { + defer p.setupCallbacks() + return p.input +} + +func (p *mockSaramaAsyncProducer) Successes() <-chan *sarama.ProducerMessage { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) Errors() <-chan *sarama.ProducerError { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) IsTransactional() bool { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) BeginTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) CommitTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AbortTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) isClosed() bool { + p.mu.Lock() + defer p.mu.Unlock() + select { + case _, open := <-p.input: + return !open + default: + return false + } +} + +func newTestDevlog() log.Logger { + GinkgoHelper() + l, err := devlog.NewWithDefaults(GinkgoWriter) + Expect(err).To(Succeed()) + return l +} diff --git a/data/events/last_communications_recorder.go b/data/events/last_communications_recorder.go new file mode 100644 index 0000000000..96e6ec218c --- /dev/null +++ b/data/events/last_communications_recorder.go @@ -0,0 +1,41 @@ +package events + +import ( + "context" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + lognull "github.com/tidepool-org/platform/log/null" +) + +type LastCommunicationRecorder struct { + Repo alerts.LastCommunicationsRepository +} + +func NewLastCommunicationRecorder(repo alerts.LastCommunicationsRepository) *LastCommunicationRecorder { + return &LastCommunicationRecorder{ + Repo: repo, + } +} + +func (r *LastCommunicationRecorder) 
RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + logger := r.log(ctx).WithFields(log.Fields{ + "userID": lastComm.UserID, + "dataSetID": lastComm.DataSetID, + }) + logger.Info("recording received data") + if err := r.Repo.RecordReceivedDeviceData(ctx, lastComm); err != nil { + return errors.Wrap(err, "Unable to record metadata on reception of device data") + } + return nil +} + +func (r *LastCommunicationRecorder) log(ctx context.Context) log.Logger { + if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return ctxLogger + } + return lognull.NewLogger() +} diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index d07891247e..ec987757a5 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -24,6 +24,8 @@ func AlertsRoutes() []service.Route { service.Get("/v1/users/:userId/followers/:followerUserId/alerts", GetAlert, api.RequireAuth), service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), + service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), + service.Get("/v1/overdue_communications", ListOverdueCommunications, api.RequireServer), } } @@ -114,8 +116,8 @@ func UpsertAlert(dCtx service.Context) { return } - a := &alerts.Alerts{} - if err := request.DecodeRequestBody(r.Request, a); err != nil { + cfg := &alerts.Config{} + if err := request.DecodeRequestBody(r.Request, cfg); err != nil { dCtx.RespondWithError(platform.ErrorJSONMalformed()) return } @@ -126,7 +128,6 @@ func UpsertAlert(dCtx service.Context) { return } - cfg := &alerts.Config{UserID: path.UserID, FollowedUserID: path.FollowedUserID, Alerts: *a} if err := repo.Upsert(ctx, cfg); err != nil { dCtx.RespondWithError(platform.ErrorInternalServerFailure()) lgr.WithError(err).Error("upserting alerts config") @@ -134,6 +135,67 @@ func UpsertAlert(dCtx service.Context) { } } +func ListAlerts(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + authDetails := request.GetAuthDetails(ctx) + repo := dCtx.AlertsRepository() + lgr := log.LoggerFromContext(ctx) + + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + pathsUserID := r.PathParam("userId") + if err := checkUserIDConsistency(authDetails, pathsUserID); err != nil { + lgr.WithFields(log.Fields{"path": pathsUserID, "auth": authDetails.UserID()}). 
+ Debug("user id consistency failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + alerts, err := repo.List(ctx, pathsUserID) + if err != nil { + dCtx.RespondWithInternalServerFailure("listing alerts configs", err) + lgr.WithError(err).Error("listing alerts config") + return + } + if len(alerts) == 0 { + dCtx.RespondWithError(ErrorUserIDNotFound(pathsUserID)) + lgr.Debug("no alerts configs found") + } + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, alerts) +} + +func ListOverdueCommunications(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + + authDetails := request.GetAuthDetails(ctx) + lgr := log.LoggerFromContext(ctx) + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + overdue, err := dCtx.LastCommunicationsRepository().OverdueCommunications(ctx) + if err != nil { + lgr.WithError(err).Debug("Unable to list overdue records") + dCtx.RespondWithError(platform.ErrorInternalServerFailure()) + return + } + + lgr.WithField("found", len(overdue)).WithField("overdue", overdue). + Debug("/v1/overdue_communications") + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, overdue) +} + // checkUserIDConsistency verifies the userIDs in a request. // // For safety reasons, if these values don't agree, return an error. @@ -150,7 +212,7 @@ func checkUserIDConsistency(details request.AuthDetails, userIDFromPath string) // checkAuthentication ensures that the request has an authentication token. func checkAuthentication(details request.AuthDetails) error { - if details.Token() == "" { + if details.HasToken() && details.Token() == "" { return platformerrors.New("unauthorized") } if details.IsUser() { diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index c3b4b2f2a5..f517c224a7 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -3,7 +3,10 @@ package v1 import ( "bytes" "context" + "encoding/json" + "fmt" "net/http" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -17,10 +20,15 @@ import ( "github.com/tidepool-org/platform/service/test" ) +var testUserID = mocks.TestUserID1 +var testFollowedUserID = mocks.TestUserID2 + +const testDataSetID = "upid_000000000000" + func permsNoFollow() map[string]map[string]permission.Permissions { return map[string]map[string]permission.Permissions{ mocks.TestUserID1: { - mocks.TestUserID2: { + testFollowedUserID: { permission.Read: map[string]interface{}{}, }, }, @@ -32,11 +40,11 @@ var _ = Describe("Alerts endpoints", func() { testAuthenticationRequired := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() badDetails := test.NewMockAuthDetails(request.MethodSessionToken, "", "") dCtx.WithAuthDetails(badDetails) @@ -49,11 +57,12 @@ var _ = Describe("Alerts endpoints", func() { testUserHasFollowPermission := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, })) dCtx := mocks.NewContext(t, "", "", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() dCtx.MockPermissionClient = mocks.NewPermission(permsNoFollow(), nil, nil) f(dCtx) @@ -69,7 +78,7 @@ var _ = Describe("Alerts endpoints", func() { dCtx.WithAuthDetails(details) } dCtx.RESTRequest.PathParams["followerUserId"] = "bad" - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -82,7 +91,7 @@ var _ = Describe("Alerts endpoints", func() { t := GinkgoT() body := bytes.NewBuffer([]byte(`"improper JSON data"`)) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -103,6 +112,24 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(DeleteAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + }, + } + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + DeleteAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) Describe("Upsert", func() { @@ -121,8 +148,65 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(UpsertAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + testCfg, _ := json.Marshal(testConfig()) + dCtx := mocks.NewContext(t, "", "", bytes.NewBuffer(testCfg)) + dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + UpsertAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) + Describe("ListAlerts", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(ListAlerts) + }) + + It("requires that the user's token matches the userID path param", func() { + testTokenUserIDMustMatchPathParam(ListAlerts, nil) + }) + + It("errors when no Config exists", func() { + t := 
GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusNotFound)) + }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + {FollowedUserID: "foo", UserID: "bar"}, + } + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK), rec.Body.String()) + got := []*alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].UserID).To(Equal("bar")) + Expect(got[0].FollowedUserID).To(Equal("foo")) + } + }) + }) Describe("Get", func() { It("rejects unauthenticated users", func() { testAuthenticationRequired(GetAlert) @@ -132,14 +216,14 @@ var _ = Describe("Alerts endpoints", func() { testTokenUserIDMustMatchPathParam(GetAlert, nil) }) - It("errors when Config doesn't exist", func() { + It("errors when no Config exists", func() { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := newMockAlertsRepo() repo.ReturnsError(mongo.ErrNoDocuments) dCtx.MockAlertsRepository = repo @@ -151,21 +235,107 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(func(dCtx dataservice.Context) { - dCtx.Request().PathParams["userId"] = mocks.TestUserID2 + dCtx.Request().PathParams["userId"] = testFollowedUserID GetAlert(dCtx) }) }) + + It("succeeds", func() { + t := GinkgoT() + url := fmt.Sprintf("/v1/users/%s/followers/%s/alerts", testFollowedUserID, testUserID) + dCtx := mocks.NewContext(t, "GET", url, nil) + repo := newMockAlertsRepo() + repo.GetAlertsResponses[testUserID+testFollowedUserID] = &alerts.Config{ + FollowedUserID: "foo", + UserID: "bar", + } + dCtx.MockAlertsRepository = repo + + GetAlert(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := &alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(got)).To(Succeed()) + Expect(got.UserID).To(Equal("bar")) + Expect(got.FollowedUserID).To(Equal("foo")) + }) + }) + + Describe("ListOverdueCommunications", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(ListOverdueCommunications) + }) + + It("succeeds, even when there are no users found", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + dCtx.MockLastCommunicationsRepository = newMockLastCommunicationsRepo() + ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + }) + + It("errors when the upstream repo errors", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + lastCommunicationsRepo := newMockLastCommunicationsRepo() + lastCommunicationsRepo.ListOverdueCommunicationsError = fmt.Errorf("test error") + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo + + 
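// With the repository stubbed to return an error, the handler should respond with an
// internal server error (asserted below).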
ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusInternalServerError)) + }) + + It("succeeds, even when there are no users found", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + lastCommunicationsRepo := newMockLastCommunicationsRepo() + testTime := time.Unix(123, 456) + lastCommunicationsRepo.ListOverdueCommunicationsResponses = [][]alerts.LastCommunication{ + { + { + LastReceivedDeviceData: testTime, + }, + }, + } + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo + + ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := []alerts.LastCommunication{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].LastReceivedDeviceData).To(BeTemporally("==", testTime)) + } + }) }) }) type mockRepo struct { - UserID string - Error error + UserID string + Error error + AlertsForUserID map[string][]*alerts.Config + GetAlertsResponses map[string]*alerts.Config } -func newMockRepo() *mockRepo { - return &mockRepo{} +func newMockAlertsRepo() *mockRepo { + return &mockRepo{ + AlertsForUserID: map[string][]*alerts.Config{}, + GetAlertsResponses: map[string]*alerts.Config{}, + } } func (r *mockRepo) ReturnsError(err error) { @@ -189,6 +359,9 @@ func (r *mockRepo) Get(ctx context.Context, conf *alerts.Config) (*alerts.Config if conf != nil { r.UserID = conf.UserID } + if resp, found := r.GetAlertsResponses[conf.UserID+conf.FollowedUserID]; found { + return resp, nil + } return &alerts.Config{}, nil } @@ -202,6 +375,62 @@ func (r *mockRepo) Delete(ctx context.Context, conf *alerts.Config) error { return nil } +func (r *mockRepo) List(ctx context.Context, userID string) ([]*alerts.Config, error) { + if r.Error != nil { + return nil, r.Error + } + r.UserID = userID + alerts, ok := r.AlertsForUserID[userID] + if !ok { + return nil, nil + } + return alerts, nil +} + func (r *mockRepo) EnsureIndexes() error { return nil } + +type mockLastCommunicationsRepo struct { + ListOverdueCommunicationsResponses [][]alerts.LastCommunication + ListOverdueCommunicationsError error +} + +func newMockLastCommunicationsRepo() *mockLastCommunicationsRepo { + return &mockLastCommunicationsRepo{ + ListOverdueCommunicationsResponses: [][]alerts.LastCommunication{}, + } +} + +func (r *mockLastCommunicationsRepo) RecordReceivedDeviceData(_ context.Context, + _ alerts.LastCommunication) error { + + return nil +} + +func (r *mockLastCommunicationsRepo) OverdueCommunications(_ context.Context) ( + []alerts.LastCommunication, error) { + + if r.ListOverdueCommunicationsError != nil { + return nil, r.ListOverdueCommunicationsError + } + + if len(r.ListOverdueCommunicationsResponses) > 0 { + ret := r.ListOverdueCommunicationsResponses[0] + r.ListOverdueCommunicationsResponses = r.ListOverdueCommunicationsResponses[1:] + return ret, nil + } + return nil, nil +} + +func (r *mockLastCommunicationsRepo) EnsureIndexes() error { + return nil +} + +func testConfig() *alerts.Config { + return &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + } +} diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index d0ac5c33d2..1d1afb20cb 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -10,7 +10,8 @@ import ( 
"github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data/service/context" - "github.com/tidepool-org/platform/devicetokens" + log "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" servicecontext "github.com/tidepool-org/platform/service/context" @@ -23,18 +24,20 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. - authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockDeviceTokensRepository devicetokens.Repository - MockPermissionClient permission.Client + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client + MockLastCommunicationsRepository alerts.LastCommunicationsRepository } func NewContext(t likeT, method, url string, body io.Reader) *Context { details := DefaultAuthDetails() ctx := request.NewContextWithAuthDetails(stdcontext.Background(), details) + lgr := logtest.NewLogger() + ctx = log.NewContextWithLogger(ctx, lgr) r, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { t.Fatalf("error creating request: %s", err) @@ -98,10 +101,10 @@ func (c *Context) AlertsRepository() alerts.Repository { return c.MockAlertsRepository } -func (c *Context) DeviceTokensRepository() devicetokens.Repository { - return c.MockDeviceTokensRepository -} - func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } + +func (c *Context) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + return c.MockLastCommunicationsRepository +} diff --git a/data/service/api/v1/mocks/mocklogger_test_gen.go b/data/service/api/v1/mocks/mocklogger_test_gen.go index 81757d6525..65b949fcee 100644 --- a/data/service/api/v1/mocks/mocklogger_test_gen.go +++ b/data/service/api/v1/mocks/mocklogger_test_gen.go @@ -8,7 +8,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - log "github.com/tidepool-org/platform/log" ) diff --git a/data/service/api/v1/users_datasets_create_test.go b/data/service/api/v1/users_datasets_create_test.go index 26004f79fb..302a4f7474 100644 --- a/data/service/api/v1/users_datasets_create_test.go +++ b/data/service/api/v1/users_datasets_create_test.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + dataService "github.com/tidepool-org/platform/data/service" "github.com/tidepool-org/platform/data/summary/reporters" "github.com/tidepool-org/platform/clinics" @@ -84,6 +85,8 @@ type testingT interface { Fatalf(format string, args ...any) } +var _ dataService.Context = (*mockDataServiceContext)(nil) + type mockDataServiceContext struct { t testingT @@ -224,3 +227,7 @@ func (c *mockDataServiceContext) DataSourceClient() dataSource.Client { func (c *mockDataServiceContext) SummaryReporter() *reporters.PatientRealtimeDaysReporter { panic("not implemented") } + +func (c *mockDataServiceContext) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + panic("not implemented") +} diff --git a/data/service/context.go b/data/service/context.go index 7ea41f3005..5cb714a7f0 100644 --- a/data/service/context.go +++ b/data/service/context.go @@ -29,6 +29,7 @@ type Context interface { SummaryRepository() dataStore.SummaryRepository 
SyncTaskRepository() syncTaskStore.SyncTaskRepository AlertsRepository() alerts.Repository + LastCommunicationsRepository() alerts.LastCommunicationsRepository SummarizerRegistry() *summary.SummarizerRegistry SummaryReporter() *reporters.PatientRealtimeDaysReporter diff --git a/data/service/context/standard.go b/data/service/context/standard.go index 79d7a6c95d..f9c72b07d9 100644 --- a/data/service/context/standard.go +++ b/data/service/context/standard.go @@ -26,21 +26,22 @@ import ( type Standard struct { *serviceContext.Responder - authClient auth.Client - metricClient metric.Client - permissionClient permission.Client - dataDeduplicatorFactory deduplicator.Factory - dataStore dataStore.Store - dataRepository dataStore.DataRepository - summaryRepository dataStore.SummaryRepository - summarizerRegistry *summary.SummarizerRegistry - summaryReporter *reporters.PatientRealtimeDaysReporter - syncTaskStore syncTaskStore.Store - syncTasksRepository syncTaskStore.SyncTaskRepository - dataClient dataClient.Client - clinicsClient clinics.Client - dataSourceClient dataSource.Client - alertsRepository alerts.Repository + authClient auth.Client + metricClient metric.Client + permissionClient permission.Client + dataDeduplicatorFactory deduplicator.Factory + dataStore dataStore.Store + dataRepository dataStore.DataRepository + summaryRepository dataStore.SummaryRepository + summarizerRegistry *summary.SummarizerRegistry + summaryReporter *reporters.PatientRealtimeDaysReporter + syncTaskStore syncTaskStore.Store + syncTasksRepository syncTaskStore.SyncTaskRepository + dataClient dataClient.Client + clinicsClient clinics.Client + dataSourceClient dataSource.Client + alertsRepository alerts.Repository + lastCommunicationsRepository alerts.LastCommunicationsRepository } func WithContext(authClient auth.Client, metricClient metric.Client, permissionClient permission.Client, @@ -129,6 +130,9 @@ func (s *Standard) Close() { if s.alertsRepository != nil { s.alertsRepository = nil } + if s.lastCommunicationsRepository != nil { + s.lastCommunicationsRepository = nil + } } func (s *Standard) AuthClient() auth.Client { @@ -208,3 +212,10 @@ func (s *Standard) AlertsRepository() alerts.Repository { } return s.alertsRepository } + +func (s *Standard) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + if s.lastCommunicationsRepository == nil { + s.lastCommunicationsRepository = s.dataStore.NewLastCommunicationsRepository() + } + return s.lastCommunicationsRepository +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 80911f4b20..c873d535f7 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,15 +2,16 @@ package service import ( "context" - "log" - "os" - - "github.com/tidepool-org/platform/clinics" + "strings" + "time" "github.com/IBM/sarama" + eventsCommon "github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/application" + "github.com/tidepool-org/platform/clinics" dataDeduplicatorDeduplicator "github.com/tidepool-org/platform/data/deduplicator/deduplicator" dataDeduplicatorFactory "github.com/tidepool-org/platform/data/deduplicator/factory" dataEvents "github.com/tidepool-org/platform/data/events" @@ -22,11 +23,12 @@ import ( dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" - logInternal "github.com/tidepool-org/platform/log" + 
"github.com/tidepool-org/platform/log" metricClient "github.com/tidepool-org/platform/metric/client" "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/service/server" "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" @@ -44,7 +46,9 @@ type Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client + alertsPusher dataEvents.Pusher userEventsHandler events.Runner + alertsEventsHandler events.Runner api *api.Standard server *server.Standard } @@ -87,9 +91,18 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeDataSourceClient(); err != nil { return err } + if err := s.initializeSaramaLogger(); err != nil { + return err + } + if err := s.initializeAlertsPusher(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } + if err := s.initializeAlertsEventsHandler(); err != nil { + return err + } if err := s.initializeAPI(); err != nil { return err } @@ -110,6 +123,13 @@ func (s *Standard) Terminate() { } s.userEventsHandler = nil } + if s.alertsEventsHandler != nil { + s.Logger().Info("Terminating the alertsEventsHandler") + if err := s.alertsEventsHandler.Terminate(); err != nil { + s.Logger().Errorf("Error while terminating the alertsEventsHandler: %v", err) + } + s.alertsEventsHandler = nil + } s.api = nil s.dataClient = nil if s.syncTaskStore != nil { @@ -140,6 +160,9 @@ func (s *Standard) Run() error { go func() { errs <- s.userEventsHandler.Run() }() + go func() { + errs <- s.alertsEventsHandler.Run() + }() go func() { errs <- s.server.Serve() }() @@ -406,9 +429,8 @@ func (s *Standard) initializeServer() error { func (s *Standard) initializeUserEventsHandler() error { s.Logger().Debug("Initializing user events handler") - sarama.Logger = log.New(os.Stdout, "SARAMA ", log.LstdFlags|log.Lshortfile) - ctx := logInternal.NewContextWithLogger(context.Background(), s.Logger()) + ctx := log.NewContextWithLogger(context.Background(), s.Logger()) handler := dataEvents.NewUserDataDeletionHandler(ctx, s.dataStore, s.dataSourceStructuredStore) handlers := []eventsCommon.EventHandler{handler} runner := events.NewRunner(handlers) @@ -419,3 +441,110 @@ func (s *Standard) initializeUserEventsHandler() error { return nil } + +func (s *Standard) initializeSaramaLogger() error { + // Multiple properties of Standard use the sarama package. This is + // intended to be the one place that the sarama Logger is initialized, + // before any of the properties that need it are run. + sarama.Logger = log.NewSarama(s.Logger()) + return nil +} + +func (s *Standard) initializeAlertsPusher() error { + var err error + var pusher dataEvents.Pusher + pusher, err = alerts.NewPusher() + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of alerts push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.alertsPusher = pusher + + return nil +} + +func (s *Standard) initializeAlertsEventsHandler() error { + s.Logger().Debug("Initializing alerts events handler") + + commonConfig := eventsCommon.NewConfig() + if err := commonConfig.LoadFromEnv(); err != nil { + return err + } + + topics := []string{"data.alerts", "data.deviceData.alerts"} + // Some kafka topics use a `-` as a prefix. 
But MongoDB CDC topics are created with + // `.`. This code is using CDC topics, so ensuring that a `.` is used for alerts events + // lines everything up as expected. + topicPrefix := strings.ReplaceAll(commonConfig.KafkaTopicPrefix, "-", ".") + prefixedTopics := make([]string, 0, len(topics)) + for _, topic := range topics { + prefixedTopics = append(prefixedTopics, topicPrefix+topic) + } + commonConfig.SaramaConfig.ClientID = topicPrefix + "alerts" + + alertsRepo := s.dataStore.NewAlertsRepository() + dataRepo := s.dataStore.NewDataRepository() + lastCommunicationsRepo := s.dataStore.NewLastCommunicationsRepository() + + alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.PermissionClient(), + s.Logger(), s.AuthClient()) + + ec := &dataEvents.Consumer{ + Alerts: alertsRepo, + Evaluator: alertsEvaluator, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Logger: s.Logger(), + Permissions: s.PermissionClient(), + Pusher: s.alertsPusher, + LastCommunications: dataEvents.NewLastCommunicationRecorder(lastCommunicationsRepo), + TokensProvider: s.AuthClient(), + } + + runnerCfg := dataEvents.SaramaRunnerConfig{ + Brokers: commonConfig.KafkaBrokers, + GroupID: topicPrefix + "alerts", + Topics: prefixedTopics, + Sarama: commonConfig.SaramaConfig, + MessageConsumer: &dataEvents.AlertsEventsConsumer{ + Consumer: ec, + Logger: s.Logger(), + }, + } + + cfg := platform.NewConfig() + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("alerts", "retry") + loader := platform.NewConfigReporterLoader(reporter) + if err := cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to alerts retry delays config") + } + delays, err := parseCommaSeparatedDurations(reporter.GetWithDefault("delays", "0s")) + if err != nil { + return errors.Wrap(err, "Unable to read configured alerts retry delays") + } + + eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), delays) + runner := dataEvents.NewSaramaRunner(eventsRunner) + if err := runner.Initialize(); err != nil { + return errors.Wrap(err, "Unable to initialize alerts events handler runner") + } + s.alertsEventsHandler = runner + + return nil +} + +func parseCommaSeparatedDurations(s string) ([]time.Duration, error) { + out := []time.Duration{} + for _, d := range strings.Split(s, ",") { + if d == "" { + continue + } + dur, err := time.ParseDuration(d) + if err != nil { + return nil, err + } + out = append(out, dur) + } + return out, nil +} diff --git a/data/store/mongo/mongo.go b/data/store/mongo/mongo.go index 8ebfa97239..7bcccfd252 100644 --- a/data/store/mongo/mongo.go +++ b/data/store/mongo/mongo.go @@ -29,6 +29,7 @@ func (s *Store) EnsureIndexes() error { dataRepository := s.NewDataRepository() summaryRepository := s.NewSummaryRepository() alertsRepository := s.NewAlertsRepository() + lastCommunicationsRepository := s.NewLastCommunicationsRepository() if err := dataRepository.EnsureIndexes(); err != nil { return err @@ -42,6 +43,10 @@ func (s *Store) EnsureIndexes() error { return err } + if err := lastCommunicationsRepository.EnsureIndexes(); err != nil { + return err + } + return nil } @@ -66,3 +71,8 @@ func (s *Store) NewAlertsRepository() alerts.Repository { r := alertsRepo(*s.Store.GetRepository("alerts")) return &r } + +func (s *Store) NewLastCommunicationsRepository() alerts.LastCommunicationsRepository { + r := lastCommunicationsRepo(*s.Store.GetRepository("lastCommunications")) + return &r +} diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index 
ee313f3ffb..91f2d90196 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -9,6 +9,7 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -16,9 +17,21 @@ import ( type alertsRepo structuredmongo.Repository // Upsert will create or update the given Config. -func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { +// +// Once set, UploadID, UserID, and FollowedUserID cannot be changed. This is to prevent a +// user from granting themselves access to another data set. +func (a *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { opts := options.Update().SetUpsert(true) - _, err := r.UpdateOne(ctx, r.filter(conf), bson.M{"$set": conf}, opts) + filter := bson.D{ + {Key: "userId", Value: conf.UserID}, + {Key: "followedUserId", Value: conf.FollowedUserID}, + {Key: "uploadId", Value: conf.UploadID}, + } + doc := bson.M{ + "$set": bson.M{"alerts": conf.Alerts, "activity": conf.Activity}, + "$setOnInsert": filter, + } + _, err := a.UpdateOne(ctx, filter, doc, opts) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } @@ -26,17 +39,37 @@ func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { } // Delete will delete the given Config. -func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { - _, err := r.DeleteMany(ctx, r.filter(cfg), nil) +func (a *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { + _, err := a.DeleteMany(ctx, a.filter(cfg), nil) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } return nil } +// List will retrieve any Configs that are defined by followers of the given user. +func (a *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { + filter := bson.D{ + {Key: "followedUserId", Value: followedUserID}, + } + cursor, err := a.Find(ctx, filter, nil) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list alerts.Config(s) for followed user %s", followedUserID) + } + defer cursor.Close(ctx) + out := []*alerts.Config{} + if err := cursor.All(ctx, &out); err != nil { + return nil, errors.Wrapf(err, "Unable to decode alerts.Config(s) for followed user %s", followedUserID) + } + if err := cursor.Err(); err != nil { + return nil, errors.Wrapf(err, "Unexpected error for followed user %s", followedUserID) + } + return out, nil +} + // Get will retrieve the given Config. -func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { - res := r.FindOne(ctx, r.filter(cfg), nil) +func (a *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { + res := a.FindOne(ctx, a.filter(cfg), nil) if res.Err() != nil { return nil, fmt.Errorf("getting alerts.Config: %w", res.Err()) } @@ -48,8 +81,8 @@ func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Confi } // EnsureIndexes to maintain index constraints. 
-func (r *alertsRepo) EnsureIndexes() error { - repo := structuredmongo.Repository(*r) +func (a *alertsRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*a) return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ { Keys: bson.D{ @@ -63,9 +96,9 @@ func (r *alertsRepo) EnsureIndexes() error { }) } -func (r *alertsRepo) filter(cfg *alerts.Config) interface{} { - return &alerts.Config{ - UserID: cfg.UserID, - FollowedUserID: cfg.FollowedUserID, +func (a *alertsRepo) filter(cfg *alerts.Config) interface{} { + return bson.D{ + {Key: "userId", Value: cfg.UserID}, + {Key: "followedUserId", Value: cfg.FollowedUserID}, } } diff --git a/data/store/mongo/mongo_data.go b/data/store/mongo/mongo_data.go index 4076a968af..62789b9c67 100644 --- a/data/store/mongo/mongo_data.go +++ b/data/store/mongo/mongo_data.go @@ -8,9 +8,14 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" "github.com/tidepool-org/platform/data/store" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" + "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/data/types/upload" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -195,10 +200,56 @@ func (d *DataRepository) DestroyDataForUserByID(ctx context.Context, userID stri return nil } -func (d *DataRepository) mongoClient() *mongo.Client { - return d.DatumRepository.Database().Client() -} - func isTypeUpload(typ []string) bool { return slices.Contains(typ, strings.ToLower(upload.Type)) } + +func (d *DataRepository) GetAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { + + if params.End.IsZero() { + params.End = time.Now() + } + + cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) + if err != nil { + return nil, err + } + dosingDecisions := []*dosingdecision.DosingDecision{} + if err := cursor.All(ctx, &dosingDecisions); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable dosing documents") + } + cursor, err = d.getAlertableData(ctx, params, continuous.Type) + if err != nil { + return nil, err + } + glucoseData := []*glucose.Glucose{} + if err := cursor.All(ctx, &glucoseData); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable glucose documents") + } + response := &alerts.GetAlertableDataResponse{ + DosingDecisions: dosingDecisions, + Glucose: glucoseData, + } + + return response, nil +} + +func (d *DataRepository) getAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams, typ string) (*mongo.Cursor, error) { + + selector := bson.M{ + "_active": true, + "uploadId": params.UploadID, + "type": typ, + "_userId": params.UserID, + "time": bson.M{"$gte": params.Start, "$lte": params.End}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) + cursor, err := d.DatumRepository.Find(ctx, selector, findOptions) + if err != nil { + format := "Unable to find alertable %s data in dataset %s" + return nil, errors.Wrapf(err, format, typ, params.UploadID) + } + return cursor, nil +} diff --git a/data/store/mongo/mongo_datum.go b/data/store/mongo/mongo_datum.go index 977c6f813b..159c7ec93f 100644 --- a/data/store/mongo/mongo_datum.go +++ b/data/store/mongo/mongo_datum.go @@ -11,14 +11,9 @@ 
import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/summary/types" baseDatum "github.com/tidepool-org/platform/data/types" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" - "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/data/types/upload" - platerrors "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" structureValidator "github.com/tidepool-org/platform/structure/validator" @@ -646,56 +641,6 @@ func (d *DatumRepository) GetDataRange(ctx context.Context, userId string, typ [ return cursor, nil } -func (d *DatumRepository) GetAlertableData(ctx context.Context, - params store.AlertableParams) (*store.AlertableResponse, error) { - - if params.End.IsZero() { - params.End = time.Now() - } - - cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) - if err != nil { - return nil, err - } - dosingDecisions := []*dosingdecision.DosingDecision{} - if err := cursor.All(ctx, &dosingDecisions); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable dosing documents") - } - cursor, err = d.getAlertableData(ctx, params, continuous.Type) - if err != nil { - return nil, err - } - glucoseData := []*glucose.Glucose{} - if err := cursor.All(ctx, &glucoseData); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable glucose documents") - } - response := &store.AlertableResponse{ - DosingDecisions: dosingDecisions, - Glucose: glucoseData, - } - - return response, nil -} - -func (d *DatumRepository) getAlertableData(ctx context.Context, - params store.AlertableParams, typ string) (*mongo.Cursor, error) { - - selector := bson.M{ - "_active": true, - "uploadId": params.UploadID, - "type": typ, - "_userId": params.UserID, - "time": bson.M{"$gte": params.Start, "$lte": params.End}, - } - findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) - cursor, err := d.Find(ctx, selector, findOptions) - if err != nil { - format := "Unable to find alertable %s data in dataset %s" - return nil, platerrors.Wrapf(err, format, typ, params.UploadID) - } - return cursor, nil -} - func (d *DatumRepository) getTimeRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (err error) { timestamp := time.Now().UTC() futureCutoff := timestamp.AddDate(0, 0, 1) diff --git a/data/store/mongo/mongo_last_communications.go b/data/store/mongo/mongo_last_communications.go new file mode 100644 index 0000000000..b620e07094 --- /dev/null +++ b/data/store/mongo/mongo_last_communications.go @@ -0,0 +1,77 @@ +package mongo + +import ( + "context" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" +) + +// lastCommunicationsRepo implements LastCommunicationsRepository, writing data to a +// MongoDB collection. 
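The repository that follows keys its upserts on (userId, dataSetId) and answers OverdueCommunications with a single range query over lastReceivedDeviceData. A rough sketch of that query shape, assuming the "lastCommunications" collection name used by the integration tests below and a placeholder five-minute cutoff standing in for alerts.MinimumNoCommunicationDelay (whose real value is defined elsewhere):

package example

import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// overdueThreshold is a placeholder; the production code uses alerts.MinimumNoCommunicationDelay.
const overdueThreshold = 5 * time.Minute

// findOverdue returns user/data-set pairs whose last upload is older than the
// cutoff, oldest first, mirroring the OverdueCommunications query below.
func findOverdue(ctx context.Context, db *mongo.Database) ([]bson.M, error) {
	cutoff := time.Now().Add(-overdueThreshold)
	filter := bson.M{"lastReceivedDeviceData": bson.M{"$lte": cutoff}}
	opts := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}})
	cursor, err := db.Collection("lastCommunications").Find(ctx, filter, opts)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(ctx)
	out := []bson.M{}
	if err := cursor.All(ctx, &out); err != nil {
		return nil, err
	}
	return out, nil
}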
+type lastCommunicationsRepo structuredmongo.Repository + +func (l *lastCommunicationsRepo) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + opts := options.Update().SetUpsert(true) + _, err := l.UpdateOne(ctx, l.filter(lastComm), bson.M{"$set": lastComm}, opts) + if err != nil { + return fmt.Errorf("upserting alerts.LastCommunication: %w", err) + } + return nil +} + +func (l *lastCommunicationsRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*l) + return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "lastReceivedDeviceData", Value: 1}, + }, + Options: options.Index(). + SetName("LastReceivedDeviceData"), + }, + { + Keys: bson.D{ + {Key: "dataSetId", Value: 1}, + }, + Options: options.Index(). + SetUnique(true). + SetName("DataSetIdUnique"), + }, + }) +} + +func (l *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[string]any { + return map[string]any{ + "userId": lastComm.UserID, + "dataSetId": lastComm.DataSetID, + } +} + +func (l *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( + []alerts.LastCommunication, error) { + + start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) + selector := bson.M{ + "lastReceivedDeviceData": bson.M{"$lte": start}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) + cursor, err := l.Find(ctx, selector, findOptions) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list overdue records") + } + records := []alerts.LastCommunication{} + if err := cursor.All(ctx, &records); err != nil { + return nil, errors.Wrapf(err, "Unable to iterate overdue records") + } + return records, nil +} diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index 7b0a0fd2a4..465548a960 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -2,6 +2,7 @@ package mongo_test import ( "context" + "encoding/json" "fmt" "math/rand" "sync" @@ -17,6 +18,7 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" + "github.com/tidepool-org/platform/data/service/api/v1/mocks" dataStore "github.com/tidepool-org/platform/data/store" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" dataTest "github.com/tidepool-org/platform/data/test" @@ -238,8 +240,10 @@ func DataSetDatumAsInterface(dataSetDatum data.Datum) interface{} { var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var repository dataStore.DataRepository + var alertsDataRepository alerts.DataRepository var summaryRepository dataStore.SummaryRepository var alertsRepository alerts.Repository + var lastCommunicationsRepository alerts.LastCommunicationsRepository var logger = logTest.NewLogger() var store *dataStoreMongo.Store @@ -266,6 +270,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var dataSetCollection *mongo.Collection var summaryCollection *mongo.Collection var alertsCollection *mongo.Collection + var recordsCollection *mongo.Collection var collectionsOnce sync.Once BeforeEach(func() { @@ -274,6 +279,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { dataSetCollection = store.GetCollection("deviceDataSets") summaryCollection = store.GetCollection("summary") alertsCollection = store.GetCollection("alerts") + recordsCollection = store.GetCollection("lastCommunications") }) }) @@ -289,6 +295,8 @@ var _ = Describe("Mongo", 
Label("mongodb", "slow", "integration"), func() { Expect(err).To(Succeed()) _, err = alertsCollection.DeleteMany(ctx, all) Expect(err).To(Succeed()) + _, err = recordsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) }) Context("EnsureIndexes", func() { @@ -445,14 +453,24 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { }) }) + Context("NewLastCommunicationsRepository", func() { + It("returns a new repository", func() { + lastCommunicationsRepository = store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) + }) + }) + Context("with a new repository", func() { BeforeEach(func() { repository = store.NewDataRepository() summaryRepository = store.NewSummaryRepository() alertsRepository = store.NewAlertsRepository() + alertsDataRepository = store.NewDataRepository() + lastCommunicationsRepository = store.NewLastCommunicationsRepository() Expect(repository).ToNot(BeNil()) Expect(summaryRepository).ToNot(BeNil()) Expect(alertsRepository).ToNot(BeNil()) + Expect(alertsDataRepository).ToNot(BeNil()) }) Context("with persisted data sets", func() { @@ -2409,13 +2427,15 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(repository.CreateDataSet(ctx, testSet)).To(Succeed()) testSetData := testDataSetData(testSet) Expect(repository.CreateDataSetData(ctx, testSet, testSetData)).To(Succeed()) + alertsDataRepository = store.NewDataRepository() + Expect(alertsDataRepository).ToNot(BeNil()) - params := dataStore.AlertableParams{ + params := alerts.GetAlertableDataParams{ Start: time.Now().Add(-time.Hour), UserID: testUserID, UploadID: *testSet.UploadID, } - resp, err := repository.GetAlertableData(ctx, params) + resp, err := alertsDataRepository.GetAlertableData(ctx, params) Expect(err).To(Succeed()) Expect(resp).ToNot(BeNil()) @@ -2427,6 +2447,11 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Context("alerts", func() { BeforeEach(func() { + var err error + ctx := context.Background() + all := bson.D{} + _, err = alertsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) alertsRepository = store.NewAlertsRepository() Expect(alertsRepository).ToNot(BeNil()) }) @@ -2439,8 +2464,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { ctx := context.Background() filter := bson.M{} if upsertDoc { - Expect(alertsRepository.Upsert(ctx, cfg)). 
- To(Succeed()) + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) filter["userId"] = cfg.UserID filter["followedUserId"] = cfg.FollowedUserID } @@ -2463,7 +2487,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { It("updates the existing document", func() { ctx, cfg, filter := prep(true) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) @@ -2471,10 +2495,72 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { res := store.GetCollection("alerts").FindOne(ctx, filter) Expect(res.Err()).To(Succeed()) Expect(res.Decode(doc)).To(Succeed()) - Expect(doc.Low).ToNot(BeNil()) - Expect(doc.Low.Base.Enabled).To(Equal(true)) + jsonOut, _ := json.Marshal(doc) + Expect(doc.Alerts.Low).ToNot(BeNil(), string(jsonOut)) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) }) + It("sets userId, followedUserId, and uploadId only on creation", func() { + ctx, cfg, filter := prep(false) + cfg.UploadID = "something" + + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + doc := &alerts.Config{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UploadID).To(Equal("something")) + + testDelay := 42 * time.Minute + doc.Alerts.Low = &alerts.LowAlert{} + doc.Alerts.Low.Delay = alerts.DurationMinutes(testDelay) + doc.UploadID = "something else" + doc.UserID = "new junk" + doc.FollowedUserID = "this shouldn't be" + + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + res = store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UploadID).To(Equal("something")) + Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.Alerts.Low.Delay.Duration()).To(Equal(testDelay)) + }) + + It("updates the Config's Activity", func() { + ctx, cfg, filter := prep(true) + testSent := time.Now().Add(-3 * time.Minute) + testTriggered := time.Now().Add(-5 * time.Minute) + cfg.Alerts.Low = &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true, + // Activity: alerts.AlertActivity{ + // Triggered: testTriggered, + // Sent: testSent, + // // Resolved is unset, so it should be a zero value. 
+ // }, + }, + } + cfg.Activity.Low.Sent = testSent + cfg.Activity.Low.Triggered = testTriggered + + err := alertsRepository.Upsert(ctx, cfg) + Expect(err).To(Succeed()) + + doc := &alerts.Config{} + //raw := map[string]any{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.Alerts.Low).ToNot(BeNil()) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) + Expect(doc.Activity.Low.Triggered).To(BeTemporally("~", testTriggered, time.Millisecond)) + Expect(doc.Activity.Low.Sent).To(BeTemporally("~", testSent, time.Millisecond)) + Expect(doc.Activity.Low.Resolved).To(Equal(time.Time{})) + }) }) Describe("Get", func() { @@ -2493,20 +2579,23 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { UserID: "879d5cb2-f70d-4b05-8d38-fb6d88ef2ea9", FollowedUserID: "d2ee01db-3458-42ac-95d2-ac2fc571a21d", Alerts: alerts.Alerts{ + // DataAlerts: alerts.DataAlerts{ High: &alerts.HighAlert{ Base: alerts.Base{Enabled: true}, }, - }} + // }, + }, + } Expect(alertsRepository.Upsert(ctx, other)).To(Succeed()) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) got, err := alertsRepository.Get(ctx, cfg) Expect(err).To(Succeed()) Expect(got).ToNot(BeNil()) - Expect(got.Low).ToNot(BeNil()) - Expect(got.Low.Enabled).To(Equal(true)) + Expect(got.Alerts.Low).ToNot(BeNil()) + Expect(got.Alerts.Low.Enabled).To(Equal(true)) Expect(got.UserID).To(Equal(cfg.UserID)) Expect(got.FollowedUserID).To(Equal(cfg.FollowedUserID)) }) @@ -2523,6 +2612,70 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(res.Err()).To(MatchError(mongo.ErrNoDocuments)) }) }) + + Describe("List", func() { + It("lists only matching configs", func() { + ctx, cfg, _ := prep(true) + cfg2 := &alerts.Config{ + FollowedUserID: "followed-user-id-2", + UserID: "user-id", + } + Expect(alertsRepository.Upsert(ctx, cfg2)).To(Succeed()) + cfg3 := &alerts.Config{ + FollowedUserID: "followed-user-id", + UserID: "user-id-2", + } + Expect(alertsRepository.Upsert(ctx, cfg3)).To(Succeed()) + + got, err := alertsRepository.List(ctx, cfg.FollowedUserID) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(2)) + }) + }) + }) + + Context("LastCommunicationsRecorder", func() { + BeforeEach(func() { + lastCommunicationsRepository = store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) + }) + + Describe("OverdueCommunications", func() { + It("retrieves matching records", func() { + ctx := context.Background() + got, err := lastCommunicationsRepository.OverdueCommunications(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(0)) + }) + + It("retrieves matching records2", func() { + ctx := context.Background() + testLastComm := alerts.LastCommunication{ + UserID: testUserID, + DataSetID: testDataSetID, + LastReceivedDeviceData: time.Unix(123, 456), + } + Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm)).To(Succeed()) + testLastComm2 := alerts.LastCommunication{ + UserID: testUserID + "2", + DataSetID: testDataSetID + "2", + LastReceivedDeviceData: time.Now(), + } + Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm2)).To(Succeed()) + + got, err := lastCommunicationsRepository.OverdueCommunications(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(1)) + }) + + 
It("is true", func() { + Expect(true).To(BeTrue()) + }) + }) }) }) }) + +var testUserID = mocks.TestUserID1 + +const testDataSetID = "blah" diff --git a/data/store/store.go b/data/store/store.go index 7410d76c88..f68649d0d0 100644 --- a/data/store/store.go +++ b/data/store/store.go @@ -7,9 +7,6 @@ import ( "go.mongodb.org/mongo-driver/mongo" "github.com/tidepool-org/platform/alerts" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/dosingdecision" - "github.com/tidepool-org/platform/data" "github.com/tidepool-org/platform/data/types/upload" "github.com/tidepool-org/platform/page" @@ -23,6 +20,7 @@ type Store interface { NewDataRepository() DataRepository NewSummaryRepository() SummaryRepository NewAlertsRepository() alerts.Repository + NewLastCommunicationsRepository() alerts.LastCommunicationsRepository } // DataSetRepository is the interface for interacting and modifying @@ -65,16 +63,14 @@ type DatumRepository interface { GetDataRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (*mongo.Cursor, error) GetLastUpdatedForUser(ctx context.Context, userId string, typ []string, lastUpdated time.Time) (*data.UserDataStatus, error) DistinctUserIDs(ctx context.Context, typ []string) ([]string, error) - - // GetAlertableData queries for the data used to evaluate alerts configurations. - GetAlertableData(ctx context.Context, params AlertableParams) (*AlertableResponse, error) } -// DataRepository is the combined interface of DataSetRepository and -// DatumRepository. +// DataRepository is the combined interface of DataSetRepository, +// DatumRepository, and [alerts.DataRepository]. type DataRepository interface { DataSetRepository DatumRepository + alerts.DataRepository } type Filter struct { @@ -98,19 +94,3 @@ type SummaryRepository interface { GetStore() *storeStructuredMongo.Repository } - -type AlertableParams struct { - // UserID of the user that owns the data. - UserID string - // UploadID of the device data set to query. - UploadID string - // Start limits the data to those recorded after this time. - Start time.Time - // End limits the data to those recorded before this time. 
- End time.Time -} - -type AlertableResponse struct { - Glucose []*glucose.Glucose - DosingDecisions []*dosingdecision.DosingDecision -} diff --git a/data/store/test/data_repository.go b/data/store/test/data_repository.go index ff5a857bcd..e1dd86df33 100644 --- a/data/store/test/data_repository.go +++ b/data/store/test/data_repository.go @@ -8,6 +8,7 @@ import ( "github.com/onsi/gomega" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" dataStore "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/types/upload" @@ -182,11 +183,11 @@ type DistinctUserIDsOutput struct { type GetAlertableDataInput struct { Context context.Context - Params dataStore.AlertableParams + Params alerts.GetAlertableDataParams } type GetAlertableDataOutput struct { - Response *dataStore.AlertableResponse + Response *alerts.GetAlertableDataResponse Error error } @@ -528,7 +529,7 @@ func (d *DataRepository) DistinctUserIDs(ctx context.Context, typ []string) ([]s return output.UserIDs, output.Error } -func (d *DataRepository) GetAlertableData(ctx context.Context, params dataStore.AlertableParams) (*dataStore.AlertableResponse, error) { +func (d *DataRepository) GetAlertableData(ctx context.Context, params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { d.GetAlertableDataInvocations++ d.GetAlertableDataInputs = append(d.GetAlertableDataInputs, GetAlertableDataInput{Context: ctx, Params: params}) diff --git a/devicetokens/devicetokens.go b/devicetokens/devicetokens.go index a8fb790a3d..fc901187f7 100644 --- a/devicetokens/devicetokens.go +++ b/devicetokens/devicetokens.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "github.com/tidepool-org/platform/structure" @@ -52,6 +53,14 @@ type DeviceToken struct { Apple *AppleDeviceToken `json:"apple,omitempty" bson:"apple,omitempty"` } +func (t DeviceToken) String() string { + b, err := json.Marshal(t) + if err != nil { + return "" + } + return string(b) +} + // key provides a unique string value to identify this device token. // // Intended to be used as part of a unique index for database indexes. @@ -100,6 +109,7 @@ type AppleBlob []byte // Repository abstracts persistent storage for Token data. 
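The String method added to DeviceToken above serializes the token as JSON, which presumably keeps tokens readable when they are attached to log fields (as the LogPusher later in this diff does). A toy illustration with stand-in types, not the real devicetokens definitions:

package main

import (
	"encoding/json"
	"fmt"
)

// stand-ins for devicetokens.DeviceToken / AppleDeviceToken, reduced to two fields.
type appleDeviceToken struct {
	Token       []byte `json:"token"`
	Environment string `json:"environment"`
}

type deviceToken struct {
	Apple *appleDeviceToken `json:"apple,omitempty"`
}

func (t deviceToken) String() string {
	b, err := json.Marshal(t)
	if err != nil {
		return ""
	}
	return string(b)
}

func main() {
	t := deviceToken{Apple: &appleDeviceToken{Token: []byte("abc"), Environment: "sandbox"}}
	// %v picks up the Stringer, so a logged token renders as JSON
	// ([]byte fields are base64-encoded by encoding/json).
	fmt.Printf("%v\n", t) // {"apple":{"token":"YWJj","environment":"sandbox"}}
}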
type Repository interface { + GetAllByUserID(ctx context.Context, userID string) ([]*Document, error) Upsert(ctx context.Context, doc *Document) error EnsureIndexes() error diff --git a/dexcom/fetch/test/mock.go b/dexcom/fetch/test/mock.go index 1ea79b2471..65e6c878a1 100644 --- a/dexcom/fetch/test/mock.go +++ b/dexcom/fetch/test/mock.go @@ -10,7 +10,6 @@ import ( time "time" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" data "github.com/tidepool-org/platform/data" source "github.com/tidepool-org/platform/data/source" diff --git a/go.mod b/go.mod index 646c4bb7d0..759f978210 100644 --- a/go.mod +++ b/go.mod @@ -21,9 +21,10 @@ require ( github.com/onsi/gomega v1.33.1 github.com/prometheus/client_golang v1.19.1 github.com/rinchsan/device-check-go v1.3.0 + github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 - github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 + github.com/tidepool-org/go-common v0.12.2 github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace github.com/urfave/cli v1.22.15 go.mongodb.org/mongo-driver v1.16.0 diff --git a/go.sum b/go.sum index 295990bf55..44733686bc 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/ant0ine/go-json-rest v3.3.2+incompatible h1:nBixrkLFiDNAW0hauKDLc8yJI6XfrQumWvytE1Hk14E= github.com/ant0ine/go-json-rest v3.3.2+incompatible/go.mod h1:q6aCt0GfU6LhpBsnZ/2U+mwe+0XB5WStbmwyoPfc+sk= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= @@ -53,6 +55,7 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -162,6 +165,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sideshow/apns2 v0.23.0 h1:lpkikaZ995GIcKk6AFsYzHyezCrsrfEDvUWcWkEGErY= +github.com/sideshow/apns2 v0.23.0/go.mod h1:7Fceu+sL0XscxrfLSkAoH6UtvKefq3Kq1n4W3ayQZqE= github.com/spkg/bom 
v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -180,8 +185,8 @@ github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 h1:fTIg github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2/go.mod h1:7BpAdFdGJNB3aw/xvCz5XnWjSWRoUtWIX4xcMc4Bsko= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 h1:1kiZtHhs++yXayRD/Mh/3POLwtmxV99YR2bSCle1Q74= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5/go.mod h1:xuQ8k0mLR1ZyEmwe/m0v2BuXctqQuCZeR43urSQpTUM= -github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 h1:WCOSrazmNv7KdjIJafWyHkLkHNp2SsoLAm6OXp2rAco= -github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733/go.mod h1:BeqsQcDwfSsmnmc+/N/EOT8h3m8/YtqrLNykk5kGkv4= +github.com/tidepool-org/go-common v0.12.2 h1:3mse3wJtq5irbgdCz3LeEfs8XE9oDX9kzDcHuWNW/jw= +github.com/tidepool-org/go-common v0.12.2/go.mod h1:BeqsQcDwfSsmnmc+/N/EOT8h3m8/YtqrLNykk5kGkv4= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace h1:L0UiCj2eL/NOpLa19Tf5IgoK6feILmdA+zK3nCTIhqU= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace/go.mod h1:gon+x+jAh8DZZ2hD23fBWqrYwOizVSwIBbxEsuXCbZ4= github.com/ugorji/go v1.2.4/go.mod h1:EuaSCk8iZMdIspsu6HXH7X2UGKw1ezO4wCfGszGmmo4= @@ -218,6 +223,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20170512130425-ab89591268e0/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -238,6 +244,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -259,6 +266,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -300,6 +308,7 @@ google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/log/devlog/devlog.go b/log/devlog/devlog.go index 2800cfd2a0..f1dc98193b 100644 --- a/log/devlog/devlog.go +++ b/log/devlog/devlog.go @@ -15,6 +15,7 @@ import ( "fmt" "io" stdlog "log" + "os" "sort" "strings" "time" @@ -83,7 +84,20 @@ func (s *serializer) Serialize(fields log.Fields) error { if len(pairs) > 0 { rest = ": " + strings.Join(pairs, " ") } - s.Logger.Printf(msgTime + " " + msgLevel + " " + msg + rest) + prefixes := []string{} + prefixes = append(prefixes, msgTime) + // HOSTNAME is set on Kubernetes pods and is useful for distinguishing logs from an + // outgoing Pod vs a newly created Pod. + if h := os.Getenv("HOSTNAME"); h != "" { + pieces := strings.Split(h, "-") + if len(pieces) > 0 { + prefixes = append(prefixes, pieces[len(pieces)-1]) + } else { + prefixes = append(prefixes, h) + } + } + prefixes = append(prefixes, msgLevel) + s.Logger.Print(strings.Join(prefixes, " ") + " " + msg + rest) return nil } diff --git a/log/sarama.go b/log/sarama.go new file mode 100644 index 0000000000..d09576c5a6 --- /dev/null +++ b/log/sarama.go @@ -0,0 +1,35 @@ +package log + +import ( + "fmt" + "strings" + + "github.com/IBM/sarama" +) + +// NewSarama returns a [Logger] adapted to implement [sarama.StdLogger]. +func NewSarama(l Logger) sarama.StdLogger { + return &SaramaLogger{Logger: l.WithField("SARAMA", "1")} +} + +// SaramaLogger wraps a [Logger] to implement [sarama.StdLogger]. +// +// Sarama doesn't support the concept of logging levels, so all messages will +// use the info level. +type SaramaLogger struct { + Logger +} + +func (l *SaramaLogger) Print(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} + +func (l *SaramaLogger) Printf(format string, args ...interface{}) { + // Sarama log messages sent via this method include a newline, which + // doesn't fit with Logger's style, so remove it. + l.Logger.Infof(strings.TrimSuffix(format, "\n"), args...) +} + +func (l *SaramaLogger) Println(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} diff --git a/log/sarama_test.go b/log/sarama_test.go new file mode 100644 index 0000000000..6fccd5e256 --- /dev/null +++ b/log/sarama_test.go @@ -0,0 +1,47 @@ +package log_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewSarama", func() { + It("initializes a new sarama log adapter", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + }) + + It("implements Print", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Print("testing 1 2 3") + + testLog.AssertInfo("testing 1 2 3") + }) + + It("implements Printf", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Printf("testing %s", "4 5 6") + + testLog.AssertInfo("testing 4 5 6") + }) + + It("implements Println", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Println("testing 7 8 9") + + testLog.AssertInfo("testing 7 8 9") + }) +}) diff --git a/push/logpush.go b/push/logpush.go new file mode 100644 index 0000000000..41e772260d --- /dev/null +++ b/push/logpush.go @@ -0,0 +1,41 @@ +package push + +import ( + "context" + "os" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" +) + +// LogPusher logs notifications instead of sending push notifications. +// +// Useful for dev or testing situations. +type LogPusher struct { + log.Logger +} + +// NewLogPusher uses a [log.Logger] instead of pushing via APNs. +func NewLogPusher(l log.Logger) *LogPusher { + if l == nil { + var err error + l, err = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + l = lognull.NewLogger() + } + } + return &LogPusher{Logger: l} +} + +// Push implements [service.Pusher]. +func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + + p.Logger.WithFields(log.Fields{ + "deviceToken": deviceToken, + "notification": notification, + }).Info("logging push notification") + return nil +} diff --git a/push/logpush_test.go b/push/logpush_test.go new file mode 100644 index 0000000000..f8f8611237 --- /dev/null +++ b/push/logpush_test.go @@ -0,0 +1,50 @@ +package push + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewLogPusher", func() { + It("succeeds", func() { + testLog := logtest.NewLogger() + + Expect(NewLogPusher(testLog)).ToNot(Equal(nil)) + }) + + It("implements Push by logging a message", func() { + testLog := logtest.NewLogger() + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(testLog) + Expect(pusher).ToNot(Equal(nil)) + + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + testFields := log.Fields{ + "deviceToken": testToken, + "notification": testNotification, + } + testLog.AssertInfo("logging push notification", testFields) + }) + + It("handles being passed a nil logger", func() { + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(nil) + Expect(pusher).ToNot(Equal(nil)) + + Expect(func() { + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + }).ToNot(Panic()) + }) +}) diff --git a/push/push.go b/push/push.go new file mode 100644 index 0000000000..47323989ad --- /dev/null +++ b/push/push.go @@ -0,0 +1,131 @@ +// Package push provides clients for sending mobile device push notifications. +package push + +import ( + "context" + "encoding/hex" + "net/http" + "sync" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/payload" + "github.com/sideshow/apns2/token" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" +) + +// Notification models a provider-independent push notification. +type Notification struct { + Message string +} + +// String implements fmt.Stringer. +func (n Notification) String() string { + return n.Message +} + +// APNSPusher implements push notifications via Apple APNs. +type APNSPusher struct { + BundleID string + + client APNS2Client + clientMu sync.Mutex +} + +// NewAPNSPusher creates an APNSPusher for sending device notifications via Apple's APNs. +func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { + return &APNSPusher{ + BundleID: bundleID, + client: client, + } +} + +func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + + if deviceToken.Apple == nil { + return errors.New("Unable to push notification: APNSPusher can only use Apple device tokens but the Apple token is nil") + } + + hexToken := hex.EncodeToString(deviceToken.Apple.Token) + appleNotification := p.buildAppleNotification(hexToken, notification) + resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNotification) + if err != nil { + return errors.Wrap(err, "Unable to push notification") + } + if resp.StatusCode != http.StatusOK { + return errors.Newf("Unable to push notification: APNs returned non-200 status: %d, %s", resp.StatusCode, resp.Reason) + } + if logger := log.LoggerFromContext(ctx); logger != nil { + logger.WithFields(log.Fields{ + "apnsID": resp.ApnsID, + }).Info("notification pushed") + } + + return nil +} + +// safePush guards the environment setup and push method with a mutex. +// +// This prevents the environment from being changed out from under +// you. Unlikely, but better safe than sorry. 
+func (p *APNSPusher) safePush(ctx context.Context, env string, notification *apns2.Notification) ( + *apns2.Response, error) { + + p.clientMu.Lock() + defer p.clientMu.Unlock() + if env == devicetokens.AppleEnvProduction { + p.client.Production() + } else { + p.client.Development() + } + return p.client.PushWithContext(ctx, notification) +} + +func (p *APNSPusher) buildAppleNotification(hexToken string, notification *Notification) *apns2.Notification { + payload := payload.NewPayload(). + Alert(notification.Message). + AlertBody(notification.Message) + return &apns2.Notification{ + DeviceToken: hexToken, + Payload: payload, + Topic: p.BundleID, + } +} + +// APNS2Client abstracts the apns2 library for easier testing. +type APNS2Client interface { + Development() APNS2Client + Production() APNS2Client + PushWithContext(apns2.Context, *apns2.Notification) (*apns2.Response, error) +} + +// apns2Client adapts the apns2.Client to APNS2Client so it can be replaced for testing. +type apns2Client struct { + *apns2.Client +} + +func NewAPNS2Client(signingKey []byte, keyID, teamID string) (*apns2Client, error) { + authKey, err := token.AuthKeyFromBytes(signingKey) + if err != nil { + return nil, err + } + token := &token.Token{ + AuthKey: authKey, + KeyID: keyID, + TeamID: teamID, + } + return &apns2Client{apns2.NewTokenClient(token)}, nil +} + +func (c apns2Client) Development() APNS2Client { + d := c.Client.Development() + return &apns2Client{Client: d} +} + +func (c apns2Client) Production() APNS2Client { + p := c.Client.Production() + return &apns2Client{Client: p} +} diff --git a/push/push_suite_test.go b/push/push_suite_test.go new file mode 100644 index 0000000000..a5b73e9d49 --- /dev/null +++ b/push/push_suite_test.go @@ -0,0 +1,11 @@ +package push + +import ( + "testing" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} diff --git a/push/push_test.go b/push/push_test.go new file mode 100644 index 0000000000..5922f85e25 --- /dev/null +++ b/push/push_test.go @@ -0,0 +1,147 @@ +package push + +import ( + "context" + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/sideshow/apns2" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + testlog "github.com/tidepool-org/platform/log/test" +) + +const ( + testBundleID = "test-bundle-id" +) + +var ( + testDeviceToken []byte = []byte("dGVzdGluZyAxIDIgMw==") +) + +type pushTestDeps struct { + Client *mockAPNS2Client + Token *devicetokens.DeviceToken + Notification *Notification +} + +func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { + ctx := context.Background() + mockClient := &mockAPNS2Client{ + Response: &apns2.Response{ + StatusCode: http.StatusOK, + }, + } + pusher := NewAPNSPusher(mockClient, testBundleID) + deps := &pushTestDeps{ + Client: mockClient, + Token: &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: testDeviceToken, + }, + }, + Notification: &Notification{}, + } + return ctx, pusher, deps +} + +var _ = Describe("APNSPusher", func() { + Describe("Push", func() { + It("requires an Apple token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple = nil + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("can only use Apple device tokens"))) + }) + + Context("its environment", func() { + + for _, env := range []string{devicetokens.AppleEnvProduction, devicetokens.AppleEnvSandbox} { + It("is set via its token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple.Environment = env + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + // This is reaching into the implementation of + // APNS2Client, but there's no other way to test this. + Expect(deps.Client.Env).To(Equal(env)) + }) + } + }) + + It("reports upstream errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Error = fmt.Errorf("test error") + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("test error"))) + }) + + Context("when a logger is available", func() { + It("logs", func() { + ctx, pusher, deps := testDeps() + testLogger := testlog.NewLogger() + ctx = log.NewContextWithLogger(ctx, testLogger) + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusOK, + ApnsID: "test-id", + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + testLogger.AssertInfo("notification pushed", log.Fields{ + "apnsID": "test-id", + }) + }) + }) + + It("reports non-200 responses as errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusBadRequest, + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("APNs returned non-200 status"))) + }) + }) +}) + +type mockAPNS2Client struct { + Response *apns2.Response + Error error + Env string +} + +func (c *mockAPNS2Client) Development() APNS2Client { + c.Env = devicetokens.AppleEnvSandbox + return c +} + +func (c *mockAPNS2Client) Production() APNS2Client { + c.Env = devicetokens.AppleEnvProduction + return c +} + +func (c *mockAPNS2Client) PushWithContext(_ apns2.Context, _ *apns2.Notification) (*apns2.Response, error) { + if c.Error != nil { + return nil, c.Error + } + if c.Response != nil { + return c.Response, nil + } + return nil, nil +} diff --git a/task/queue/queue.go b/task/queue/queue.go index 7e3dfe0203..39f50524ad 100644 --- a/task/queue/queue.go 
+++ b/task/queue/queue.go @@ -374,7 +374,10 @@ func (q *queue) completeTask(ctx context.Context, tsk *task.Task) { func (q *queue) computeState(tsk *task.Task) { switch tsk.State { case task.TaskStatePending: - if tsk.AvailableTime == nil || time.Now().After(*tsk.AvailableTime) { + now := time.Now() + if tsk.AvailableTime == nil || tsk.AvailableTime.Before(now) { + tsk.AvailableTime = &now + } else if time.Now().After(*tsk.AvailableTime) { tsk.AppendError(errors.New("pending task requires future available time")) tsk.SetFailed() } diff --git a/task/service/service/service.go b/task/service/service/service.go index 40ed741686..4eda8d0c9c 100644 --- a/task/service/service/service.go +++ b/task/service/service/service.go @@ -3,21 +3,25 @@ package service import ( "context" - "github.com/tidepool-org/platform/clinics" - "github.com/tidepool-org/platform/ehr/reconcile" - "github.com/tidepool-org/platform/ehr/sync" - + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/application" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/clinics" dataClient "github.com/tidepool-org/platform/data/client" + "github.com/tidepool-org/platform/data/events" dataSource "github.com/tidepool-org/platform/data/source" dataSourceClient "github.com/tidepool-org/platform/data/source/client" "github.com/tidepool-org/platform/dexcom" dexcomClient "github.com/tidepool-org/platform/dexcom/client" dexcomFetch "github.com/tidepool-org/platform/dexcom/fetch" dexcomProvider "github.com/tidepool-org/platform/dexcom/provider" + "github.com/tidepool-org/platform/ehr/reconcile" + "github.com/tidepool-org/platform/ehr/sync" "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/permission" + permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" serviceService "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" "github.com/tidepool-org/platform/task" @@ -39,6 +43,9 @@ type Service struct { dexcomClient dexcom.Client taskQueue queue.Queue clinicsClient clinics.Client + alertsClient *alerts.Client + pusher events.Pusher + permissionClient permission.Client } func New() *Service { @@ -70,6 +77,15 @@ func (s *Service) Initialize(provider application.Provider) error { if err := s.initializeClinicsClient(); err != nil { return err } + if err := s.initializeAlertsClient(); err != nil { + return err + } + if err := s.initializeAlertsPusher(); err != nil { + return err + } + if err := s.initializePermissionClient(); err != nil { + return err + } if err := s.initializeTaskQueue(); err != nil { return err } @@ -346,6 +362,17 @@ func (s *Service) initializeTaskQueue() error { } runners = append(runners, ehrSyncRnnr) + if s.alertsClient == nil { + s.Logger().Info("alerts client is nil; care partner tasks will not run successfully") + } + + carePartnerRunner, err := alerts.NewCarePartnerRunner(s.Logger(), s.alertsClient, + s.AuthClient(), s.pusher, s.permissionClient, s.AuthClient()) + if err != nil { + return errors.Wrap(err, "unable to create care partner runner") + } + runners = append(runners, carePartnerRunner) + for _, r := range runners { r := r if err := taskQueue.RegisterRunner(r); err != nil { @@ -359,6 +386,63 @@ func (s *Service) initializeTaskQueue() error { return nil } +func (s *Service) initializeAlertsClient() error { + s.Logger().Debug("initializing alerts client") + + 
platformConfig := platform.NewConfig() + platformConfig.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("data", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := platformConfig.Load(loader); err != nil { + return errors.Wrap(err, "Unable to load alerts client config") + } + + s.Logger().Debug("Creating alerts client") + + platformClient, err := platform.NewClient(platformConfig, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "Unable to create platform client for use in alerts client") + } + s.alertsClient = alerts.NewClient(platformClient, s.Logger()) + + return nil +} + +func (s *Service) initializeAlertsPusher() error { + var err error + var pusher events.Pusher + pusher, err = alerts.NewPusher() + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} + +func (s *Service) initializePermissionClient() error { + s.Logger().Debug("Loading permission client config") + + cfg := platform.NewConfig() + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("permission", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to load permission client config") + } + + s.Logger().Debug("Creating permission client") + + clnt, err := permissionClient.New(cfg, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "unable to create permission client") + } + s.permissionClient = clnt + + return nil +} + func (s *Service) terminateTaskQueue() { if s.taskQueue != nil { s.Logger().Debug("Stopping task queue") diff --git a/task/service/service/service_test.go b/task/service/service/service_test.go index 8fe529cca5..71314e3c63 100644 --- a/task/service/service/service_test.go +++ b/task/service/service/service_test.go @@ -35,12 +35,12 @@ var _ = Describe("Service", func() { var dataClientConfig map[string]interface{} var dataSourceClientConfig map[string]interface{} var taskStoreConfig map[string]interface{} + var permissionClientConfig map[string]interface{} var taskServiceConfig map[string]interface{} var service *taskServiceService.Service BeforeEach(func() { provider = applicationTest.NewProviderWithDefaults() - serverSecret = authTest.NewServiceSecret() sessionToken = authTest.NewSessionToken() server = NewServer() @@ -69,6 +69,9 @@ var _ = Describe("Service", func() { "address": server.URL(), "server_token_secret": authTest.NewServiceSecret(), } + permissionClientConfig = map[string]interface{}{ + "address": server.URL(), + } taskStoreConfig = map[string]interface{}{ "addresses": os.Getenv("TIDEPOOL_STORE_ADDRESSES"), "database": test.RandomStringFromRangeAndCharset(4, 8, test.CharsetLowercase), @@ -88,6 +91,9 @@ var _ = Describe("Service", func() { "task": map[string]interface{}{ "store": taskStoreConfig, }, + "permission": map[string]interface{}{ + "client": permissionClientConfig, + }, "secret": authTest.NewServiceSecret(), "server": map[string]interface{}{ "address": testHttp.NewAddress(), diff --git a/task/store/mongo/mongo.go b/task/store/mongo/mongo.go index c36cbf1105..8232d55300 100644 --- a/task/store/mongo/mongo.go +++ b/task/store/mongo/mongo.go @@ -10,6 +10,7 @@ import ( "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/ehr/reconcile" 
"github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -81,6 +82,7 @@ func (s *Store) EnsureDefaultTasks() error { repository.EnsureSummaryBackfillTask, repository.EnsureSummaryMigrationTask, repository.EnsureEHRReconcileTask, + repository.EnsureCarePartnerTask, } for _, f := range fs { @@ -176,6 +178,11 @@ func (t *TaskRepository) EnsureEHRReconcileTask(ctx context.Context) error { return t.ensureTask(ctx, create) } +func (t *TaskRepository) EnsureCarePartnerTask(ctx context.Context) error { + create := alerts.NewCarePartnerTaskCreate() + return t.ensureTask(ctx, create) +} + func (t *TaskRepository) ensureTask(ctx context.Context, create *task.TaskCreate) error { tsk, err := task.NewTask(ctx, create) if err != nil { diff --git a/task/store/mongo/mongo_test.go b/task/store/mongo/mongo_test.go index 756cc33ed0..9c16d09aab 100644 --- a/task/store/mongo/mongo_test.go +++ b/task/store/mongo/mongo_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/prometheus/client_golang/prometheus/testutil" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" diff --git a/task/test/mock.go b/task/test/mock.go index 7c06ef90b3..c702095423 100644 --- a/task/test/mock.go +++ b/task/test/mock.go @@ -9,7 +9,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - page "github.com/tidepool-org/platform/page" task "github.com/tidepool-org/platform/task" ) diff --git a/vendor/github.com/IBM/sarama/mocks/README.md b/vendor/github.com/IBM/sarama/mocks/README.md new file mode 100644 index 0000000000..9f40ae2ff7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/README.md @@ -0,0 +1,13 @@ +# sarama/mocks + +The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. +You can use them to test your sarama applications using dependency injection. + +The following mock objects are available: + +- [Consumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#Consumer), which will create [PartitionConsumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#PartitionConsumer) mocks. +- [AsyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#AsyncProducer) +- [SyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#SyncProducer) + +The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, +and the results will be reported to the `*testing.T` object you provided when creating the mock. diff --git a/vendor/github.com/IBM/sarama/mocks/async_producer.go b/vendor/github.com/IBM/sarama/mocks/async_producer.go new file mode 100644 index 0000000000..89e0e0db99 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/async_producer.go @@ -0,0 +1,272 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// AsyncProducer implements sarama's Producer interface for testing purposes. +// Before you can send messages to it's Input channel, you have to set expectations +// so it knows how to handle the input; it returns an error if the number of messages +// received is bigger then the number of expectations set. You can also set a +// function in each expectation so that the message is checked by this function and +// an error is returned if the match fails. 
+type AsyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + closed chan struct{} + input chan *sarama.ProducerMessage + successes chan *sarama.ProducerMessage + errors chan *sarama.ProducerError + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag + lastOffset int64 + *TopicConfig +} + +// NewAsyncProducer instantiates a new Producer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is validated and used to determine +// whether it should ack successes on the Successes channel and handle partitioning. +func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + mp := &AsyncProducer{ + t: t, + closed: make(chan struct{}), + expectations: make([]*producerExpectation, 0), + input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + TopicConfig: NewTopicConfig(), + } + + go func() { + defer func() { + close(mp.successes) + close(mp.errors) + close(mp.closed) + }() + + partitioners := make(map[string]sarama.Partitioner, 1) + + for msg := range mp.input { + mp.txnLock.Lock() + if mp.IsTransactional() && mp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + mp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + mp.errors <- &sarama.ProducerError{Err: errors.New("attempt to send message when transaction is not started or is in ending state"), Msg: msg} + continue + } + mp.txnLock.Unlock() + partitioner := partitioners[msg.Topic] + if partitioner == nil { + partitioner = config.Producer.Partitioner(msg.Topic) + partitioners[msg.Topic] = partitioner + } + mp.l.Lock() + if mp.expectations == nil || len(mp.expectations) == 0 { + mp.expectations = nil + mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + } else { + expectation := mp.expectations[0] + mp.expectations = mp.expectations[1:] + + partition, err := partitioner.Partition(msg, mp.partitions(msg.Topic)) + if err != nil { + mp.t.Errorf("Partitioner returned an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } else { + msg.Partition = partition + if expectation.CheckFunction != nil { + err := expectation.CheckFunction(msg) + if err != nil { + mp.t.Errorf("Check function returned an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + mp.lastOffset++ + if config.Producer.Return.Successes { + msg.Offset = mp.lastOffset + mp.successes <- msg + } + } else if config.Producer.Return.Errors { + mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} + } + } + } + mp.l.Unlock() + } + + mp.l.Lock() + if len(mp.expectations) > 0 { + mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) + } + mp.l.Unlock() + }() + + return mp +} + +//////////////////////////////////////////////// +// Implement Producer interface +//////////////////////////////////////////////// + +// 
AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) AsyncClose() { + close(mp.input) +} + +// Close corresponds with the Close method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) Close() error { + mp.AsyncClose() + <-mp.closed + return nil +} + +// Input corresponds with the Input method of sarama's Producer implementation. +// You have to set expectations on the mock producer before writing messages to the Input +// channel, so it knows how to handle them. If there is no more remaining expectations and +// a messages is written to the Input channel, the mock producer will write an error to the test +// state object. +func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { + return mp.input +} + +// Successes corresponds with the Successes method of sarama's Producer implementation. +func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { + return mp.successes +} + +// Errors corresponds with the Errors method of sarama's Producer implementation. +func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { + return mp.errors +} + +func (mp *AsyncProducer) IsTransactional() bool { + return mp.isTransactional +} + +func (mp *AsyncProducer) BeginTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (mp *AsyncProducer) CommitTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) AbortTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + return mp.txnStatus +} + +func (mp *AsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (mp *AsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectInputWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer that a +// message will be provided on the input channel. The mock producer will call the given function to +// check the message. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. it will make it +// available on the Successes channel if the Producer.Return.Successes setting is set to true. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that a +// message will be provided on the input channel. 
The mock producer will first call the given +// function to check the message. If an error is returned it will be made available on the Errors +// channel otherwise the mock will handle the message as if it failed to produce successfully. This +// means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message +// will be provided on the input channel. The mock producer will call the given function to check +// the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. it will make +// it available on the Successes channel if the Producer.Return.Successes setting is set to true. +func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return mp +} + +// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message +// will be provided on the input channel. The mock producer will first call the given function to +// check the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it failed to produce successfully. This means +// it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return mp +} + +// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it is produced successfully, +// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting +// is set to true. +func (mp *AsyncProducer) ExpectInputAndSucceed() *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(nil) + + return mp +} + +// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it failed to produce +// successfully. This means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputAndFail(err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(nil, err) + + return mp +} diff --git a/vendor/github.com/IBM/sarama/mocks/consumer.go b/vendor/github.com/IBM/sarama/mocks/consumer.go new file mode 100644 index 0000000000..77bb9195cb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/consumer.go @@ -0,0 +1,441 @@ +package mocks + +import ( + "sync" + "sync/atomic" + + "github.com/IBM/sarama" +) + +// Consumer implements sarama's Consumer interface for testing purposes. +// Before you can start consuming from this consumer, you have to register +// topic/partitions using ExpectConsumePartition, and set expectations on them. 
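The expectation helpers above are consumed in FIFO order as messages arrive on the Input channel. A minimal sketch of how a test might drive this mock producer — the test name, topic, and payloads are illustrative and not part of the vendored code:

```go
package example_test

import (
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

// Sketch: queue one success and one failure expectation, then feed two messages.
func TestAsyncProducerSketch(t *testing.T) {
	config := mocks.NewTestConfig()
	config.Producer.Return.Successes = true

	mp := mocks.NewAsyncProducer(t, config)
	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)

	mp.Input() <- &sarama.ProducerMessage{Topic: "example-topic", Value: sarama.StringEncoder("ok")}
	mp.Input() <- &sarama.ProducerMessage{Topic: "example-topic", Value: sarama.StringEncoder("boom")}

	// Closing tells the mock no more input will arrive and flushes the channels.
	if err := mp.Close(); err != nil {
		t.Fatal(err)
	}

	// One message should surface on Successes, the other on Errors.
	<-mp.Successes()
	perr := <-mp.Errors()
	if perr.Err != sarama.ErrOutOfBrokers {
		t.Errorf("unexpected error: %v", perr.Err)
	}
}
```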
+type Consumer struct { + l sync.Mutex + t ErrorReporter + config *sarama.Config + partitionConsumers map[string]map[int32]*PartitionConsumer + metadata map[string][]int32 +} + +// NewConsumer returns a new mock Consumer instance. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument can be set to nil; if it is +// non-nil it is validated. +func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + + c := &Consumer{ + t: t, + config: config, + partitionConsumers: make(map[string]map[int32]*PartitionConsumer), + } + return c +} + +/////////////////////////////////////////////////// +// Consumer interface implementation +/////////////////////////////////////////////////// + +// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. +// Before you can start consuming a partition, you have to set expectations on it using +// ExpectConsumePartition. You can only consume a partition once per consumer. +func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { + c.t.Errorf("No expectations set for %s/%d", topic, partition) + return nil, errOutOfExpectations + } + + pc := c.partitionConsumers[topic][partition] + if pc.consumed { + return nil, sarama.ConfigurationError("The topic/partition is already being consumed") + } + + if pc.offset != AnyOffset && pc.offset != offset { + c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset) + } + + pc.consumed = true + return pc, nil +} + +// Topics returns a list of topics, as registered with SetTopicMetadata +func (c *Consumer) Topics() ([]string, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + + var result []string + for topic := range c.metadata { + result = append(result, topic) + } + return result, nil +} + +// Partitions returns the list of parititons for the given topic, as registered with SetTopicMetadata +func (c *Consumer) Partitions(topic string) ([]int32, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + if c.metadata[topic] == nil { + return nil, sarama.ErrUnknownTopicOrPartition + } + + return c.metadata[topic], nil +} + +func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { + c.l.Lock() + defer c.l.Unlock() + + hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) + for topic, partitionConsumers := range c.partitionConsumers { + hwm := make(map[int32]int64, len(partitionConsumers)) + for partition, pc := range partitionConsumers { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +// Close implements the Close method from the sarama.Consumer interface. It will close +// all registered PartitionConsumer instances. 
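Topics and Partitions only return data after metadata has been registered with SetTopicMetadata (defined further below). A small usage sketch, assuming a hypothetical "events" topic with three partitions:

```go
package example_test

import (
	"testing"

	"github.com/IBM/sarama/mocks"
)

// Sketch: seed topic metadata so Topics() and Partitions() have something to return.
func TestTopicMetadataSketch(t *testing.T) {
	consumer := mocks.NewConsumer(t, nil)

	consumer.SetTopicMetadata(map[string][]int32{
		"events": {0, 1, 2}, // hypothetical topic with three partitions
	})

	topics, err := consumer.Topics()
	if err != nil || len(topics) != 1 {
		t.Fatalf("unexpected topics: %v (%v)", topics, err)
	}

	partitions, err := consumer.Partitions("events")
	if err != nil || len(partitions) != 3 {
		t.Fatalf("unexpected partitions: %v (%v)", partitions, err)
	}
}
```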
+func (c *Consumer) Close() error { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + _ = partitionConsumer.Close() + } + } + + return nil +} + +// Pause implements Consumer. +func (c *Consumer) Pause(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Pause() + } + } + } + } +} + +// Resume implements Consumer. +func (c *Consumer) Resume(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Resume() + } + } + } + } +} + +// PauseAll implements Consumer. +func (c *Consumer) PauseAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Pause() + } + } +} + +// ResumeAll implements Consumer. +func (c *Consumer) ResumeAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Resume() + } + } +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// SetTopicMetadata sets the clusters topic/partition metadata, +// which will be returned by Topics() and Partitions(). +func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + c.metadata = metadata +} + +// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. +// The registered PartitionConsumer will be returned, so you can set expectations +// on it using method chaining. Once a topic/partition is registered, you are +// expected to start consuming it using ConsumePartition. If that doesn't happen, +// an error will be written to the error reporter once the mock consumer is closed. It also expects +// that the message and error channels be written with YieldMessage and YieldError accordingly, +// and be fully consumed once the mock consumer is closed if ExpectMessagesDrainedOnClose or +// ExpectErrorsDrainedOnClose have been called. 
+func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil { + c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) + } + + if c.partitionConsumers[topic][partition] == nil { + highWatermarkOffset := offset + if offset == sarama.OffsetOldest { + highWatermarkOffset = 0 + } + + c.partitionConsumers[topic][partition] = &PartitionConsumer{ + highWaterMarkOffset: highWatermarkOffset, + t: c.t, + topic: topic, + partition: partition, + offset: offset, + messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + suppressedMessages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), + } + } + + return c.partitionConsumers[topic][partition] +} + +/////////////////////////////////////////////////// +// PartitionConsumer mock type +/////////////////////////////////////////////////// + +// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. +// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is +// registered first using the Consumer's ExpectConsumePartition method. Before consuming the +// Errors and Messages channel, you should specify what values will be provided on these +// channels using YieldMessage and YieldError. +type PartitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + suppressedHighWaterMarkOffset int64 + l sync.Mutex + t ErrorReporter + topic string + partition int32 + offset int64 + messages chan *sarama.ConsumerMessage + suppressedMessages chan *sarama.ConsumerMessage + errors chan *sarama.ConsumerError + singleClose sync.Once + consumed bool + errorsShouldBeDrained bool + messagesShouldBeDrained bool + paused bool +} + +/////////////////////////////////////////////////// +// PartitionConsumer interface implementation +/////////////////////////////////////////////////// + +// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) AsyncClose() { + pc.singleClose.Do(func() { + close(pc.suppressedMessages) + close(pc.messages) + close(pc.errors) + }) +} + +// Close implements the Close method from the sarama.PartitionConsumer interface. It will +// verify whether the partition consumer was actually started. 
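A minimal sketch of the register-then-consume flow: a partition is registered with ExpectConsumePartition and fed via YieldMessage (defined below), after which the code under test calls ConsumePartition as it would against a real consumer. The topic name and payload are placeholders:

```go
package example_test

import (
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

// Sketch: register a partition, yield one message, and consume it.
func TestConsumeOneMessage(t *testing.T) {
	consumer := mocks.NewConsumer(t, mocks.NewTestConfig())
	defer func() { _ = consumer.Close() }()

	// Expect my-topic/0 to be consumed from the oldest offset, and make a
	// single message available on that partition.
	pc := consumer.ExpectConsumePartition("my-topic", 0, sarama.OffsetOldest)
	pc.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
	pc.ExpectMessagesDrainedOnClose()

	partitionConsumer, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}

	msg := <-partitionConsumer.Messages()
	if string(msg.Value) != "hello" {
		t.Errorf("unexpected message value: %q", msg.Value)
	}
}
```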
+func (pc *PartitionConsumer) Close() error {
+	if !pc.consumed {
+		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
+		return errPartitionConsumerNotStarted
+	}
+
+	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
+		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
+	}
+
+	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
+		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
+	}
+
+	pc.AsyncClose()
+
+	var (
+		closeErr error
+		wg       sync.WaitGroup
+	)
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		errs := make(sarama.ConsumerErrors, 0)
+		for err := range pc.errors {
+			errs = append(errs, err)
+		}
+
+		if len(errs) > 0 {
+			closeErr = errs
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for range pc.messages {
+			// drain
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for range pc.suppressedMessages {
+			// drain
+		}
+	}()
+
+	wg.Wait()
+	return closeErr
+}
+
+// Errors implements the Errors method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
+	return pc.errors
+}
+
+// Messages implements the Messages method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
+	return pc.messages
+}
+
+func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&pc.highWaterMarkOffset)
+}
+
+// Pause implements the Pause method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Pause() {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	pc.suppressedHighWaterMarkOffset = atomic.LoadInt64(&pc.highWaterMarkOffset)
+
+	pc.paused = true
+}
+
+// Resume implements the Resume method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Resume() {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	pc.highWaterMarkOffset = atomic.LoadInt64(&pc.suppressedHighWaterMarkOffset)
+	for len(pc.suppressedMessages) > 0 {
+		msg := <-pc.suppressedMessages
+		pc.messages <- msg
+	}
+
+	pc.paused = false
+}
+
+// IsPaused implements the IsPaused method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) IsPaused() bool {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	return pc.paused
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// YieldMessage will yield a message on the Messages channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this
+// message was consumed from the Messages channel, because there are legitimate
+// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
+// verify that the channel is empty on close.
+func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) *PartitionConsumer {
+	pc.l.Lock()
+	defer pc.l.Unlock()
+
+	msg.Topic = pc.topic
+	msg.Partition = pc.partition
+
+	if pc.paused {
+		msg.Offset = atomic.AddInt64(&pc.suppressedHighWaterMarkOffset, 1) - 1
+		pc.suppressedMessages <- msg
+	} else {
+		msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1) - 1
+		pc.messages <- msg
+	}
+
+	return pc
+}
+
+// YieldError will yield an error on the Errors channel of this partition consumer
+// when it is consumed. 
By default, the mock consumer will not verify whether this error was +// consumed from the Errors channel, because there are legitimate reasons for this +// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that +// the channel is empty on close. +func (pc *PartitionConsumer) YieldError(err error) *PartitionConsumer { + pc.errors <- &sarama.ConsumerError{ + Topic: pc.topic, + Partition: pc.partition, + Err: err, + } + + return pc +} + +// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer +// that the messages channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() *PartitionConsumer { + pc.messagesShouldBeDrained = true + + return pc +} + +// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer +// that the errors channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() *PartitionConsumer { + pc.errorsShouldBeDrained = true + + return pc +} diff --git a/vendor/github.com/IBM/sarama/mocks/mocks.go b/vendor/github.com/IBM/sarama/mocks/mocks.go new file mode 100644 index 0000000000..bd9d630ddb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/mocks.go @@ -0,0 +1,110 @@ +/* +Package mocks provides mocks that can be used for testing applications +that use Sarama. The mock types provided by this package implement the +interfaces Sarama exports, so you can use them for dependency injection +in your tests. + +All mock instances require you to set expectations on them before you +can use them. It will determine how the mock will behave. If an +expectation is not met, it will make your test fail. + +NOTE: this package currently does not fall under the API stability +guarantee of Sarama as it is still considered experimental. +*/ +package mocks + +import ( + "errors" + "fmt" + + "github.com/IBM/sarama" +) + +// ErrorReporter is a simple interface that includes the testing.T methods we use to report +// expectation violations when using the mock objects. +type ErrorReporter interface { + Errorf(string, ...interface{}) +} + +// ValueChecker is a function type to be set in each expectation of the producer mocks +// to check the value passed. +type ValueChecker func(val []byte) error + +// MessageChecker is a function type to be set in each expectation of the producer mocks +// to check the message passed. +type MessageChecker func(*sarama.ProducerMessage) error + +// messageValueChecker wraps a ValueChecker into a MessageChecker. +// Failure to encode the message value will return an error and not call +// the wrapped ValueChecker. +func messageValueChecker(f ValueChecker) MessageChecker { + if f == nil { + return nil + } + return func(msg *sarama.ProducerMessage) error { + val, err := msg.Value.Encode() + if err != nil { + return fmt.Errorf("Input message encoding failed: %w", err) + } + return f(val) + } +} + +var ( + errProduceSuccess error = nil + errOutOfExpectations = errors.New("No more expectations set on mock") + errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") +) + +const AnyOffset int64 = -1000 + +type producerExpectation struct { + Result error + CheckFunction MessageChecker +} + +// TopicConfig describes a mock topic structure for the mock producers’ partitioning needs. 
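ValueChecker and MessageChecker (defined above) are the hooks for asserting on produced payloads; messageValueChecker adapts the former into the latter. A hedged sketch pairing a ValueChecker with the SyncProducer mock defined below — the JSON validation, topic name, and payload are illustrative only:

```go
package example_test

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

// Sketch: a ValueChecker that insists the produced value is valid JSON.
func TestSendValidJSONSketch(t *testing.T) {
	sp := mocks.NewSyncProducer(t, mocks.NewTestConfig())
	defer func() { _ = sp.Close() }()

	isJSON := func(val []byte) error {
		if !json.Valid(val) {
			return fmt.Errorf("value is not valid JSON: %q", val)
		}
		return nil
	}
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(isJSON)

	msg := &sarama.ProducerMessage{
		Topic: "audit-log", // hypothetical topic
		Value: sarama.StringEncoder(`{"event":"created"}`),
	}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Fatal(err)
	}
}
```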
+type TopicConfig struct { + overridePartitions map[string]int32 + defaultPartitions int32 +} + +// NewTopicConfig makes a configuration which defaults to 32 partitions for every topic. +func NewTopicConfig() *TopicConfig { + return &TopicConfig{ + overridePartitions: make(map[string]int32, 0), + defaultPartitions: 32, + } +} + +// SetDefaultPartitions sets the number of partitions any topic not explicitly configured otherwise +// (by SetPartitions) will have from the perspective of created partitioners. +func (pc *TopicConfig) SetDefaultPartitions(n int32) { + pc.defaultPartitions = n +} + +// SetPartitions sets the number of partitions the partitioners will see for specific topics. This +// only applies to messages produced after setting them. +func (pc *TopicConfig) SetPartitions(partitions map[string]int32) { + for p, n := range partitions { + pc.overridePartitions[p] = n + } +} + +func (pc *TopicConfig) partitions(topic string) int32 { + if n, found := pc.overridePartitions[topic]; found { + return n + } + return pc.defaultPartitions +} + +// NewTestConfig returns a config meant to be used by tests. +// Due to inconsistencies with the request versions the clients send using the default Kafka version +// and the response versions our mocks use, we default to the minimum Kafka version in most tests +func NewTestConfig() *sarama.Config { + config := sarama.NewConfig() + config.Consumer.Retry.Backoff = 0 + config.Producer.Retry.Backoff = 0 + config.Version = sarama.MinVersion + return config +} diff --git a/vendor/github.com/IBM/sarama/mocks/sync_producer.go b/vendor/github.com/IBM/sarama/mocks/sync_producer.go new file mode 100644 index 0000000000..9d103ed0d7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/sync_producer.go @@ -0,0 +1,264 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// SyncProducer implements sarama's SyncProducer interface for testing purposes. +// Before you can use it, you have to set expectations on the mock SyncProducer +// to tell it how to handle calls to SendMessage, so you can easily test success +// and failure scenarios. +type SyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + lastOffset int64 + + *TopicConfig + newPartitioner sarama.PartitionerConstructor + partitioners map[string]sarama.Partitioner + + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag +} + +// NewSyncProducer instantiates a new SyncProducer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is validated and used to handle +// partitioning. +func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + return &SyncProducer{ + t: t, + expectations: make([]*producerExpectation, 0), + TopicConfig: NewTopicConfig(), + newPartitioner: config.Producer.Partitioner, + partitioners: make(map[string]sarama.Partitioner, 1), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + } +} + +//////////////////////////////////////////////// +// Implement SyncProducer interface +//////////////////////////////////////////////// + +// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. 
+// You have to set expectations on the mock producer before calling SendMessage, so it knows +// how to handle them. You can set a function in each expectation so that the message value +// checked by this function and an error is returned if the match fails. +// If there is no more remaining expectation when SendMessage is called, +// the mock producer will write an error to the test state object. +func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { + sp.l.Lock() + defer sp.l.Unlock() + + if sp.IsTransactional() && sp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + sp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + return -1, -1, errors.New("attempt to send message when transaction is not started or is in ending state") + } + + if len(sp.expectations) > 0 { + expectation := sp.expectations[0] + sp.expectations = sp.expectations[1:] + topic := msg.Topic + partition, err := sp.partitioner(topic).Partition(msg, sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return -1, -1, err + } + msg.Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msg) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return -1, -1, errCheck + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + sp.lastOffset++ + msg.Offset = sp.lastOffset + return 0, msg.Offset, nil + } + return -1, -1, expectation.Result + } + sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + return -1, -1, errOutOfExpectations +} + +// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessages, so it knows +// how to handle them. If there is no more remaining expectations when SendMessages is called, +// the mock producer will write an error to the test state object. +func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) >= len(msgs) { + expectations := sp.expectations[0:len(msgs)] + sp.expectations = sp.expectations[len(msgs):] + + for i, expectation := range expectations { + topic := msgs[i].Topic + partition, err := sp.partitioner(topic).Partition(msgs[i], sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return err + } + msgs[i].Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msgs[i]) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return errCheck + } + } + if !errors.Is(expectation.Result, errProduceSuccess) { + return expectation.Result + } + sp.lastOffset++ + msgs[i].Offset = sp.lastOffset + } + return nil + } + sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") + return errOutOfExpectations +} + +func (sp *SyncProducer) partitioner(topic string) sarama.Partitioner { + partitioner := sp.partitioners[topic] + if partitioner == nil { + partitioner = sp.newPartitioner(topic) + sp.partitioners[topic] = partitioner + } + return partitioner +} + +// Close corresponds with the Close method of sarama's SyncProducer implementation. 
+// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, +// so it will write an error to the test state if there's any remaining expectations. +func (sp *SyncProducer) Close() error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) > 0 { + sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) + } + + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectSendMessageWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer +// that SendMessage will be called. The mock producer will first call the given function to check +// the message. It will cascade the error of the function, if any, or handle the message as if it +// produced successfully, i.e. by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that +// SendMessage will be called. The mock producer will first call the given function to check the +// message. It will cascade the error of the function, if any, or handle the message as if it +// failed to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage +// will be called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it produced +// successfully, i.e. by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it failed +// to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return sp +} + +// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it produced successfully, i.e. by +// returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageAndSucceed() *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(nil) + + return sp +} + +// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be +// called. 
The mock producer will handle the message as if it failed to produce +// successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageAndFail(err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(nil, err) + + return sp +} + +func (sp *SyncProducer) IsTransactional() bool { + return sp.isTransactional +} + +func (sp *SyncProducer) BeginTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (sp *SyncProducer) CommitTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) AbortTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + return sp.txnStatus +} + +func (sp *SyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (sp *SyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} diff --git a/vendor/github.com/sideshow/apns2/.gitignore b/vendor/github.com/sideshow/apns2/.gitignore new file mode 100644 index 0000000000..5b77d5d22e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/.gitignore @@ -0,0 +1,31 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +/*.p12 +/*.pem +/*.cer +/*.p8 + +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/sideshow/apns2/LICENSE b/vendor/github.com/sideshow/apns2/LICENSE new file mode 100644 index 0000000000..59abbcf40e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/sideshow/apns2/README.md b/vendor/github.com/sideshow/apns2/README.md new file mode 100644 index 0000000000..32e04190ce --- /dev/null +++ b/vendor/github.com/sideshow/apns2/README.md @@ -0,0 +1,216 @@ +# APNS/2 + +APNS/2 is a go package designed for simple, flexible and fast Apple Push Notifications on iOS, OSX and Safari using the new HTTP/2 Push provider API. 
+ +[![Build Status](https://github.com/sideshow/apns2/actions/workflows/tests.yml/badge.svg)](https://github.com/sideshow/apns2/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/sideshow/apns2/badge.svg?branch=master&service=github)](https://coveralls.io/github/sideshow/apns2?branch=master) [![GoDoc](https://godoc.org/github.com/sideshow/apns2?status.svg)](https://godoc.org/github.com/sideshow/apns2) + +## Features + +- Uses new Apple APNs HTTP/2 connection +- Fast - See [notes on speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed) +- Works with go 1.7 and later +- Supports new Apple Token Based Authentication (JWT) +- Supports new iOS 10 features such as Collapse IDs, Subtitles and Mutable Notifications +- Supports new iOS 15 features interruptionLevel and relevanceScore +- Supports persistent connections to APNs +- Supports VoIP/PushKit notifications (iOS 8 and later) +- Modular & easy to use +- Tested and working in APNs production environment + +## Install + +- Make sure you have [Go](https://golang.org/doc/install) installed and have set your [GOPATH](https://golang.org/doc/code.html#GOPATH). +- Install apns2: + +```sh +go get -u github.com/sideshow/apns2 +``` + +If you are running the test suite you will also need to install testify: + +```sh +go get -u github.com/stretchr/testify +``` + +## Example + +```go +package main + +import ( + "log" + "fmt" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/certificate" +) + +func main() { + + cert, err := certificate.FromP12File("../cert.p12", "") + if err != nil { + log.Fatal("Cert Error:", err) + } + + notification := &apns2.Notification{} + notification.DeviceToken = "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7" + notification.Topic = "com.sideshow.Apns2" + notification.Payload = []byte(`{"aps":{"alert":"Hello!"}}`) // See Payload section below + + // If you want to test push notifications for builds running directly from XCode (Development), use + // client := apns2.NewClient(cert).Development() + // For apps published to the app store or installed as an ad-hoc distribution use Production() + + client := apns2.NewClient(cert).Production() + res, err := client.Push(notification) + + if err != nil { + log.Fatal("Error:", err) + } + + fmt.Printf("%v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## JWT Token Example + +Instead of using a `.p12` or `.pem` certificate as above, you can optionally use +APNs JWT _Provider Authentication Tokens_. First you will need a signing key (`.p8` file), Key ID and Team ID [from Apple](http://help.apple.com/xcode/mac/current/#/dev54d690a66). Once you have these details, you can create a new client: + +```go +authKey, err := token.AuthKeyFromFile("../AuthKey_XXX.p8") +if err != nil { + log.Fatal("token error:", err) +} + +token := &token.Token{ + AuthKey: authKey, + // KeyID from developer account (Certificates, Identifiers & Profiles -> Keys) + KeyID: "ABC123DEFG", + // TeamID from developer account (View Account -> Membership) + TeamID: "DEF123GHIJ", +} +... + +client := apns2.NewTokenClient(token) +res, err := client.Push(notification) +``` + +- You can use one APNs signing key to authenticate tokens for multiple apps. +- A signing key works for both the development and production environments. +- A signing key doesn’t expire but can be revoked. + +## Notification + +At a minimum, a _Notification_ needs a _DeviceToken_, a _Topic_ and a _Payload_. 
+ +```go +notification := &apns2.Notification{ + DeviceToken: "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7", + Topic: "com.sideshow.Apns2", + Payload: []byte(`{"aps":{"alert":"Hello!"}}`), +} +``` + +You can also set an optional _ApnsID_, _Expiration_ or _Priority_. + +```go +notification.ApnsID = "40636A2C-C093-493E-936A-2A4333C06DEA" +notification.Expiration = time.Now() +notification.Priority = apns2.PriorityLow +``` + +## Payload + +You can use raw bytes for the `notification.Payload` as above, or you can use the payload builder package which makes it easy to construct APNs payloads. + +```go +// {"aps":{"alert":"hello","badge":1},"key":"val"} + +payload := payload.NewPayload().Alert("hello").Badge(1).Custom("key", "val") + +notification.Payload = payload +client.Push(notification) +``` + +Refer to the [payload](https://godoc.org/github.com/sideshow/apns2/payload) docs for more info. + +## Response, Error handling + +APNS/2 draws the distinction between a valid response from Apple indicating whether or not the _Notification_ was sent or not, and an unrecoverable or unexpected _Error_; + +- An `Error` is returned if a non-recoverable error occurs, i.e. if there is a problem with the underlying _http.Client_ connection or _Certificate_, the payload was not sent, or a valid _Response_ was not received. +- A `Response` is returned if the payload was successfully sent to Apple and a documented response was received. This struct will contain more information about whether or not the push notification succeeded, its _apns-id_ and if applicable, more information around why it did not succeed. + +To check if a `Notification` was successfully sent; + +```go +res, err := client.Push(notification) +if err != nil { + log.Println("There was an error", err) + return +} + +if res.Sent() { + log.Println("Sent:", res.ApnsID) +} else { + fmt.Printf("Not Sent: %v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## Context & Timeouts + +For better control over request cancellations and timeouts APNS/2 supports +contexts. Using a context can be helpful if you want to cancel all pushes when +the parent process is cancelled, or need finer grained control over individual +push timeouts. See the [Google post](https://blog.golang.org/context) for more +information on contexts. + +```go +ctx, cancel = context.WithTimeout(context.Background(), 10 * time.Second) +res, err := client.PushWithContext(ctx, notification) +defer cancel() +``` + +## Speed & Performance + +Also see the wiki page on [APNS HTTP 2 Push Speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +For best performance, you should hold on to an `apns2.Client` instance and not re-create it every push. The underlying TLS connection itself can take a few seconds to connect and negotiate, so if you are setting up an `apns2.Client` and tearing it down every push, then this will greatly affect performance. (Apple suggest keeping the connection open all the time). + +You should also limit the amount of `apns2.Client` instances. The underlying transport has a http connection pool itself, so a single client instance will be enough for most users (One instance can potentially do 4,000+ pushes per second). If you need more than this then one instance per CPU core is a good starting point. + +Speed is greatly affected by the location of your server and the quality of your network connection. 
If you're just testing locally, behind a proxy or if your server is outside USA then you're not going to get great performance. With a good server located in AWS, you should be able to get [decent throughput](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +## Command line tool + +APNS/2 has a command line tool that can be installed with `go get github.com/sideshow/apns2/apns2`. Usage: + +``` +apns2 --help +usage: apns2 --certificate-path=CERTIFICATE-PATH --topic=TOPIC [] + +Listens to STDIN to send notifications and writes APNS response code and reason to STDOUT. + +The expected format is: +Example: aff0c63d9eaa63ad161bafee732d5bc2c31f66d552054718ff19ce314371e5d0 {"aps": {"alert": "hi"}} +Flags: + --help Show context-sensitive help (also try --help-long and --help-man). + -c, --certificate-path=CERTIFICATE-PATH + Path to certificate file. + -t, --topic=TOPIC The topic of the remote notification, which is typically the bundle ID for your app + -m, --mode="production" APNS server to send notifications to. `production` or `development`. Defaults to `production` + --version Show application version. +``` + +## License + +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sideshow/apns2/client.go b/vendor/github.com/sideshow/apns2/client.go new file mode 100644 index 0000000000..cd98dd4228 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client.go @@ -0,0 +1,238 @@ +// Package apns2 is a go Apple Push Notification Service (APNs) provider that +// allows you to send remote notifications to your iOS, tvOS, and OS X +// apps, using the new APNs HTTP/2 network protocol. +package apns2 + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "io" + "net" + "net/http" + "strconv" + "time" + + "github.com/sideshow/apns2/token" + "golang.org/x/net/http2" +) + +// Apple HTTP/2 Development & Production urls +const ( + HostDevelopment = "https://api.sandbox.push.apple.com" + HostProduction = "https://api.push.apple.com" +) + +// DefaultHost is a mutable var for testing purposes +var DefaultHost = HostDevelopment + +var ( + // HTTPClientTimeout specifies a time limit for requests made by the + // HTTPClient. The timeout includes connection time, any redirects, + // and reading the response body. + HTTPClientTimeout = 60 * time.Second + + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. If + // zero, no health check is performed. 
+ ReadIdleTimeout = 15 * time.Second + + // TCPKeepAlive specifies the keep-alive period for an active network + // connection. If zero, keep-alive probes are sent with a default value + // (currently 15 seconds) + TCPKeepAlive = 15 * time.Second + + // TLSDialTimeout is the maximum amount of time a dial will wait for a connect + // to complete. + TLSDialTimeout = 20 * time.Second +) + +// DialTLS is the default dial function for creating TLS connections for +// non-proxied HTTPS requests. +var DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer := &net.Dialer{ + Timeout: TLSDialTimeout, + KeepAlive: TCPKeepAlive, + } + return tls.DialWithDialer(dialer, network, addr, cfg) +} + +// Client represents a connection with the APNs +type Client struct { + Host string + Certificate tls.Certificate + Token *token.Token + HTTPClient *http.Client +} + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. Context's methods may be called by multiple goroutines +// simultaneously. +type Context interface { + context.Context +} + +type connectionCloser interface { + CloseIdleConnections() +} + +// NewClient returns a new Client with an underlying http.Client configured with +// the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +// +// If your use case involves multiple long-lived connections, consider using +// the ClientManager, which manages clients for you. +func NewClient(certificate tls.Certificate) *Client { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{certificate}, + } + if len(certificate.Certificate) > 0 { + tlsConfig.BuildNameToCertificate() + } + transport := &http2.Transport{ + TLSClientConfig: tlsConfig, + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Certificate: certificate, + Host: DefaultHost, + } +} + +// NewTokenClient returns a new Client with an underlying http.Client configured +// with the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +func NewTokenClient(token *token.Token) *Client { + transport := &http2.Transport{ + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + Token: token, + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Host: DefaultHost, + } +} + +// Development sets the Client to use the APNs development push endpoint. +func (c *Client) Development() *Client { + c.Host = HostDevelopment + return c +} + +// Production sets the Client to use the APNs production push endpoint. +func (c *Client) Production() *Client { + c.Host = HostProduction + return c +} + +// Push sends a Notification to the APNs gateway. 
If the underlying http.Client +// is not currently connected, this method will attempt to reconnect +// transparently before sending the notification. It will return a Response +// indicating whether the notification was accepted or rejected by the APNs +// gateway, or an error if something goes wrong. +// +// Use PushWithContext if you need better cancellation and timeout control. +func (c *Client) Push(n *Notification) (*Response, error) { + return c.PushWithContext(context.Background(), n) +} + +// PushWithContext sends a Notification to the APNs gateway. Context carries a +// deadline and a cancellation signal and allows you to close long running +// requests when the context timeout is exceeded. Context can be nil, for +// backwards compatibility. +// +// If the underlying http.Client is not currently connected, this method will +// attempt to reconnect transparently before sending the notification. It will +// return a Response indicating whether the notification was accepted or +// rejected by the APNs gateway, or an error if something goes wrong. +func (c *Client) PushWithContext(ctx Context, n *Notification) (*Response, error) { + payload, err := json.Marshal(n) + if err != nil { + return nil, err + } + + url := c.Host + "/3/device/" + n.DeviceToken + request, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + if c.Token != nil { + c.setTokenHeader(request) + } + + setHeaders(request, n) + + response, err := c.HTTPClient.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + + r := &Response{} + r.StatusCode = response.StatusCode + r.ApnsID = response.Header.Get("apns-id") + + decoder := json.NewDecoder(response.Body) + if err := decoder.Decode(r); err != nil && err != io.EOF { + return &Response{}, err + } + return r, nil +} + +// CloseIdleConnections closes any underlying connections which were previously +// connected from previous requests but are now sitting idle. It will not +// interrupt any connections currently in use. +func (c *Client) CloseIdleConnections() { + c.HTTPClient.Transport.(connectionCloser).CloseIdleConnections() +} + +func (c *Client) setTokenHeader(r *http.Request) { + bearer := c.Token.GenerateIfExpired() + r.Header.Set("authorization", "bearer "+bearer) +} + +func setHeaders(r *http.Request, n *Notification) { + r.Header.Set("Content-Type", "application/json; charset=utf-8") + if n.Topic != "" { + r.Header.Set("apns-topic", n.Topic) + } + if n.ApnsID != "" { + r.Header.Set("apns-id", n.ApnsID) + } + if n.CollapseID != "" { + r.Header.Set("apns-collapse-id", n.CollapseID) + } + if n.Priority > 0 { + r.Header.Set("apns-priority", strconv.Itoa(n.Priority)) + } + if !n.Expiration.IsZero() { + r.Header.Set("apns-expiration", strconv.FormatInt(n.Expiration.Unix(), 10)) + } + if n.PushType != "" { + r.Header.Set("apns-push-type", string(n.PushType)) + } else { + r.Header.Set("apns-push-type", string(PushTypeAlert)) + } + +} diff --git a/vendor/github.com/sideshow/apns2/client_manager.go b/vendor/github.com/sideshow/apns2/client_manager.go new file mode 100644 index 0000000000..bb4bdf900d --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client_manager.go @@ -0,0 +1,162 @@ +package apns2 + +import ( + "container/list" + "crypto/sha1" + "crypto/tls" + "sync" + "time" +) + +type managerItem struct { + key [sha1.Size]byte + client *Client + lastUsed time.Time +} + +// ClientManager is a way to manage multiple connections to the APNs. 
+type ClientManager struct { + // MaxSize is the maximum number of clients allowed in the manager. When + // this limit is reached, the least recently used client is evicted. Set + // zero for no limit. + MaxSize int + + // MaxAge is the maximum age of clients in the manager. Upon retrieval, if + // a client has remained unused in the manager for this duration or longer, + // it is evicted and nil is returned. Set zero to disable this + // functionality. + MaxAge time.Duration + + // Factory is the function which constructs clients if not found in the + // manager. + Factory func(certificate tls.Certificate) *Client + + cache map[[sha1.Size]byte]*list.Element + ll *list.List + mu sync.Mutex + once sync.Once +} + +// NewClientManager returns a new ClientManager for prolonged, concurrent usage +// of multiple APNs clients. ClientManager is flexible enough to work best for +// your use case. When a client is not found in the manager, Get will return +// the result of calling Factory, which can be a Client or nil. +// +// Having multiple clients per certificate in the manager is not allowed. +// +// By default, MaxSize is 64, MaxAge is 10 minutes, and Factory always returns +// a Client with default options. +func NewClientManager() *ClientManager { + manager := &ClientManager{ + MaxSize: 64, + MaxAge: 10 * time.Minute, + Factory: NewClient, + } + + manager.initInternals() + + return manager +} + +// Add adds a Client to the manager. You can use this to individually configure +// Clients in the manager. +func (m *ClientManager) Add(client *Client) { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(client.Certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + item.client = client + item.lastUsed = now + m.ll.MoveToFront(ele) + return + } + ele := m.ll.PushFront(&managerItem{key, client, now}) + m.cache[key] = ele + if m.MaxSize != 0 && m.ll.Len() > m.MaxSize { + m.mu.Unlock() + m.removeOldest() + m.mu.Lock() + } +} + +// Get gets a Client from the manager. If a Client is not found in the manager +// or if a Client has remained in the manager longer than MaxAge, Get will call +// the ClientManager's Factory function, store the result in the manager if +// non-nil, and return it. +func (m *ClientManager) Get(certificate tls.Certificate) *Client { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + if m.MaxAge != 0 && item.lastUsed.Before(now.Add(-m.MaxAge)) { + c := m.Factory(certificate) + if c == nil { + return nil + } + item.client = c + } + item.lastUsed = now + m.ll.MoveToFront(ele) + return item.client + } + + c := m.Factory(certificate) + if c == nil { + return nil + } + m.mu.Unlock() + m.Add(c) + m.mu.Lock() + return c +} + +// Len returns the current size of the ClientManager. 
+func (m *ClientManager) Len() int { + if m.cache == nil { + return 0 + } + m.mu.Lock() + defer m.mu.Unlock() + return m.ll.Len() +} + +func (m *ClientManager) initInternals() { + m.once.Do(func() { + m.cache = map[[sha1.Size]byte]*list.Element{} + m.ll = list.New() + }) +} + +func (m *ClientManager) removeOldest() { + m.mu.Lock() + ele := m.ll.Back() + m.mu.Unlock() + if ele != nil { + m.removeElement(ele) + } +} + +func (m *ClientManager) removeElement(e *list.Element) { + m.mu.Lock() + defer m.mu.Unlock() + m.ll.Remove(e) + delete(m.cache, e.Value.(*managerItem).key) +} + +func cacheKey(certificate tls.Certificate) [sha1.Size]byte { + var data []byte + + for _, cert := range certificate.Certificate { + data = append(data, cert...) + } + + return sha1.Sum(data) +} diff --git a/vendor/github.com/sideshow/apns2/notification.go b/vendor/github.com/sideshow/apns2/notification.go new file mode 100644 index 0000000000..69bf312de5 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/notification.go @@ -0,0 +1,148 @@ +package apns2 + +import ( + "encoding/json" + "time" +) + +// EPushType defines the value for the apns-push-type header +type EPushType string + +const ( + // PushTypeAlert is used for notifications that trigger a user interaction — + // for example, an alert, badge, or sound. If you set this push type, the + // topic field must use your app’s bundle ID as the topic. If the + // notification requires immediate action from the user, set notification + // priority to 10; otherwise use 5. The alert push type is required on + // watchOS 6 and later. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeAlert EPushType = "alert" + + // PushTypeBackground is used for notifications that deliver content in the + // background, and don’t trigger any user interactions. If you set this push + // type, the topic field must use your app’s bundle ID as the topic. Always + // use priority 5. Using priority 10 is an error. The background push type + // is required on watchOS 6 and later. It is recommended on macOS, iOS, + // tvOS, and iPadOS. + PushTypeBackground EPushType = "background" + + // PushTypeLocation is used for notifications that request a user’s + // location. If you set this push type, the topic field must use your app’s + // bundle ID with .location-query appended to the end. The location push + // type is recommended for iOS and iPadOS. It isn’t available on macOS, + // tvOS, and watchOS. If the location query requires an immediate response + // from the Location Push Service Extension, set notification apns-priority + // to 10; otherwise, use 5. The location push type supports only token-based + // authentication. + PushTypeLocation EPushType = "location" + + // PushTypeVOIP is used for notifications that provide information about an + // incoming Voice-over-IP (VoIP) call. If you set this push type, the topic + // field must use your app’s bundle ID with .voip appended to the end. If + // you’re using certificate-based authentication, you must also register the + // certificate for VoIP services. The voip push type is not available on + // watchOS. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeVOIP EPushType = "voip" + + // PushTypeComplication is used for notifications that contain update + // information for a watchOS app’s complications. If you set this push type, + // the topic field must use your app’s bundle ID with .complication appended + // to the end. 
If you’re using certificate-based authentication, you must + // also register the certificate for WatchKit services. The complication + // push type is recommended for watchOS and iOS. It is not available on + // macOS, tvOS, and iPadOS. + PushTypeComplication EPushType = "complication" + + // PushTypeFileProvider is used to signal changes to a File Provider + // extension. If you set this push type, the topic field must use your app’s + // bundle ID with .pushkit.fileprovider appended to the end. The + // fileprovider push type is not available on watchOS. It is recommended on + // macOS, iOS, tvOS, and iPadOS. + PushTypeFileProvider EPushType = "fileprovider" + + // PushTypeMDM is used for notifications that tell managed devices to + // contact the MDM server. If you set this push type, you must use the topic + // from the UID attribute in the subject of your MDM push certificate. + PushTypeMDM EPushType = "mdm" +) + +const ( + // PriorityLow will tell APNs to send the push message at a time that takes + // into account power considerations for the device. Notifications with this + // priority might be grouped and delivered in bursts. They are throttled, + // and in some cases are not delivered. + PriorityLow = 5 + + // PriorityHigh will tell APNs to send the push message immediately. + // Notifications with this priority must trigger an alert, sound, or badge + // on the target device. It is an error to use this priority for a push + // notification that contains only the content-available key. + PriorityHigh = 10 +) + +// Notification represents the data and metadata for an APNs Remote Notification. +type Notification struct { + + // An optional canonical UUID that identifies the notification. The + // canonical form is 32 lowercase hexadecimal digits, displayed in five + // groups separated by hyphens in the form 8-4-4-4-12. An example UUID is as + // follows: + // + // 123e4567-e89b-12d3-a456-426655440000 + // + // If you don't set this, a new UUID is created by APNs and returned in the + // response. + ApnsID string + + // A string which allows multiple notifications with the same collapse + // identifier to be displayed to the user as a single notification. The + // value should not exceed 64 bytes. + CollapseID string + + // A string containing hexadecimal bytes of the device token for the target + // device. + DeviceToken string + + // The topic of the remote notification, which is typically the bundle ID + // for your app. The certificate you create in the Apple Developer Member + // Center must include the capability for this topic. If your certificate + // includes multiple topics, you must specify a value for this header. If + // you omit this header and your APNs certificate does not specify multiple + // topics, the APNs server uses the certificate’s Subject as the default + // topic. + Topic string + + // An optional time at which the notification is no longer valid and can be + // discarded by APNs. If this value is in the past, APNs treats the + // notification as if it expires immediately and does not store the + // notification or attempt to redeliver it. If this value is left as the + // default (ie, Expiration.IsZero()), an expiration header will not be added to + // the http request. + Expiration time.Time + + // The priority of the notification. Specify either apns.PriorityHigh (10) or + // apns.PriorityLow (5). If you don't set this, the APNs server will set the + // priority to 10.
+ Priority int + + // A byte array containing the JSON-encoded payload of this push notification. + // Refer to "The Remote Notification Payload" section in the Apple Local and + // Remote Notification Programming Guide for more info. + Payload interface{} + + // The push type of the push notification. If this value is left as the + // default, an apns-push-type header with value 'alert' will be added to the + // http request. + PushType EPushType +} + +// MarshalJSON converts the notification payload to JSON. +func (n *Notification) MarshalJSON() ([]byte, error) { + switch payload := n.Payload.(type) { + case string: + return []byte(payload), nil + case []byte: + return payload, nil + default: + return json.Marshal(payload) + } +} diff --git a/vendor/github.com/sideshow/apns2/payload/builder.go b/vendor/github.com/sideshow/apns2/payload/builder.go new file mode 100644 index 0000000000..a2ff30da10 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/payload/builder.go @@ -0,0 +1,402 @@ +// Package payload is a helper package which contains a payload +// builder to make constructing notification payloads easier. +package payload + +import "encoding/json" + +// EInterruptionLevel defines the value for the payload aps interruption-level +type EInterruptionLevel string + +const ( + // InterruptionLevelPassive is used to indicate that the notification be delivered in a passive manner. + InterruptionLevelPassive EInterruptionLevel = "passive" + + // InterruptionLevelActive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelActive EInterruptionLevel = "active" + + // InterruptionLevelTimeSensitive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelTimeSensitive EInterruptionLevel = "time-sensitive" + + // InterruptionLevelCritical is used to indicate the importance and delivery timing of a notification. + // This interruption level requires an approved entitlement from Apple. + // See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ + InterruptionLevelCritical EInterruptionLevel = "critical" +) + +// Payload represents a notification which holds the content that will be +// marshalled as JSON.
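As a brief sketch of the Notification type and its MarshalJSON behavior described above (the device token, topic, and payload are placeholders; string payloads pass through unmodified):

package main

import (
	"fmt"
	"time"

	"github.com/sideshow/apns2"
)

func main() {
	n := &apns2.Notification{
		DeviceToken: "0123456789abcdef", // placeholder token
		Topic:       "org.example.app",  // placeholder bundle ID
		Priority:    apns2.PriorityHigh,
		PushType:    apns2.PushTypeAlert,
		Expiration:  time.Now().Add(24 * time.Hour),
		Payload:     `{"aps":{"alert":"Hello"}}`, // passed through as-is
	}

	body, err := n.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"aps":{"alert":"Hello"}}
}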
+type Payload struct { + content map[string]interface{} +} + +type aps struct { + Alert interface{} `json:"alert,omitempty"` + Badge interface{} `json:"badge,omitempty"` + Category string `json:"category,omitempty"` + ContentAvailable int `json:"content-available,omitempty"` + InterruptionLevel EInterruptionLevel `json:"interruption-level,omitempty"` + MutableContent int `json:"mutable-content,omitempty"` + RelevanceScore interface{} `json:"relevance-score,omitempty"` + Sound interface{} `json:"sound,omitempty"` + ThreadID string `json:"thread-id,omitempty"` + URLArgs []string `json:"url-args,omitempty"` +} + +type alert struct { + Action string `json:"action,omitempty"` + ActionLocKey string `json:"action-loc-key,omitempty"` + Body string `json:"body,omitempty"` + LaunchImage string `json:"launch-image,omitempty"` + LocArgs []string `json:"loc-args,omitempty"` + LocKey string `json:"loc-key,omitempty"` + Title string `json:"title,omitempty"` + Subtitle string `json:"subtitle,omitempty"` + TitleLocArgs []string `json:"title-loc-args,omitempty"` + TitleLocKey string `json:"title-loc-key,omitempty"` + SummaryArg string `json:"summary-arg,omitempty"` + SummaryArgCount int `json:"summary-arg-count,omitempty"` +} + +type sound struct { + Critical int `json:"critical,omitempty"` + Name string `json:"name,omitempty"` + Volume float32 `json:"volume,omitempty"` +} + +// NewPayload returns a new Payload struct +func NewPayload() *Payload { + return &Payload{ + map[string]interface{}{ + "aps": &aps{}, + }, + } +} + +// Alert sets the aps alert on the payload. +// This will display a notification alert message to the user. +// +// {"aps":{"alert":alert}} +func (p *Payload) Alert(alert interface{}) *Payload { + p.aps().Alert = alert + return p +} + +// Badge sets the aps badge on the payload. +// This will display a numeric badge on the app icon. +// +// {"aps":{"badge":b}} +func (p *Payload) Badge(b int) *Payload { + p.aps().Badge = b + return p +} + +// ZeroBadge sets the aps badge on the payload to 0. +// This will clear the badge on the app icon. +// +// {"aps":{"badge":0}} +func (p *Payload) ZeroBadge() *Payload { + p.aps().Badge = 0 + return p +} + +// UnsetBadge removes the badge attribute from the payload. +// This will leave the badge on the app icon unchanged. +// If you wish to clear the app icon badge, use ZeroBadge() instead. +// +// {"aps":{}} +func (p *Payload) UnsetBadge() *Payload { + p.aps().Badge = nil + return p +} + +// Sound sets the aps sound on the payload. +// This will play a sound from the app bundle, or the default sound otherwise. +// +// {"aps":{"sound":sound}} +func (p *Payload) Sound(sound interface{}) *Payload { + p.aps().Sound = sound + return p +} + +// ContentAvailable sets the aps content-available on the payload to 1. +// This will indicate to the app that there is new content available to download +// and launch the app in the background. +// +// {"aps":{"content-available":1}} +func (p *Payload) ContentAvailable() *Payload { + p.aps().ContentAvailable = 1 + return p +} + +// MutableContent sets the aps mutable-content on the payload to 1. +// This will indicate to the system to call your Notification Service +// extension to mutate or replace the notification's content. +// +// {"aps":{"mutable-content":1}} +func (p *Payload) MutableContent() *Payload { + p.aps().MutableContent = 1 + return p +} + +// Custom payload + +// Custom sets a custom key and value on the payload. +// This will add custom key/value data to the notification payload at root level.
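To make the fluent builder concrete, a short sketch (the custom key and values are illustrative only; marshalling uses the MarshalJSON method defined later in this file):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sideshow/apns2/payload"
)

func main() {
	p := payload.NewPayload().
		Alert("You have new data").
		Badge(1).
		Sound("default").
		Custom("dataSetId", "example-id") // hypothetical custom key

	b, _ := json.Marshal(p)
	fmt.Println(string(b))
	// {"aps":{"alert":"You have new data","badge":1,"sound":"default"},"dataSetId":"example-id"}
}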
+// +// {"aps":{}, key:value} +func (p *Payload) Custom(key string, val interface{}) *Payload { + p.content[key] = val + return p +} + +// Alert dictionary + +// AlertTitle sets the aps alert title on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"title":title}}} +func (p *Payload) AlertTitle(title string) *Payload { + p.aps().alert().Title = title + return p +} + +// AlertTitleLocKey sets the aps alert title localization key on the payload. +// This is the key to a title string in the Localizable.strings file for the +// current localization. See Localized Formatted Strings in Apple documentation +// for more information. +// +// {"aps":{"alert":{"title-loc-key":key}}} +func (p *Payload) AlertTitleLocKey(key string) *Payload { + p.aps().alert().TitleLocKey = key + return p +} + +// AlertTitleLocArgs sets the aps alert title localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in title-loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"title-loc-args":args}}} +func (p *Payload) AlertTitleLocArgs(args []string) *Payload { + p.aps().alert().TitleLocArgs = args + return p +} + +// AlertSubtitle sets the aps alert subtitle on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"subtitle":"subtitle"}}} +func (p *Payload) AlertSubtitle(subtitle string) *Payload { + p.aps().alert().Subtitle = subtitle + return p +} + +// AlertBody sets the aps alert body on the payload. +// This is the text of the alert message. +// +// {"aps":{"alert":{"body":body}}} +func (p *Payload) AlertBody(body string) *Payload { + p.aps().alert().Body = body + return p +} + +// AlertLaunchImage sets the aps launch image on the payload. +// This is the filename of an image file in the app bundle. The image is used +// as the launch image when users tap the action button or move the action +// slider. +// +// {"aps":{"alert":{"launch-image":image}}} +func (p *Payload) AlertLaunchImage(image string) *Payload { + p.aps().alert().LaunchImage = image + return p +} + +// AlertLocArgs sets the aps alert localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-args":args}}} +func (p *Payload) AlertLocArgs(args []string) *Payload { + p.aps().alert().LocArgs = args + return p +} + +// AlertLocKey sets the aps alert localization key on the payload. +// This is the key to an alert-message string in the Localizable.strings file +// for the current localization. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-key":key}}} +func (p *Payload) AlertLocKey(key string) *Payload { + p.aps().alert().LocKey = key + return p +} + +// AlertAction sets the aps alert action on the payload. +// This is the label of the action button, if the user sets the notifications +// to appear as alerts. This label should be succinct, such as “Details” or +// “Read more”. If omitted, the default value is “Show”. 
+// +// {"aps":{"alert":{"action":action}}} +func (p *Payload) AlertAction(action string) *Payload { + p.aps().alert().Action = action + return p +} + +// AlertActionLocKey sets the aps alert action localization key on the payload. +// This is the string used as a key to get a localized string in the current +// localization to use for the notification right button’s title instead of +// “View”. See Localized Formatted Strings in Apple documentation for more +// information. +// +// {"aps":{"alert":{"action-loc-key":key}}} +func (p *Payload) AlertActionLocKey(key string) *Payload { + p.aps().alert().ActionLocKey = key + return p +} + +// AlertSummaryArg sets the aps alert summary arg key on the payload. +// This is the string that is used as a key to fill in an argument +// at the bottom of a notification to provide more context, such as +// a name associated with the sender of the notification. +// +// {"aps":{"alert":{"summary-arg":key}}} +func (p *Payload) AlertSummaryArg(key string) *Payload { + p.aps().alert().SummaryArg = key + return p +} + +// AlertSummaryArgCount sets the aps alert summary arg count key on the payload. +// This integer sets a custom "weight" on the notification, effectively +// allowing a notification to be viewed internally as two. For example, if +// a notification encompasses 3 messages, you can set it to 3. +// +// {"aps":{"alert":{"summary-arg-count":key}}} +func (p *Payload) AlertSummaryArgCount(key int) *Payload { + p.aps().alert().SummaryArgCount = key + return p +} + +// General + +// Category sets the aps category on the payload. +// This is a string value that represents the identifier property of the +// UIMutableUserNotificationCategory object you created to define custom actions. +// +// {"aps":{"category":category}} +func (p *Payload) Category(category string) *Payload { + p.aps().Category = category + return p +} + +// Mdm sets the mdm on the payload. +// This is for Apple Mobile Device Management (mdm) payloads. +// +// {"aps":{},"mdm":mdm} +func (p *Payload) Mdm(mdm string) *Payload { + p.content["mdm"] = mdm + return p +} + +// ThreadID sets the aps thread id on the payload. +// This is for the purpose of updating the contents of a View Controller in a +// Notification Content app extension when a new notification arrives. If a +// new notification arrives whose thread-id value matches the thread-id of the +// notification already being displayed, the didReceiveNotification method +// is called. +// +// {"aps":{"thread-id":id}} +func (p *Payload) ThreadID(threadID string) *Payload { + p.aps().ThreadID = threadID + return p +} + +// URLArgs sets the aps url-args on the payload. +// This specifies an array of values that are paired with the placeholders +// inside the urlFormatString value of your website.json file. +// See Apple Notification Programming Guide for Websites. +// +// {"aps":{"url-args":urlArgs}} +func (p *Payload) URLArgs(urlArgs []string) *Payload { + p.aps().URLArgs = urlArgs + return p +} + +// SoundName sets the name value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":name,"volume":1.0}}} +func (p *Payload) SoundName(name string) *Payload { + p.aps().sound().Name = name + return p +} + +// SoundVolume sets the volume value on the aps sound dictionary.
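A short sketch combining the critical-sound helpers documented here (the alert text and sound file name are placeholders; critical alerts require an entitlement from Apple):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sideshow/apns2/payload"
)

func main() {
	// SoundName and SoundVolume both switch the payload to the sound
	// dictionary form with critical set to 1.
	p := payload.NewPayload().
		AlertBody("Glucose out of range"). // placeholder text
		SoundName("critical-alert.caf").   // placeholder sound file
		SoundVolume(0.8)

	b, _ := json.Marshal(p)
	fmt.Println(string(b))
	// {"aps":{"alert":{"body":"Glucose out of range"},"sound":{"critical":1,"name":"critical-alert.caf","volume":0.8}}}
}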
+// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":"default","volume":volume}}} +func (p *Payload) SoundVolume(volume float32) *Payload { + p.aps().sound().Volume = volume + return p +} + +// InterruptionLevel defines the value for the payload aps interruption-level +// This is to indicate the importance and delivery timing of a notification. +// (Using InterruptionLevelCritical requires an approved entitlement from Apple.) +// See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ +// +// {"aps":{"interruption-level":passive}} +func (p *Payload) InterruptionLevel(interruptionLevel EInterruptionLevel) *Payload { + p.aps().InterruptionLevel = interruptionLevel + return p +} + +// The relevance score, a number between 0 and 1, +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) RelevanceScore(b float32) *Payload { + p.aps().RelevanceScore = b + return p +} + +// Unsets the relevance score +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) UnsetRelevanceScore() *Payload { + p.aps().RelevanceScore = nil + return p +} + +// MarshalJSON returns the JSON encoded version of the Payload +func (p *Payload) MarshalJSON() ([]byte, error) { + return json.Marshal(p.content) +} + +func (p *Payload) aps() *aps { + return p.content["aps"].(*aps) +} + +func (a *aps) alert() *alert { + if _, ok := a.Alert.(*alert); !ok { + a.Alert = &alert{} + } + return a.Alert.(*alert) +} + +func (a *aps) sound() *sound { + if _, ok := a.Sound.(*sound); !ok { + a.Sound = &sound{Critical: 1, Name: "default", Volume: 1.0} + } + return a.Sound.(*sound) +} diff --git a/vendor/github.com/sideshow/apns2/response.go b/vendor/github.com/sideshow/apns2/response.go new file mode 100644 index 0000000000..99d6345634 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/response.go @@ -0,0 +1,156 @@ +package apns2 + +import ( + "net/http" + "strconv" + "time" +) + +// StatusSent is a 200 response. +const StatusSent = http.StatusOK + +// The possible Reason error codes returned from APNs. From table 4 in the +// Handling Notification Responses from APNs article +const ( + // 400 The collapse identifier exceeds the maximum allowed size + ReasonBadCollapseID = "BadCollapseId" + + // 400 The specified device token was bad. Verify that the request contains a + // valid token and that the token matches the environment. + ReasonBadDeviceToken = "BadDeviceToken" + + // 400 The apns-expiration value is bad. + ReasonBadExpirationDate = "BadExpirationDate" + + // 400 The apns-id value is bad. + ReasonBadMessageID = "BadMessageId" + + // 400 The apns-priority value is bad. + ReasonBadPriority = "BadPriority" + + // 400 The apns-topic was invalid. + ReasonBadTopic = "BadTopic" + + // 400 The device token does not match the specified topic. 
+ ReasonDeviceTokenNotForTopic = "DeviceTokenNotForTopic" + + // 400 One or more headers were repeated. + ReasonDuplicateHeaders = "DuplicateHeaders" + + // 400 Idle time out. + ReasonIdleTimeout = "IdleTimeout" + + // 400 The apns-push-type value is invalid. + ReasonInvalidPushType = "InvalidPushType" + + // 400 The device token is not specified in the request :path. Verify that the + // :path header contains the device token. + ReasonMissingDeviceToken = "MissingDeviceToken" + + // 400 The apns-topic header of the request was not specified and was + // required. The apns-topic header is mandatory when the client is connected + // using a certificate that supports multiple topics. + ReasonMissingTopic = "MissingTopic" + + // 400 The message payload was empty. + ReasonPayloadEmpty = "PayloadEmpty" + + // 400 Pushing to this topic is not allowed. + ReasonTopicDisallowed = "TopicDisallowed" + + // 403 The certificate was bad. + ReasonBadCertificate = "BadCertificate" + + // 403 The client certificate was for the wrong environment. + ReasonBadCertificateEnvironment = "BadCertificateEnvironment" + + // 403 The provider token is stale and a new token should be generated. + ReasonExpiredProviderToken = "ExpiredProviderToken" + + // 403 The specified action is not allowed. + ReasonForbidden = "Forbidden" + + // 403 The provider token is not valid or the token signature could not be + // verified. + ReasonInvalidProviderToken = "InvalidProviderToken" + + // 403 No provider certificate was used to connect to APNs and Authorization + // header was missing or no provider token was specified. + ReasonMissingProviderToken = "MissingProviderToken" + + // 404 The request contained a bad :path value. + ReasonBadPath = "BadPath" + + // 405 The specified :method was not POST. + ReasonMethodNotAllowed = "MethodNotAllowed" + + // 410 The device token is inactive for the specified topic. + ReasonUnregistered = "Unregistered" + + // 413 The message payload was too large. See Creating the Remote Notification + // Payload in the Apple Local and Remote Notification Programming Guide for + // details on maximum payload size. + ReasonPayloadTooLarge = "PayloadTooLarge" + + // 429 The provider token is being updated too often. + ReasonTooManyProviderTokenUpdates = "TooManyProviderTokenUpdates" + + // 429 Too many requests were made consecutively to the same device token. + ReasonTooManyRequests = "TooManyRequests" + + // 500 An internal server error occurred. + ReasonInternalServerError = "InternalServerError" + + // 503 The service is unavailable. + ReasonServiceUnavailable = "ServiceUnavailable" + + // 503 The server is shutting down. + ReasonShutdown = "Shutdown" +) + +// Response represents a result from the APNs gateway indicating whether a +// notification was accepted or rejected and (if applicable) the metadata +// surrounding the rejection. +type Response struct { + + // The HTTP status code returned by APNs. + // A 200 value indicates that the notification was successfully sent. + // For a list of other possible status codes, see table 6-4 in the Apple Local + // and Remote Notification Programming Guide. + StatusCode int + + // The APNs error string indicating the reason for the notification failure (if + // any). The error code is specified as a string. For a list of possible + // values, see the Reason constants above. + // If the notification was accepted, this value will be "". + Reason string + + // The APNs ApnsID value from the Notification. 
If you didn't set an ApnsID on the + // Notification, this will be a new unique UUID which has been created by APNs. + ApnsID string + + // If the value of StatusCode is 410, this is the last time at which APNs + // confirmed that the device token was no longer valid for the topic. + Timestamp Time +} + +// Sent returns whether or not the notification was successfully sent. +// This is the same as checking if the StatusCode == 200. +func (c *Response) Sent() bool { + return c.StatusCode == StatusSent +} + +// Time represents a device uninstall time +type Time struct { + time.Time +} + +// UnmarshalJSON converts an epoch date into a Time struct. +func (t *Time) UnmarshalJSON(b []byte) error { + ts, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(ts/1000, 0) + return nil +} diff --git a/vendor/github.com/sideshow/apns2/token/token.go b/vendor/github.com/sideshow/apns2/token/token.go new file mode 100644 index 0000000000..26fec563dd --- /dev/null +++ b/vendor/github.com/sideshow/apns2/token/token.go @@ -0,0 +1,107 @@ +package token + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "sync" + "time" + + "github.com/golang-jwt/jwt/v4" +) + +const ( + // TokenTimeout is the period of time in seconds that a token is valid for. + // If the timestamp for token issue is not within the last hour, APNs + // rejects subsequent push messages. This is set to under an hour so that + // we generate a new token before the existing one expires. + TokenTimeout = 3000 +) + +// Possible errors when parsing a .p8 file. +var ( + ErrAuthKeyNotPem = errors.New("token: AuthKey must be a valid .p8 PEM file") + ErrAuthKeyNotECDSA = errors.New("token: AuthKey must be of type ecdsa.PrivateKey") + ErrAuthKeyNil = errors.New("token: AuthKey was nil") +) + +// Token represents an Apple Provider Authentication Token (JSON Web Token). +type Token struct { + sync.Mutex + AuthKey *ecdsa.PrivateKey + KeyID string + TeamID string + IssuedAt int64 + Bearer string +} + +// AuthKeyFromFile loads a .p8 certificate from a local file and returns a +// *ecdsa.PrivateKey. +func AuthKeyFromFile(filename string) (*ecdsa.PrivateKey, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return AuthKeyFromBytes(bytes) +} + +// AuthKeyFromBytes loads a .p8 certificate from an in memory byte array and +// returns an *ecdsa.PrivateKey. +func AuthKeyFromBytes(bytes []byte) (*ecdsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, ErrAuthKeyNotPem + } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + if pk, ok := key.(*ecdsa.PrivateKey); ok { + return pk, nil + } + return nil, ErrAuthKeyNotECDSA +} + +// GenerateIfExpired checks to see if the token is about to expire and +// generates a new token. +func (t *Token) GenerateIfExpired() (bearer string) { + t.Lock() + defer t.Unlock() + if t.Expired() { + t.Generate() + } + return t.Bearer +} + +// Expired checks to see if the token has expired. +func (t *Token) Expired() bool { + return time.Now().Unix() >= (t.IssuedAt + TokenTimeout) +} + +// Generate creates a new token. 
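A rough sketch of pushing a notification with token-based authentication and inspecting the Response; the key file, key/team IDs, device token, and topic are placeholders, and NewTokenClient, Development, and Push are assumed from the apns2 client code outside this hunk:

package main

import (
	"log"

	"github.com/sideshow/apns2"
	"github.com/sideshow/apns2/token"
)

func main() {
	authKey, err := token.AuthKeyFromFile("AuthKey_ABC123DEFG.p8") // placeholder .p8 file
	if err != nil {
		log.Fatal(err)
	}
	t := &token.Token{AuthKey: authKey, KeyID: "ABC123DEFG", TeamID: "DEF123GHIJ"}

	client := apns2.NewTokenClient(t).Development()
	res, err := client.Push(&apns2.Notification{
		DeviceToken: "0123456789abcdef", // placeholder
		Topic:       "org.example.app",  // placeholder
		Payload:     `{"aps":{"alert":"Hello"}}`,
	})
	if err != nil {
		log.Fatal(err)
	}
	if !res.Sent() {
		// Reason distinguishes permanent rejections (for example
		// ReasonUnregistered, with Timestamp marking when the token became
		// invalid) from transient failures.
		log.Printf("not sent: %d %s %v", res.StatusCode, res.Reason, res.Timestamp)
	}
}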
+func (t *Token) Generate() (bool, error) { + if t.AuthKey == nil { + return false, ErrAuthKeyNil + } + issuedAt := time.Now().Unix() + jwtToken := &jwt.Token{ + Header: map[string]interface{}{ + "alg": "ES256", + "kid": t.KeyID, + }, + Claims: jwt.MapClaims{ + "iss": t.TeamID, + "iat": issuedAt, + }, + Method: jwt.SigningMethodES256, + } + bearer, err := jwtToken.SignedString(t.AuthKey) + if err != nil { + return false, err + } + t.IssuedAt = issuedAt + t.Bearer = bearer + return true, nil +} diff --git a/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go new file mode 100644 index 0000000000..83ad151e31 --- /dev/null +++ b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go @@ -0,0 +1,224 @@ +package asyncevents + +import ( + "context" + "errors" + "fmt" + "log/slog" + "math" + "time" + + "github.com/IBM/sarama" +) + +// SaramaEventsConsumer consumes Kafka messages for asynchronous event +// handling. +type SaramaEventsConsumer struct { + Handler sarama.ConsumerGroupHandler + ConsumerGroup sarama.ConsumerGroup + Topics []string +} + +func NewSaramaEventsConsumer(consumerGroup sarama.ConsumerGroup, + handler sarama.ConsumerGroupHandler, topics ...string) *SaramaEventsConsumer { + + return &SaramaEventsConsumer{ + ConsumerGroup: consumerGroup, + Handler: handler, + Topics: topics, + } +} + +// Run the consumer, to begin consuming Kafka messages. +// +// Run is stopped by its context being canceled. When its context is canceled, +// it returns nil. +func (p *SaramaEventsConsumer) Run(ctx context.Context) (err error) { + for { + err := p.ConsumerGroup.Consume(ctx, p.Topics, p.Handler) + if err != nil { + return err + } + if ctxErr := ctx.Err(); ctxErr != nil { + return nil + } + } +} + +// SaramaConsumerGroupHandler implements sarama.ConsumerGroupHandler. +type SaramaConsumerGroupHandler struct { + Consumer SaramaMessageConsumer + ConsumerTimeout time.Duration + Logger Logger +} + +// NewSaramaConsumerGroupHandler builds a consumer group handler. +// +// A timeout of 0 will use DefaultMessageConsumptionTimeout. +func NewSaramaConsumerGroupHandler(logger Logger, consumer SaramaMessageConsumer, + timeout time.Duration) *SaramaConsumerGroupHandler { + + if timeout == 0 { + timeout = DefaultMessageConsumptionTimeout + } + if logger == nil { + logger = slog.Default() + } + return &SaramaConsumerGroupHandler{ + Consumer: consumer, + ConsumerTimeout: timeout, + Logger: logger, + } +} + +const ( + // DefaultMessageConsumptionTimeout is the default time to allow + // SaramaMessageConsumer.Consume to work before canceling. + DefaultMessageConsumptionTimeout = 30 * time.Second +) + +// Setup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } + +// Cleanup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } + +// ConsumeClaim implements sarama.ConsumerGroupHandler. 
+func (h *SaramaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, + claim sarama.ConsumerGroupClaim) error { + + done := session.Context().Done() + for { + select { + case <-done: + return nil + case message, more := <-claim.Messages(): + if !more { + return nil + } + err := func() error { + ctx, cancel := context.WithTimeout(session.Context(), h.ConsumerTimeout) + defer cancel() + return h.Consumer.Consume(ctx, session, message) + }() + switch { + case errors.Is(err, context.DeadlineExceeded): + h.Logger.Log(session.Context(), slog.LevelDebug, err.Error()) + case !errors.Is(err, nil): + return err + } + } + } +} + +// Close implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Close() error { return nil } + +// SaramaMessageConsumer processes Kafka messages. +type SaramaMessageConsumer interface { + // Consume should process a message. + // + // Consume is responsible for marking the message consumed, unless the + // context is canceled, in which case the caller should retry, or mark the + // message as appropriate. + Consume(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error +} + +var ErrRetriesLimitExceeded = errors.New("retry limit exceeded") + +// NTimesRetryingConsumer enhances a SaramaMessageConsumer with a finite +// number of immediate retries. +// +// The delay between each retry can be controlled via the Delay property. If +// no Delay property is specified, a delay based on the Fibonacci sequence is +// used. +// +// Logger is intentionally minimal. The slog.Log function is used by default. +type NTimesRetryingConsumer struct { + Times int + Consumer SaramaMessageConsumer + Delay func(tries int) time.Duration + Logger Logger +} + +// Logger is an intentionally minimal interface for basic logging. +// +// It matches the signature of slog.Log. +type Logger interface { + Log(ctx context.Context, level slog.Level, msg string, args ...any) +} + +func (c *NTimesRetryingConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) (err error) { + + var joinedErrors error + var tries int = 0 + var delay time.Duration = 0 + if c.Delay == nil { + c.Delay = DelayFibonacci + } + if c.Logger == nil { + c.Logger = slog.Default() + } + done := ctx.Done() + for tries < c.Times { + select { + case <-done: + return nil + case <-time.After(delay): + err := c.Consumer.Consume(ctx, session, message) + if errors.Is(err, nil) || errors.Is(err, context.Canceled) { + return nil + } + delay = c.Delay(tries) + c.Logger.Log(ctx, slog.LevelInfo, "failure consuming Kafka message, will retry", + slog.Attr{Key: "tries", Value: slog.IntValue(tries)}, + slog.Attr{Key: "times", Value: slog.IntValue(c.Times)}, + slog.Attr{Key: "delay", Value: slog.DurationValue(delay)}, + slog.Attr{Key: "err", Value: slog.AnyValue(err)}, + ) + joinedErrors = errors.Join(joinedErrors, err) + tries++ + } + } + + return errors.Join(joinedErrors, c.retryLimitError()) +} + +func (c *NTimesRetryingConsumer) retryLimitError() error { + return fmt.Errorf("%w (%d)", ErrRetriesLimitExceeded, c.Times) +} + +// DelayNone is a function returning a constant "no delay" of 0 seconds. +var DelayNone = func(_ int) time.Duration { return DelayConstant(0) } + +// DelayConstant is a function returning a constant number of seconds. +func DelayConstant(n int) time.Duration { return time.Duration(n) * time.Second } + +// DelayExponentialBinary returns a binary exponential delay. +// +// The delay is 2**tries seconds. 
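A rough sketch of wiring these pieces together; the sarama.ConsumerGroup setup is elided, the topic name is a placeholder, and alertsEventConsumer is a hypothetical SaramaMessageConsumer:

package events

import (
	"context"
	"log/slog"
	"time"

	"github.com/IBM/sarama"
	"github.com/tidepool-org/go-common/asyncevents"
)

// alertsEventConsumer is a hypothetical SaramaMessageConsumer implementation.
type alertsEventConsumer struct{}

func (c *alertsEventConsumer) Consume(ctx context.Context,
	session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
	// Handle the message, then mark it consumed.
	session.MarkMessage(msg, "")
	return nil
}

func run(ctx context.Context, group sarama.ConsumerGroup) error {
	// Retry each message up to 5 times with Fibonacci backoff before failing.
	retrying := &asyncevents.NTimesRetryingConsumer{
		Times:    5,
		Consumer: &alertsEventConsumer{},
		Delay:    asyncevents.DelayFibonacci,
		Logger:   slog.Default(),
	}
	handler := asyncevents.NewSaramaConsumerGroupHandler(slog.Default(), retrying, 30*time.Second)
	consumer := asyncevents.NewSaramaEventsConsumer(group, handler, "alerts.events") // placeholder topic
	return consumer.Run(ctx)
}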
+func DelayExponentialBinary(tries int) time.Duration { + return time.Second * time.Duration(math.Pow(2, float64(tries))) +} + +// DelayFibonacci returns a delay based on the Fibonacci sequence. +func DelayFibonacci(tries int) time.Duration { + return time.Second * time.Duration(Fib(tries)) +} + +// Fib returns the nth number in the Fibonacci sequence. +func Fib(n int) int { + if n == 0 { + return 0 + } else if n < 3 { + return 1 + } + + n1, n2 := 1, 1 + for i := 3; i <= n; i++ { + n1, n2 = n1+n2, n1 + } + + return n1 +} diff --git a/vendor/github.com/tidepool-org/go-common/events/config.go b/vendor/github.com/tidepool-org/go-common/events/config.go index a07d70ed6e..5deff14d06 100644 --- a/vendor/github.com/tidepool-org/go-common/events/config.go +++ b/vendor/github.com/tidepool-org/go-common/events/config.go @@ -2,6 +2,7 @@ package events import ( "errors" + "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index 4f15786ed7..0da254b537 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,7 @@ # github.com/IBM/sarama v1.43.2 ## explicit; go 1.19 github.com/IBM/sarama +github.com/IBM/sarama/mocks # github.com/ant0ine/go-json-rest v3.3.2+incompatible ## explicit github.com/ant0ine/go-json-rest/rest @@ -360,14 +361,20 @@ github.com/rinchsan/device-check-go # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/sideshow/apns2 v0.23.0 +## explicit; go 1.15 +github.com/sideshow/apns2 +github.com/sideshow/apns2/payload +github.com/sideshow/apns2/token # github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 ## explicit; go 1.22 github.com/tidepool-org/clinic/client # github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 ## explicit; go 1.22 github.com/tidepool-org/devices/api -# github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 +# github.com/tidepool-org/go-common v0.12.2 ## explicit; go 1.22 +github.com/tidepool-org/go-common/asyncevents github.com/tidepool-org/go-common/clients github.com/tidepool-org/go-common/clients/disc github.com/tidepool-org/go-common/clients/hakken