From 04692d15ba59963404d092708d445f8b48fb33b2 Mon Sep 17 00:00:00 2001 From: TwiN Date: Sat, 17 May 2025 16:10:28 -0400 Subject: [PATCH] feat: Make maximum number of results and events configurable (#1110) --- README.md | 16 +++--- api/api.go | 2 +- api/endpoint_status.go | 42 +++++++-------- api/endpoint_status_test.go | 13 ++++- api/util.go | 10 ++-- api/util_test.go | 68 ++++++++++++++----------- config/config.go | 6 ++- config/config_test.go | 5 ++ config/ui/ui.go | 18 ++++--- storage/config.go | 17 +++++++ storage/store/common/limits.go | 9 ---- storage/store/memory/memory.go | 11 ++-- storage/store/memory/memory_test.go | 5 +- storage/store/memory/uptime_test.go | 3 +- storage/store/memory/util.go | 11 ++-- storage/store/memory/util_bench_test.go | 6 +-- storage/store/memory/util_test.go | 18 +++---- storage/store/sql/sql.go | 26 ++++++---- storage/store/sql/sql_test.go | 44 ++++++++-------- storage/store/store.go | 9 ++-- storage/store/store_bench_test.go | 13 ++--- storage/store/store_test.go | 18 +++---- web/app/public/index.html | 2 +- web/app/src/components/Pagination.vue | 10 +++- web/app/src/views/Details.vue | 2 +- web/app/src/views/Home.vue | 2 +- web/static/css/app.css | 2 +- web/static/index.html | 2 +- web/static/js/app.js | 2 +- 29 files changed, 231 insertions(+), 161 deletions(-) delete mode 100644 storage/store/common/limits.go diff --git a/README.md b/README.md index d2621c7d..a809ae42 100644 --- a/README.md +++ b/README.md @@ -382,12 +382,14 @@ Here are some examples of conditions you can use: ### Storage -| Parameter | Description | Default | -|:------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----------| -| `storage` | Storage configuration | `{}` | -| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`. | `""` | -| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`. | `"memory"` | -| `storage.caching` | Whether to use write-through caching. Improves loading time for large dashboards.
Only supported if `storage.type` is `sqlite` or `postgres` | `false` | +| Parameter | Description | Default | +|:------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----------| +| `storage` | Storage configuration | `{}` | +| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`. | `""` | +| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`. | `"memory"` | +| `storage.caching` | Whether to use write-through caching. Improves loading time for large dashboards.
Only supported if `storage.type` is `sqlite` or `postgres` | `false` | +| `storage.maximum-number-of-results` | The maximum number of results that an endpoint can have | `100` | +| `storage.maximum-number-of-events` | The maximum number of events that an endpoint can have | `50` | The results for each endpoint health check as well as the data for uptime and the past events must be persisted so that they can be displayed on the dashboard. These parameters allow you to configure the storage in question. @@ -398,6 +400,8 @@ so that they can be displayed on the dashboard. These parameters allow you to co # Because the data is stored in memory, the data will not survive a restart. storage: type: memory + maximum-number-of-results: 200 + maximum-number-of-events: 5 ``` - If `storage.type` is `sqlite`, `storage.path` must not be blank: ```yaml diff --git a/api/api.go b/api/api.go index b918af2a..b5e04ff7 100644 --- a/api/api.go +++ b/api/api.go @@ -126,6 +126,6 @@ func (a *API) createRouter(cfg *config.Config) *fiber.App { } } protectedAPIRouter.Get("/v1/endpoints/statuses", EndpointStatuses(cfg)) - protectedAPIRouter.Get("/v1/endpoints/:key/statuses", EndpointStatus) + protectedAPIRouter.Get("/v1/endpoints/:key/statuses", EndpointStatus(cfg)) return app } diff --git a/api/endpoint_status.go b/api/endpoint_status.go index 691fa8ca..4d8777d8 100644 --- a/api/endpoint_status.go +++ b/api/endpoint_status.go @@ -20,7 +20,7 @@ import ( // Due to how intensive this operation can be on the storage, this function leverages a cache. func EndpointStatuses(cfg *config.Config) fiber.Handler { return func(c *fiber.Ctx) error { - page, pageSize := extractPageAndPageSizeFromRequest(c) + page, pageSize := extractPageAndPageSizeFromRequest(c, cfg.Storage.MaximumNumberOfResults) value, exists := cache.Get(fmt.Sprintf("endpoint-status-%d-%d", page, pageSize)) var data []byte if !exists { @@ -83,25 +83,27 @@ func getEndpointStatusesFromRemoteInstances(remoteConfig *remote.Config) ([]*end } // EndpointStatus retrieves a single endpoint.Status by group and endpoint name -func EndpointStatus(c *fiber.Ctx) error { - page, pageSize := extractPageAndPageSizeFromRequest(c) - endpointStatus, err := store.Get().GetEndpointStatusByKey(c.Params("key"), paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, common.MaximumNumberOfEvents)) - if err != nil { - if errors.Is(err, common.ErrEndpointNotFound) { - return c.Status(404).SendString(err.Error()) +func EndpointStatus(cfg *config.Config) fiber.Handler { + return func(c *fiber.Ctx) error { + page, pageSize := extractPageAndPageSizeFromRequest(c, cfg.Storage.MaximumNumberOfResults) + endpointStatus, err := store.Get().GetEndpointStatusByKey(c.Params("key"), paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, cfg.Storage.MaximumNumberOfEvents)) + if err != nil { + if errors.Is(err, common.ErrEndpointNotFound) { + return c.Status(404).SendString(err.Error()) + } + logr.Errorf("[api.EndpointStatus] Failed to retrieve endpoint status: %s", err.Error()) + return c.Status(500).SendString(err.Error()) } - logr.Errorf("[api.EndpointStatus] Failed to retrieve endpoint status: %s", err.Error()) - return c.Status(500).SendString(err.Error()) + if endpointStatus == nil { // XXX: is this check necessary? 
+ logr.Errorf("[api.EndpointStatus] Endpoint with key=%s not found", c.Params("key")) + return c.Status(404).SendString("not found") + } + output, err := json.Marshal(endpointStatus) + if err != nil { + logr.Errorf("[api.EndpointStatus] Unable to marshal object to JSON: %s", err.Error()) + return c.Status(500).SendString("unable to marshal object to JSON") + } + c.Set("Content-Type", "application/json") + return c.Status(200).Send(output) } - if endpointStatus == nil { // XXX: is this check necessary? - logr.Errorf("[api.EndpointStatus] Endpoint with key=%s not found", c.Params("key")) - return c.Status(404).SendString("not found") - } - output, err := json.Marshal(endpointStatus) - if err != nil { - logr.Errorf("[api.EndpointStatus] Unable to marshal object to JSON: %s", err.Error()) - return c.Status(500).SendString("unable to marshal object to JSON") - } - c.Set("Content-Type", "application/json") - return c.Status(200).Send(output) } diff --git a/api/endpoint_status_test.go b/api/endpoint_status_test.go index 08725393..f835db06 100644 --- a/api/endpoint_status_test.go +++ b/api/endpoint_status_test.go @@ -9,6 +9,7 @@ import ( "github.com/TwiN/gatus/v5/config" "github.com/TwiN/gatus/v5/config/endpoint" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store" "github.com/TwiN/gatus/v5/watchdog" ) @@ -95,6 +96,10 @@ func TestEndpointStatus(t *testing.T) { Group: "core", }, }, + Storage: &storage.Config{ + MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults, + MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents, + }, } watchdog.UpdateEndpointStatuses(cfg.Endpoints[0], &endpoint.Result{Success: true, Duration: time.Millisecond, Timestamp: time.Now()}) watchdog.UpdateEndpointStatuses(cfg.Endpoints[1], &endpoint.Result{Success: false, Duration: time.Second, Timestamp: time.Now()}) @@ -156,7 +161,13 @@ func TestEndpointStatuses(t *testing.T) { // Can't be bothered dealing with timezone issues on the worker that runs the automated tests firstResult.Timestamp = time.Time{} secondResult.Timestamp = time.Time{} - api := New(&config.Config{Metrics: true}) + api := New(&config.Config{ + Metrics: true, + Storage: &storage.Config{ + MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults, + MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents, + }, + }) router := api.Router() type Scenario struct { Name string diff --git a/api/util.go b/api/util.go index 90e647b5..766ab782 100644 --- a/api/util.go +++ b/api/util.go @@ -3,7 +3,6 @@ package api import ( "strconv" - "github.com/TwiN/gatus/v5/storage/store/common" "github.com/gofiber/fiber/v2" ) @@ -13,12 +12,9 @@ const ( // DefaultPageSize is the default page siZE to use if none is specified or an invalid value is provided DefaultPageSize = 20 - - // MaximumPageSize is the maximum page size allowed - MaximumPageSize = common.MaximumNumberOfResults ) -func extractPageAndPageSizeFromRequest(c *fiber.Ctx) (page, pageSize int) { +func extractPageAndPageSizeFromRequest(c *fiber.Ctx, maximumNumberOfResults int) (page, pageSize int) { var err error if pageParameter := c.Query("page"); len(pageParameter) == 0 { page = DefaultPage @@ -38,8 +34,8 @@ func extractPageAndPageSizeFromRequest(c *fiber.Ctx) (page, pageSize int) { if err != nil { pageSize = DefaultPageSize } - if pageSize > MaximumPageSize { - pageSize = MaximumPageSize + if pageSize > maximumNumberOfResults { + pageSize = maximumNumberOfResults } else if pageSize < 1 { pageSize = DefaultPageSize } diff --git a/api/util_test.go b/api/util_test.go index 
652146ed..a0e846a9 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -4,54 +4,62 @@ import ( "fmt" "testing" + "github.com/TwiN/gatus/v5/storage" "github.com/gofiber/fiber/v2" "github.com/valyala/fasthttp" ) func TestExtractPageAndPageSizeFromRequest(t *testing.T) { type Scenario struct { - Name string - Page string - PageSize string - ExpectedPage int - ExpectedPageSize int + Name string + Page string + PageSize string + ExpectedPage int + ExpectedPageSize int + MaximumNumberOfResults int } scenarios := []Scenario{ { - Page: "1", - PageSize: "20", - ExpectedPage: 1, - ExpectedPageSize: 20, + Page: "1", + PageSize: "20", + ExpectedPage: 1, + ExpectedPageSize: 20, + MaximumNumberOfResults: 20, }, { - Page: "2", - PageSize: "10", - ExpectedPage: 2, - ExpectedPageSize: 10, + Page: "2", + PageSize: "10", + ExpectedPage: 2, + ExpectedPageSize: 10, + MaximumNumberOfResults: 40, }, { - Page: "2", - PageSize: "10", - ExpectedPage: 2, - ExpectedPageSize: 10, + Page: "2", + PageSize: "10", + ExpectedPage: 2, + ExpectedPageSize: 10, + MaximumNumberOfResults: 200, }, { - Page: "1", - PageSize: "999999", - ExpectedPage: 1, - ExpectedPageSize: MaximumPageSize, + Page: "1", + PageSize: "999999", + ExpectedPage: 1, + ExpectedPageSize: storage.DefaultMaximumNumberOfResults, + MaximumNumberOfResults: 100, }, { - Page: "-1", - PageSize: "-1", - ExpectedPage: DefaultPage, - ExpectedPageSize: DefaultPageSize, + Page: "-1", + PageSize: "-1", + ExpectedPage: DefaultPage, + ExpectedPageSize: DefaultPageSize, + MaximumNumberOfResults: 20, }, { - Page: "invalid", - PageSize: "invalid", - ExpectedPage: DefaultPage, - ExpectedPageSize: DefaultPageSize, + Page: "invalid", + PageSize: "invalid", + ExpectedPage: DefaultPage, + ExpectedPageSize: DefaultPageSize, + MaximumNumberOfResults: 100, }, } for _, scenario := range scenarios { @@ -61,7 +69,7 @@ func TestExtractPageAndPageSizeFromRequest(t *testing.T) { c := app.AcquireCtx(&fasthttp.RequestCtx{}) defer app.ReleaseCtx(c) c.Request().SetRequestURI(fmt.Sprintf("/api/v1/statuses?page=%s&pageSize=%s", scenario.Page, scenario.PageSize)) - actualPage, actualPageSize := extractPageAndPageSizeFromRequest(c) + actualPage, actualPageSize := extractPageAndPageSizeFromRequest(c, scenario.MaximumNumberOfResults) if actualPage != scenario.ExpectedPage { t.Errorf("expected %d, got %d", scenario.ExpectedPage, actualPage) } diff --git a/config/config.go b/config/config.go index 61e1b128..701dd84e 100644 --- a/config/config.go +++ b/config/config.go @@ -280,6 +280,8 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) { if err := validateConnectivityConfig(config); err != nil { return nil, err } + // Cross-config changes + config.UI.MaximumNumberOfResults = config.Storage.MaximumNumberOfResults } return } @@ -303,7 +305,9 @@ func validateRemoteConfig(config *Config) error { func validateStorageConfig(config *Config) error { if config.Storage == nil { config.Storage = &storage.Config{ - Type: storage.TypeMemory, + Type: storage.TypeMemory, + MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults, + MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents, } } else { if err := config.Storage.ValidateAndSetDefaults(); err != nil { diff --git a/config/config_test.go b/config/config_test.go index 8ed3ddf1..12ee5922 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -330,6 +330,8 @@ func TestParseAndValidateConfigBytes(t *testing.T) { storage: type: sqlite path: %s + maximum-number-of-results: 10 + maximum-number-of-events: 5 
maintenance: enabled: true @@ -386,6 +388,9 @@ endpoints: if config.Storage == nil || config.Storage.Path != file || config.Storage.Type != storage.TypeSQLite { t.Error("expected storage to be set to sqlite, got", config.Storage) } + if config.Storage == nil || config.Storage.MaximumNumberOfResults != 10 || config.Storage.MaximumNumberOfEvents != 5 { + t.Error("expected MaximumNumberOfResults and MaximumNumberOfEvents to be set to 10 and 5, got", config.Storage.MaximumNumberOfResults, config.Storage.MaximumNumberOfEvents) + } if config.UI == nil || config.UI.Title != "T" || config.UI.Header != "H" || config.UI.Link != "https://example.org" || len(config.UI.Buttons) != 2 || config.UI.Buttons[0].Name != "Home" || config.UI.Buttons[0].Link != "https://example.org" || config.UI.Buttons[1].Name != "Status page" || config.UI.Buttons[1].Link != "https://status.example.org" { t.Error("expected ui to be set to T, H, https://example.org, 2 buttons, Home and Status page, got", config.UI) } diff --git a/config/ui/ui.go b/config/ui/ui.go index 33acf248..1055a6b0 100644 --- a/config/ui/ui.go +++ b/config/ui/ui.go @@ -5,6 +5,7 @@ import ( "errors" "html/template" + "github.com/TwiN/gatus/v5/storage" static "github.com/TwiN/gatus/v5/web" ) @@ -33,6 +34,8 @@ type Config struct { Buttons []Button `yaml:"buttons,omitempty"` // Buttons to display below the header CustomCSS string `yaml:"custom-css,omitempty"` // Custom CSS to include in the page DarkMode *bool `yaml:"dark-mode,omitempty"` // DarkMode is a flag to enable dark mode by default + + MaximumNumberOfResults int // MaximumNumberOfResults to display on the page, it's not configurable because we're passing it from the storage config } func (cfg *Config) IsDarkMode() bool { @@ -59,13 +62,14 @@ func (btn *Button) Validate() error { // GetDefaultConfig returns a Config struct with the default values func GetDefaultConfig() *Config { return &Config{ - Title: defaultTitle, - Description: defaultDescription, - Header: defaultHeader, - Logo: defaultLogo, - Link: defaultLink, - CustomCSS: defaultCustomCSS, - DarkMode: &defaultDarkMode, + Title: defaultTitle, + Description: defaultDescription, + Header: defaultHeader, + Logo: defaultLogo, + Link: defaultLink, + CustomCSS: defaultCustomCSS, + DarkMode: &defaultDarkMode, + MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults, } } diff --git a/storage/config.go b/storage/config.go index 5ee3539d..a707d5ef 100644 --- a/storage/config.go +++ b/storage/config.go @@ -4,6 +4,11 @@ import ( "errors" ) +const ( + DefaultMaximumNumberOfResults = 100 + DefaultMaximumNumberOfEvents = 50 +) + var ( ErrSQLStorageRequiresPath = errors.New("sql storage requires a non-empty path to be defined") ErrMemoryStorageDoesNotSupportPath = errors.New("memory storage does not support persistence, use sqlite if you want persistence on file") @@ -25,6 +30,12 @@ type Config struct { // as they happen, also known as the write-through caching strategy. // Does not apply if Config.Type is not TypePostgres or TypeSQLite. 
Caching bool `yaml:"caching,omitempty"` + + // MaximumNumberOfResults is the number of results each endpoint should be able to provide + MaximumNumberOfResults int `yaml:"maximum-number-of-results,omitempty"` + + // MaximumNumberOfEvents is the number of events each endpoint should be able to provide + MaximumNumberOfEvents int `yaml:"maximum-number-of-events,omitempty"` } // ValidateAndSetDefaults validates the configuration and sets the default values (if applicable) @@ -38,5 +49,11 @@ func (c *Config) ValidateAndSetDefaults() error { if c.Type == TypeMemory && len(c.Path) > 0 { return ErrMemoryStorageDoesNotSupportPath } + if c.MaximumNumberOfResults <= 0 { + c.MaximumNumberOfResults = DefaultMaximumNumberOfResults + } + if c.MaximumNumberOfEvents <= 0 { + c.MaximumNumberOfEvents = DefaultMaximumNumberOfEvents + } return nil } diff --git a/storage/store/common/limits.go b/storage/store/common/limits.go deleted file mode 100644 index 92007220..00000000 --- a/storage/store/common/limits.go +++ /dev/null @@ -1,9 +0,0 @@ -package common - -const ( - // MaximumNumberOfResults is the maximum number of results that an endpoint can have - MaximumNumberOfResults = 100 - - // MaximumNumberOfEvents is the maximum number of events that an endpoint can have - MaximumNumberOfEvents = 50 -) diff --git a/storage/store/memory/memory.go b/storage/store/memory/memory.go index 6451381d..41714dcd 100644 --- a/storage/store/memory/memory.go +++ b/storage/store/memory/memory.go @@ -17,15 +17,20 @@ type Store struct { sync.RWMutex cache *gocache.Cache + + maximumNumberOfResults int // maximum number of results that an endpoint can have + maximumNumberOfEvents int // maximum number of events that an endpoint can have } // NewStore creates a new store using gocache.Cache // // This store holds everything in memory, and if the file parameter is not blank, // supports eventual persistence. -func NewStore() (*Store, error) { +func NewStore(maximumNumberOfResults, maximumNumberOfEvents int) (*Store, error) { store := &Store{ - cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize), + cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize), + maximumNumberOfResults: maximumNumberOfResults, + maximumNumberOfEvents: maximumNumberOfEvents, } return store, nil } @@ -151,7 +156,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error { Timestamp: time.Now(), }) } - AddResult(status.(*endpoint.Status), result) + AddResult(status.(*endpoint.Status), result, s.maximumNumberOfResults, s.maximumNumberOfEvents) s.cache.Set(key, status) s.Unlock() return nil diff --git a/storage/store/memory/memory_test.go b/storage/store/memory/memory_test.go index 5d489677..544577bf 100644 --- a/storage/store/memory/memory_test.go +++ b/storage/store/memory/memory_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/TwiN/gatus/v5/config/endpoint" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store/common/paging" ) @@ -82,7 +83,7 @@ var ( // Note that are much more extensive tests in /storage/store/store_test.go. 
// This test is simply an extra sanity check func TestStore_SanityCheck(t *testing.T) { - store, _ := NewStore() + store, _ := NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() store.Insert(&testEndpoint, &testSuccessfulResult) endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams()) @@ -122,7 +123,7 @@ func TestStore_SanityCheck(t *testing.T) { } func TestStore_Save(t *testing.T) { - store, err := NewStore() + store, err := NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { t.Fatal("expected no error, got", err.Error()) } diff --git a/storage/store/memory/uptime_test.go b/storage/store/memory/uptime_test.go index 3d9f36c1..61a798a7 100644 --- a/storage/store/memory/uptime_test.go +++ b/storage/store/memory/uptime_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/TwiN/gatus/v5/config/endpoint" + "github.com/TwiN/gatus/v5/storage" ) func TestProcessUptimeAfterResult(t *testing.T) { @@ -50,7 +51,7 @@ func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) { // Start 12 days ago timestamp := now.Add(-12 * 24 * time.Hour) for timestamp.Unix() <= now.Unix() { - AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true}) + AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true}, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if len(status.Uptime.HourlyStatistics) > uptimeCleanUpThreshold { t.Errorf("At no point in time should there be more than %d entries in status.SuccessfulExecutionsPerHour, but there are %d", uptimeCleanUpThreshold, len(status.Uptime.HourlyStatistics)) } diff --git a/storage/store/memory/util.go b/storage/store/memory/util.go index 961f740a..acb17eb1 100644 --- a/storage/store/memory/util.go +++ b/storage/store/memory/util.go @@ -2,7 +2,6 @@ package memory import ( "github.com/TwiN/gatus/v5/config/endpoint" - "github.com/TwiN/gatus/v5/storage/store/common" "github.com/TwiN/gatus/v5/storage/store/common/paging" ) @@ -51,7 +50,7 @@ func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) { // AddResult adds a Result to Status.Results and makes sure that there are // no more than MaximumNumberOfResults results in the Results slice -func AddResult(ss *endpoint.Status, result *endpoint.Result) { +func AddResult(ss *endpoint.Status, result *endpoint.Result, maximumNumberOfResults, maximumNumberOfEvents int) { if ss == nil { return } @@ -59,11 +58,11 @@ func AddResult(ss *endpoint.Status, result *endpoint.Result) { // Check if there's any change since the last result if ss.Results[len(ss.Results)-1].Success != result.Success { ss.Events = append(ss.Events, endpoint.NewEventFromResult(result)) - if len(ss.Events) > common.MaximumNumberOfEvents { + if len(ss.Events) > maximumNumberOfEvents { // Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has // more than one extra element, we can get rid of all of them at once and thus returning the slice to a // length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead - ss.Events = ss.Events[len(ss.Events)-common.MaximumNumberOfEvents:] + ss.Events = ss.Events[len(ss.Events)-maximumNumberOfEvents:] } } } else { @@ -71,11 +70,11 @@ func AddResult(ss *endpoint.Status, result *endpoint.Result) { ss.Events = append(ss.Events, endpoint.NewEventFromResult(result)) } ss.Results = append(ss.Results, result) - if len(ss.Results) > 
common.MaximumNumberOfResults { + if len(ss.Results) > maximumNumberOfResults { // Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more // than one extra element, we can get rid of all of them at once and thus returning the slice to a length of // MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead - ss.Results = ss.Results[len(ss.Results)-common.MaximumNumberOfResults:] + ss.Results = ss.Results[len(ss.Results)-maximumNumberOfResults:] } processUptimeAfterResult(ss.Uptime, result) } diff --git a/storage/store/memory/util_bench_test.go b/storage/store/memory/util_bench_test.go index 36252829..d8f1d26a 100644 --- a/storage/store/memory/util_bench_test.go +++ b/storage/store/memory/util_bench_test.go @@ -4,15 +4,15 @@ import ( "testing" "github.com/TwiN/gatus/v5/config/endpoint" - "github.com/TwiN/gatus/v5/storage/store/common" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store/common/paging" ) func BenchmarkShallowCopyEndpointStatus(b *testing.B) { ep := &testEndpoint status := endpoint.NewStatus(ep.Group, ep.Name) - for i := 0; i < common.MaximumNumberOfResults; i++ { - AddResult(status, &testSuccessfulResult) + for i := 0; i < storage.DefaultMaximumNumberOfResults; i++ { + AddResult(status, &testSuccessfulResult, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) } for n := 0; n < b.N; n++ { ShallowCopyEndpointStatus(status, paging.NewEndpointStatusParams().WithResults(1, 20)) diff --git a/storage/store/memory/util_test.go b/storage/store/memory/util_test.go index 1de445e7..8287dafe 100644 --- a/storage/store/memory/util_test.go +++ b/storage/store/memory/util_test.go @@ -5,24 +5,24 @@ import ( "time" "github.com/TwiN/gatus/v5/config/endpoint" - "github.com/TwiN/gatus/v5/storage/store/common" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store/common/paging" ) func TestAddResult(t *testing.T) { ep := &endpoint.Endpoint{Name: "name", Group: "group"} endpointStatus := endpoint.NewStatus(ep.Group, ep.Name) - for i := 0; i < (common.MaximumNumberOfResults+common.MaximumNumberOfEvents)*2; i++ { - AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: time.Now()}) + for i := 0; i < (storage.DefaultMaximumNumberOfResults+storage.DefaultMaximumNumberOfEvents)*2; i++ { + AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: time.Now()}, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) } - if len(endpointStatus.Results) != common.MaximumNumberOfResults { - t.Errorf("expected endpointStatus.Results to not exceed a length of %d", common.MaximumNumberOfResults) + if len(endpointStatus.Results) != storage.DefaultMaximumNumberOfResults { + t.Errorf("expected endpointStatus.Results to not exceed a length of %d", storage.DefaultMaximumNumberOfResults) } - if len(endpointStatus.Events) != common.MaximumNumberOfEvents { - t.Errorf("expected endpointStatus.Events to not exceed a length of %d", common.MaximumNumberOfEvents) + if len(endpointStatus.Events) != storage.DefaultMaximumNumberOfEvents { + t.Errorf("expected endpointStatus.Events to not exceed a length of %d", storage.DefaultMaximumNumberOfEvents) } // Try to add nil endpointStatus - AddResult(nil, &endpoint.Result{Timestamp: time.Now()}) + AddResult(nil, &endpoint.Result{Timestamp: time.Now()}, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) } func TestShallowCopyEndpointStatus(t 
*testing.T) { @@ -30,7 +30,7 @@ func TestShallowCopyEndpointStatus(t *testing.T) { endpointStatus := endpoint.NewStatus(ep.Group, ep.Name) ts := time.Now().Add(-25 * time.Hour) for i := 0; i < 25; i++ { - AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: ts}) + AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: ts}, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) ts = ts.Add(time.Hour) } if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, -1)).Results) != 0 { diff --git a/storage/store/sql/sql.go b/storage/store/sql/sql.go index 30b7906f..cb0dd058 100644 --- a/storage/store/sql/sql.go +++ b/storage/store/sql/sql.go @@ -28,8 +28,8 @@ const ( // for aesthetic purposes, I deemed it wasn't worth the performance impact of yet another one-to-many table. arraySeparator = "|~|" - eventsCleanUpThreshold = common.MaximumNumberOfEvents + 10 // Maximum number of events before triggering a cleanup - resultsCleanUpThreshold = common.MaximumNumberOfResults + 10 // Maximum number of results before triggering a cleanup + eventsAboveMaximumCleanUpThreshold = 10 // Maximum number of events above the configured maximum before triggering a cleanup + resultsAboveMaximumCleanUpThreshold = 10 // Maximum number of results above the configured maximum before triggering a cleanup uptimeTotalEntriesMergeThreshold = 100 // Maximum number of uptime entries before triggering a merge uptimeAgeCleanUpThreshold = 32 * 24 * time.Hour // Maximum uptime age before triggering a cleanup @@ -58,17 +58,25 @@ type Store struct { // writeThroughCache is a cache used to drastically decrease read latency by pre-emptively // caching writes as they happen. If nil, writes are not cached. 
writeThroughCache *gocache.Cache + + maximumNumberOfResults int // maximum number of results that an endpoint can have + maximumNumberOfEvents int // maximum number of events that an endpoint can have } // NewStore initializes the database and creates the schema if it doesn't already exist in the path specified -func NewStore(driver, path string, caching bool) (*Store, error) { +func NewStore(driver, path string, caching bool, maximumNumberOfResults, maximumNumberOfEvents int) (*Store, error) { if len(driver) == 0 { return nil, ErrDatabaseDriverNotSpecified } if len(path) == 0 { return nil, ErrPathNotSpecified } - store := &Store{driver: driver, path: path} + store := &Store{ + driver: driver, + path: path, + maximumNumberOfResults: maximumNumberOfResults, + maximumNumberOfEvents: maximumNumberOfEvents, + } var err error if store.db, err = sql.Open(driver, path); err != nil { return nil, err @@ -293,10 +301,10 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error { } } } - // Clean up old events if there's more than twice the maximum number of events + // Clean up old events if we're above the threshold // This lets us both keep the table clean without impacting performance too much // (since we're only deleting MaximumNumberOfEvents at a time instead of 1) - if numberOfEvents > eventsCleanUpThreshold { + if numberOfEvents > int64(s.maximumNumberOfEvents+eventsAboveMaximumCleanUpThreshold) { if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil { logr.Errorf("[sql.Insert] Failed to delete old events for endpoint with key=%s: %s", ep.Key(), err.Error()) } @@ -313,7 +321,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error { if err != nil { logr.Errorf("[sql.Insert] Failed to retrieve total number of results for endpoint with key=%s: %s", ep.Key(), err.Error()) } else { - if numberOfResults > resultsCleanUpThreshold { + if numberOfResults > int64(s.maximumNumberOfResults+resultsAboveMaximumCleanUpThreshold) { if err = s.deleteOldEndpointResults(tx, endpointID); err != nil { logr.Errorf("[sql.Insert] Failed to delete old results for endpoint with key=%s: %s", ep.Key(), err.Error()) } @@ -941,7 +949,7 @@ func (s *Store) deleteOldEndpointEvents(tx *sql.Tx, endpointID int64) error { ) `, endpointID, - common.MaximumNumberOfEvents, + s.maximumNumberOfEvents, ) return err } @@ -961,7 +969,7 @@ func (s *Store) deleteOldEndpointResults(tx *sql.Tx, endpointID int64) error { ) `, endpointID, - common.MaximumNumberOfResults, + s.maximumNumberOfResults, ) return err } diff --git a/storage/store/sql/sql_test.go b/storage/store/sql/sql_test.go index 7261c8fa..5dbfb899 100644 --- a/storage/store/sql/sql_test.go +++ b/storage/store/sql/sql_test.go @@ -8,7 +8,7 @@ import ( "github.com/TwiN/gatus/v5/alerting/alert" "github.com/TwiN/gatus/v5/config/endpoint" - "github.com/TwiN/gatus/v5/storage/store/common" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store/common/paging" ) @@ -84,13 +84,13 @@ var ( ) func TestNewStore(t *testing.T) { - if _, err := NewStore("", t.TempDir()+"/TestNewStore.db", false); !errors.Is(err, ErrDatabaseDriverNotSpecified) { + if _, err := NewStore("", t.TempDir()+"/TestNewStore.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents); !errors.Is(err, ErrDatabaseDriverNotSpecified) { t.Error("expected error due to blank driver parameter") } - if _, err := NewStore("sqlite", "", false); !errors.Is(err, ErrPathNotSpecified) { + if _, err := NewStore("sqlite", "", 
false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents); !errors.Is(err, ErrPathNotSpecified) { t.Error("expected error due to blank path parameter") } - if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db", true); err != nil { + if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db", true, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents); err != nil { t.Error("shouldn't have returned any error, got", err.Error()) } else { _ = store.db.Close() @@ -98,7 +98,7 @@ func TestNewStore(t *testing.T) { } func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpOldUptimeEntriesProperly.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpOldUptimeEntriesProperly.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() now := time.Now().Truncate(time.Hour) now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()) @@ -155,7 +155,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) { } func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() now := time.Now().Truncate(time.Hour) now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()) @@ -212,7 +212,7 @@ func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *tes } func TestStore_getEndpointUptime(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Clear() defer store.Close() // Add 768 hourly entries (32 days) @@ -274,13 +274,15 @@ func TestStore_getEndpointUptime(t *testing.T) { } func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Clear() defer store.Close() + resultsCleanUpThreshold := store.maximumNumberOfResults + resultsAboveMaximumCleanUpThreshold + eventsCleanUpThreshold := store.maximumNumberOfEvents + eventsAboveMaximumCleanUpThreshold for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ { store.Insert(&testEndpoint, &testSuccessfulResult) store.Insert(&testEndpoint, &testUnsuccessfulResult) - ss, _ := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults*5).WithEvents(1, common.MaximumNumberOfEvents*5)) + ss, _ := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, storage.DefaultMaximumNumberOfResults*5).WithEvents(1, storage.DefaultMaximumNumberOfEvents*5)) if len(ss.Results) > resultsCleanUpThreshold+1 
{ t.Errorf("number of results shouldn't have exceeded %d, reached %d", resultsCleanUpThreshold, len(ss.Results)) } @@ -291,7 +293,7 @@ func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) { } func TestStore_InsertWithCaching(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() // Add 2 results store.Insert(&testEndpoint, &testSuccessfulResult) @@ -326,7 +328,7 @@ func TestStore_InsertWithCaching(t *testing.T) { func TestStore_Persistence(t *testing.T) { path := t.TempDir() + "/TestStore_Persistence.db" - store, _ := NewStore("sqlite", path, false) + store, _ := NewStore("sqlite", path, false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) store.Insert(&testEndpoint, &testSuccessfulResult) store.Insert(&testEndpoint, &testUnsuccessfulResult) if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); uptime != 0.5 { @@ -341,15 +343,15 @@ func TestStore_Persistence(t *testing.T) { if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24*30), time.Now()); uptime != 0.5 { t.Errorf("the uptime over the past 30d should've been 0.5, got %f", uptime) } - ssFromOldStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents)) + ssFromOldStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, storage.DefaultMaximumNumberOfResults).WithEvents(1, storage.DefaultMaximumNumberOfEvents)) if ssFromOldStore == nil || ssFromOldStore.Group != "group" || ssFromOldStore.Name != "name" || len(ssFromOldStore.Events) != 3 || len(ssFromOldStore.Results) != 2 { store.Close() t.Fatal("sanity check failed") } store.Close() - store, _ = NewStore("sqlite", path, false) + store, _ = NewStore("sqlite", path, false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() - ssFromNewStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents)) + ssFromNewStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, storage.DefaultMaximumNumberOfResults).WithEvents(1, storage.DefaultMaximumNumberOfEvents)) if ssFromNewStore == nil || ssFromNewStore.Group != "group" || ssFromNewStore.Name != "name" || len(ssFromNewStore.Events) != 3 || len(ssFromNewStore.Results) != 2 { t.Fatal("failed sanity check") } @@ -411,7 +413,7 @@ func TestStore_Persistence(t *testing.T) { } func TestStore_Save(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_Save.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_Save.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() if store.Save() != nil { t.Error("Save shouldn't do anything for this store") @@ -421,7 +423,7 @@ func TestStore_Save(t *testing.T) { // Note that are much more extensive tests in /storage/store/store_test.go. 
// This test is simply an extra sanity check func TestStore_SanityCheck(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_SanityCheck.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_SanityCheck.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() store.Insert(&testEndpoint, &testSuccessfulResult) endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams()) @@ -465,7 +467,7 @@ func TestStore_SanityCheck(t *testing.T) { // TestStore_InvalidTransaction tests what happens if an invalid transaction is passed as parameter func TestStore_InvalidTransaction(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InvalidTransaction.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InvalidTransaction.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() tx, _ := store.db.Begin() tx.Commit() @@ -523,7 +525,7 @@ func TestStore_InvalidTransaction(t *testing.T) { } func TestStore_NoRows(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_NoRows.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_NoRows.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() tx, _ := store.db.Begin() defer tx.Rollback() @@ -537,7 +539,7 @@ func TestStore_NoRows(t *testing.T) { // This tests very unlikely cases where a table is deleted. func TestStore_BrokenSchema(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_BrokenSchema.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_BrokenSchema.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil { t.Fatal("expected no error, got", err.Error()) @@ -725,7 +727,7 @@ func TestCacheKey(t *testing.T) { } func TestTriggeredEndpointAlertsPersistence(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestTriggeredEndpointAlertsPersistence.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestTriggeredEndpointAlertsPersistence.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() yes, desc := false, "description" ep := testEndpoint @@ -791,7 +793,7 @@ func TestTriggeredEndpointAlertsPersistence(t *testing.T) { } func TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(t *testing.T) { - store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint.db", false) + store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) defer store.Close() yes, desc := false, "description" ep1 := testEndpoint diff --git a/storage/store/store.go b/storage/store/store.go index 729e6482..f7b581ec 100644 --- a/storage/store/store.go +++ b/storage/store/store.go @@ -111,7 +111,10 @@ func Initialize(cfg *storage.Config) error { if cfg == nil { // This only happens in tests logr.Warn("[store.Initialize] nil storage config passed as parameter. This should only happen in tests. 
Defaulting to an empty config.") - cfg = &storage.Config{} + cfg = &storage.Config{ + MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults, + MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents, + } } if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres { logr.Infof("[store.Initialize] Creating storage provider of type=%s", cfg.Type) @@ -119,14 +122,14 @@ func Initialize(cfg *storage.Config) error { ctx, cancelFunc = context.WithCancel(context.Background()) switch cfg.Type { case storage.TypeSQLite, storage.TypePostgres: - store, err = sql.NewStore(string(cfg.Type), cfg.Path, cfg.Caching) + store, err = sql.NewStore(string(cfg.Type), cfg.Path, cfg.Caching, cfg.MaximumNumberOfResults, cfg.MaximumNumberOfEvents) if err != nil { return err } case storage.TypeMemory: fallthrough default: - store, _ = memory.NewStore() + store, _ = memory.NewStore(cfg.MaximumNumberOfResults, cfg.MaximumNumberOfEvents) } return nil } diff --git a/storage/store/store_bench_test.go b/storage/store/store_bench_test.go index 0ed4c2db..dd4a0dd3 100644 --- a/storage/store/store_bench_test.go +++ b/storage/store/store_bench_test.go @@ -6,17 +6,18 @@ import ( "time" "github.com/TwiN/gatus/v5/config/endpoint" + "github.com/TwiN/gatus/v5/storage" "github.com/TwiN/gatus/v5/storage/store/common/paging" "github.com/TwiN/gatus/v5/storage/store/memory" "github.com/TwiN/gatus/v5/storage/store/sql" ) func BenchmarkStore_GetAllEndpointStatuses(b *testing.B) { - memoryStore, err := memory.NewStore() + memoryStore, err := memory.NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } - sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetAllEndpointStatuses.db", false) + sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetAllEndpointStatuses.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } @@ -81,11 +82,11 @@ func BenchmarkStore_GetAllEndpointStatuses(b *testing.B) { } func BenchmarkStore_Insert(b *testing.B) { - memoryStore, err := memory.NewStore() + memoryStore, err := memory.NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } - sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_Insert.db", false) + sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_Insert.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } @@ -153,11 +154,11 @@ func BenchmarkStore_Insert(b *testing.B) { } func BenchmarkStore_GetEndpointStatusByKey(b *testing.B) { - memoryStore, err := memory.NewStore() + memoryStore, err := memory.NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } - sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetEndpointStatusByKey.db", false) + sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetEndpointStatusByKey.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { b.Fatal("failed to create store:", err.Error()) } diff --git a/storage/store/store_test.go b/storage/store/store_test.go index 6f18f446..1dcfb5ac 100644 --- 
a/storage/store/store_test.go +++ b/storage/store/store_test.go @@ -91,15 +91,15 @@ type Scenario struct { } func initStoresAndBaseScenarios(t *testing.T, testName string) []*Scenario { - memoryStore, err := memory.NewStore() + memoryStore, err := memory.NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { t.Fatal("failed to create store:", err.Error()) } - sqliteStore, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+".db", false) + sqliteStore, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+".db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { t.Fatal("failed to create store:", err.Error()) } - sqliteStoreWithCaching, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+"-with-caching.db", true) + sqliteStoreWithCaching, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+"-with-caching.db", true, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents) if err != nil { t.Fatal("failed to create store:", err.Error()) } @@ -138,7 +138,7 @@ func TestStore_GetEndpointStatusByKey(t *testing.T) { t.Run(scenario.Name, func(t *testing.T) { scenario.Store.Insert(&testEndpoint, &firstResult) scenario.Store.Insert(&testEndpoint, &secondResult) - endpointStatus, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + endpointStatus, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if err != nil { t.Fatal("shouldn't have returned an error, got", err.Error()) } @@ -158,7 +158,7 @@ func TestStore_GetEndpointStatusByKey(t *testing.T) { t.Error("The result at index 0 should've been older than the result at index 1") } scenario.Store.Insert(&testEndpoint, &thirdResult) - endpointStatus, err = scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + endpointStatus, err = scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if err != nil { t.Fatal("shouldn't have returned an error, got", err.Error()) } @@ -176,21 +176,21 @@ func TestStore_GetEndpointStatusForMissingStatusReturnsNil(t *testing.T) { for _, scenario := range scenarios { t.Run(scenario.Name, func(t *testing.T) { scenario.Store.Insert(&testEndpoint, &testSuccessfulResult) - endpointStatus, err := scenario.Store.GetEndpointStatus("nonexistantgroup", "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + endpointStatus, err := scenario.Store.GetEndpointStatus("nonexistantgroup", "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if !errors.Is(err, common.ErrEndpointNotFound) { t.Error("should've returned ErrEndpointNotFound, got", err) } if endpointStatus != nil { t.Errorf("Returned endpoint status for group '%s' and name '%s' not nil after inserting the endpoint into the store", testEndpoint.Group, testEndpoint.Name) } - endpointStatus, err = 
scenario.Store.GetEndpointStatus(testEndpoint.Group, "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + endpointStatus, err = scenario.Store.GetEndpointStatus(testEndpoint.Group, "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if !errors.Is(err, common.ErrEndpointNotFound) { t.Error("should've returned ErrEndpointNotFound, got", err) } if endpointStatus != nil { t.Errorf("Returned endpoint status for group '%s' and name '%s' not nil after inserting the endpoint into the store", testEndpoint.Group, "nonexistantname") } - endpointStatus, err = scenario.Store.GetEndpointStatus("nonexistantgroup", testEndpoint.Name, paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + endpointStatus, err = scenario.Store.GetEndpointStatus("nonexistantgroup", testEndpoint.Name, paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if !errors.Is(err, common.ErrEndpointNotFound) { t.Error("should've returned ErrEndpointNotFound, got", err) } @@ -470,7 +470,7 @@ func TestStore_Insert(t *testing.T) { t.Run(scenario.Name, func(t *testing.T) { scenario.Store.Insert(&testEndpoint, &firstResult) scenario.Store.Insert(&testEndpoint, &secondResult) - ss, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults)) + ss, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults)) if err != nil { t.Error("shouldn't have returned an error, got", err) } diff --git a/web/app/public/index.html b/web/app/public/index.html index 31982192..87d01e8f 100644 --- a/web/app/public/index.html +++ b/web/app/public/index.html @@ -3,7 +3,7 @@ {{ .UI.Title }} diff --git a/web/app/src/components/Pagination.vue b/web/app/src/components/Pagination.vue index 3f909e1f..b8ab1ff5 100644 --- a/web/app/src/components/Pagination.vue +++ b/web/app/src/components/Pagination.vue @@ -1,7 +1,7 @@
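
For reference, here is a minimal standalone sketch of the bounded-history behaviour this patch makes configurable: `AddResult` in `storage/store/memory/util.go` appends each new result and then trims the slice back to the last `maximumNumberOfResults` entries, so the history never grows past the configured maximum even if it somehow ends up more than one element over the limit. The `result` type and `appendBounded` helper below are simplified stand-ins for illustration only, not code from the repository.

```go
package main

import "fmt"

// result is a simplified stand-in for endpoint.Result.
type result struct {
	success bool
}

// appendBounded appends a result and keeps only the last `maximum` entries,
// mirroring the slice-trimming approach used by AddResult in the patch.
func appendBounded(results []*result, r *result, maximum int) []*result {
	results = append(results, r)
	if len(results) > maximum {
		// Trimming to the last `maximum` elements also covers the unlikely
		// case where the slice is more than one element over the limit.
		results = results[len(results)-maximum:]
	}
	return results
}

func main() {
	var history []*result
	for i := 0; i < 10; i++ {
		history = appendBounded(history, &result{success: i%2 == 0}, 5)
	}
	fmt.Println(len(history)) // prints 5: only the most recent results are kept
}
```

The same pattern is applied to events with `maximumNumberOfEvents`, which is why both values flow from `storage.Config` into the memory and SQL stores in this change.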