Compare commits
15 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 08aba6cd51 | |
| | d3805cd77a | |
| | dd70136e6c | |
| | a94c480c22 | |
| | 10fd4ecd6b | |
| | 9287e2f9e2 | |
| | 257f859825 | |
| | 3a4ab62ddd | |
| | a4e9d8e9b0 | |
| | 3be6d04d29 | |
| | b59ff6f89e | |
| | 813fea93ee | |
| | 8f50e44b45 | |
| | fb2448c15a | |
| | db575aad13 | |
@@ -1,4 +1,4 @@
examples
.examples
Dockerfile
.github
.idea
@@ -1,6 +1,6 @@
storage:
  type: postgres
  file: "postgres://username:password@postgres:5432/gatus?sslmode=disable"
  path: "postgres://username:password@postgres:5432/gatus?sslmode=disable"

endpoints:
  - name: back-end
@@ -1,6 +1,6 @@
storage:
  type: sqlite
  file: /data/data.db
  path: /data/data.db

endpoints:
  - name: back-end
@@ -9,14 +9,17 @@ data:
    endpoints:
      - name: website
        url: https://twin.sh/health
        interval: 1m
        interval: 5m
        conditions:
          - "[STATUS] == 200"
          - "[BODY].status == UP"

      - name: github
        url: https://api.github.com/healthz
        interval: 5m
        conditions:
          - "[STATUS] == 200"

      - name: cat-fact
        url: "https://cat-fact.herokuapp.com/facts/random"
        interval: 5m
@@ -27,11 +30,18 @@ data:
          - "[BODY].text == pat(*cat*)"
          - "[STATUS] == pat(2*)"
          - "[CONNECTED] == true"

      - name: example
        url: https://example.com/
        conditions:
          - "[STATUS] == 200"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gatus
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -41,14 +51,16 @@ spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: gatus
      app: gatus
  template:
    metadata:
      labels:
        k8s-app: gatus
        app: gatus
      name: gatus
      namespace: kube-system
    spec:
      serviceAccountName: gatus
      terminationGracePeriodSeconds: 5
      containers:
        - image: twinproduction/gatus
          imagePullPolicy: IfNotPresent
@@ -84,4 +96,4 @@ spec:
      protocol: TCP
      targetPort: 8080
  selector:
    k8s-app: gatus
    app: gatus
.github/workflows/build.yml (vendored, 8 changes)
@@ -17,7 +17,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
          go-version: 1.17
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
      - name: Build binary to make sure it works
@@ -25,9 +25,9 @@ jobs:
      - name: Test
        # We're using "sudo" because one of the tests leverages ping, which requires super-user privileges.
        # As for the 'env "PATH=$PATH" "GOROOT=$GOROOT"', we need it to use the same "go" executable that
        # was configured by the "Set up Go 1.15" step (otherwise, it'd use sudo's "go" executable)
        # was configured by the "Set up Go" step (otherwise, it'd use sudo's "go" executable)
        run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
      - name: Codecov
        uses: codecov/codecov-action@v1.5.2
        uses: codecov/codecov-action@v2.1.0
        with:
          file: ./coverage.txt
          files: ./coverage.txt
README.md (49 changes)
@@ -19,7 +19,7 @@ core applications: https://status.twin.sh/
<details>
<summary><b>Quick start</b></summary>

```
```console
docker run -p 8080:8080 --name gatus twinproduction/gatus
```
For more details, see [Usage](#usage)
@@ -235,26 +235,31 @@ Here are some examples of conditions you can use:
| Parameter      | Description                                                                           | Default    |
|:---------------|:--------------------------------------------------------------------------------------|:-----------|
| `storage`      | Storage configuration                                                                 | `{}`       |
| `storage.file` | Path to persist the data in. If the type is `memory`, data is persisted on interval.  | `""`       |
| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres` (ALPHA).                 | `"memory"` |
| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`.        | `""`       |
| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`.                         | `"memory"` |

- If `storage.type` is `memory` (default) and `storage.file` is set to a non-blank value.
  Furthermore, the data is periodically persisted, but everything remains in memory.
- If `storage.type` is `sqlite`, `storage.file` must not be blank:
- If `storage.type` is `memory` (default):
  ```yaml
  # Note that this is the default value, and you can omit the storage configuration altogether to achieve the same result.
  # Because the data is stored in memory, the data will not survive a restart.
  storage:
    type: memory
  ```
- If `storage.type` is `sqlite`, `storage.path` must not be blank:
  ```yaml
  storage:
    type: sqlite
    file: data.db
    path: data.db
  ```
  See [examples/docker-compose-sqlite-storage](examples/docker-compose-sqlite-storage) for an example.
  See [examples/docker-compose-sqlite-storage](.examples/docker-compose-sqlite-storage) for an example.

- If `storage.type` is `postgres`, `storage.file` must be the connection URL:
- If `storage.type` is `postgres`, `storage.path` must be the connection URL:
  ```yaml
  storage:
    type: postgres
    file: "postgres://user:password@127.0.0.1:5432/gatus?sslmode=disable"
    path: "postgres://user:password@127.0.0.1:5432/gatus?sslmode=disable"
  ```
  See [examples/docker-compose-postgres-storage](examples/docker-compose-postgres-storage) for an example.
  See [examples/docker-compose-postgres-storage](.examples/docker-compose-postgres-storage) for an example.


### Client configuration
@@ -798,29 +803,29 @@ maintenance:


## Deployment
Many examples can be found in the [examples](examples) folder, but this section will focus on the most popular ways of deploying Gatus.
Many examples can be found in the [.examples](.examples) folder, but this section will focus on the most popular ways of deploying Gatus.


### Docker
To run Gatus locally with Docker:
```
```console
docker run -p 8080:8080 --name gatus twinproduction/gatus
```

Other than using one of the examples provided in the `examples` folder, you can also try it out locally by
Other than using one of the examples provided in the [.examples](.examples) folder, you can also try it out locally by
creating a configuration file, we'll call it `config.yaml` for this example, and running the following
command:
```
```console
docker run -p 8080:8080 --mount type=bind,source="$(pwd)"/config.yaml,target=/config/config.yaml --name gatus twinproduction/gatus
```

If you're on Windows, replace `"$(pwd)"` by the absolute path to your current directory, e.g.:
```
```console
docker run -p 8080:8080 --mount type=bind,source=C:/Users/Chris/Desktop/config.yaml,target=/config/config.yaml --name gatus twinproduction/gatus
```

To build the image locally:
```
```console
docker build . -t twinproduction/gatus
```

@@ -845,7 +850,7 @@ Gatus can be deployed on Terraform by using the following module: [terraform-kub


## Running the tests
```
```console
go test ./... -mod vendor
```

@@ -937,8 +942,8 @@ endpoints:
      - "[CONNECTED] == true"
```

Placeholders `[STATUS]` and `[BODY]` as well as the fields `endpoints[].body`, `endpoints[].insecure`,
`endpoints[].headers`, `endpoints[].method` and `endpoints[].graphql` are not supported for TCP endpoints.
Placeholders `[STATUS]` and `[BODY]` as well as the fields `endpoints[].body`, `endpoints[].headers`,
`endpoints[].method` and `endpoints[].graphql` are not supported for TCP endpoints.

**NOTE**: `[CONNECTED] == true` does not guarantee that the endpoint itself is healthy - it only guarantees that there's
something at the given address listening to the given port, and that a connection to that address was successfully
@@ -991,7 +996,7 @@ endpoints:
    url: "starttls://smtp.gmail.com:587"
    interval: 30m
    client:
    timeout: 5s
      timeout: 5s
    conditions:
      - "[CONNECTED] == true"
      - "[CERTIFICATE_EXPIRATION] > 48h"
@@ -1006,7 +1011,7 @@ endpoints:
    url: "tls://ldap.example.com:636"
    interval: 30m
    client:
    timeout: 5s
      timeout: 5s
    conditions:
      - "[CONNECTED] == true"
      - "[CERTIFICATE_EXPIRATION] > 48h"

@@ -71,15 +71,15 @@ func (provider *AlertProvider) ToCustomAlertProvider(endpoint *core.Endpoint, al
			"source": "%s",
			"severity": "critical"
		}
	}`, provider.getPagerDutyIntegrationKeyForGroup(endpoint.Group), resolveKey, eventAction, message, endpoint.Name),
	}`, provider.getIntegrationKeyForGroup(endpoint.Group), resolveKey, eventAction, message, endpoint.Name),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	}
}

// getPagerDutyIntegrationKeyForGroup returns the appropriate pagerduty integration key for a given group
func (provider *AlertProvider) getPagerDutyIntegrationKeyForGroup(group string) string {
// getIntegrationKeyForGroup returns the appropriate pagerduty integration key for a given group
func (provider *AlertProvider) getIntegrationKeyForGroup(group string) string {
	if provider.Overrides != nil {
		for _, override := range provider.Overrides {
			if group == override.Group {
@@ -87,10 +87,7 @@ func (provider *AlertProvider) getPagerDutyIntegrationKeyForGroup(group string)
			}
		}
	}
	if provider.IntegrationKey != "" {
		return provider.IntegrationKey
	}
	return ""
	return provider.IntegrationKey
}

// GetDefaultAlert returns the provider's default alert configuration

@@ -161,7 +161,7 @@ func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlertAndOverride(t *tes
	}
}

func TestAlertProvider_getPagerDutyIntegrationKey(t *testing.T) {
func TestAlertProvider_getIntegrationKeyForGroup(t *testing.T) {
	scenarios := []struct {
		Name     string
		Provider AlertProvider
@@ -217,7 +217,7 @@ func TestAlertProvider_getPagerDutyIntegrationKey(t *testing.T) {
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			if output := scenario.Provider.getPagerDutyIntegrationKeyForGroup(scenario.InputGroup); output != scenario.ExpectedOutput {
			if output := scenario.Provider.getIntegrationKeyForGroup(scenario.InputGroup); output != scenario.ExpectedOutput {
				t.Errorf("expected %s, got %s", scenario.ExpectedOutput, output)
			}
		})
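The PagerDuty change above renames `getPagerDutyIntegrationKeyForGroup` to `getIntegrationKeyForGroup` and collapses the fallback so the provider-level key is returned whenever no group override matches. Below is a minimal, self-contained sketch of that lookup pattern; the `override` type and function names are illustrative, not the provider's actual declarations.

```go
package main

import "fmt"

// override mirrors the idea of a per-group integration key override (hypothetical names).
type override struct {
	Group          string
	IntegrationKey string
}

// integrationKeyForGroup returns the override's key when the group matches,
// otherwise it falls back to the default key, empty or not.
func integrationKeyForGroup(defaultKey string, overrides []override, group string) string {
	for _, o := range overrides {
		if o.Group == group {
			return o.IntegrationKey
		}
	}
	return defaultKey
}

func main() {
	overrides := []override{{Group: "core", IntegrationKey: "key-core"}}
	fmt.Println(integrationKeyForGroup("key-default", overrides, "core"))  // key-core
	fmt.Println(integrationKeyForGroup("key-default", overrides, "other")) // key-default
}
```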
@@ -43,28 +43,28 @@ var (
// Config is the main configuration structure
type Config struct {
	// Debug Whether to enable debug logs
	Debug bool `yaml:"debug"`
	Debug bool `yaml:"debug,omitempty"`

	// Metrics Whether to expose metrics at /metrics
	Metrics bool `yaml:"metrics"`
	Metrics bool `yaml:"metrics,omitempty"`

	// SkipInvalidConfigUpdate Whether to make the application ignore invalid configuration
	// if the configuration file is updated while the application is running
	SkipInvalidConfigUpdate bool `yaml:"skip-invalid-config-update"`
	SkipInvalidConfigUpdate bool `yaml:"skip-invalid-config-update,omitempty"`

	// DisableMonitoringLock Whether to disable the monitoring lock
	// The monitoring lock is what prevents multiple endpoints from being processed at the same time.
	// Disabling this may lead to inaccurate response times
	DisableMonitoringLock bool `yaml:"disable-monitoring-lock"`
	DisableMonitoringLock bool `yaml:"disable-monitoring-lock,omitempty"`

	// Security Configuration for securing access to Gatus
	Security *security.Config `yaml:"security"`
	Security *security.Config `yaml:"security,omitempty"`

	// Alerting Configuration for alerting
	Alerting *alerting.Config `yaml:"alerting"`
	Alerting *alerting.Config `yaml:"alerting,omitempty"`

	// Endpoints List of endpoints to monitor
	Endpoints []*core.Endpoint `yaml:"endpoints"`
	Endpoints []*core.Endpoint `yaml:"endpoints,omitempty"`

	// Services List of endpoints to monitor
	//
@@ -72,19 +72,19 @@ type Config struct {
	// XXX: This is not a typo -- not v4.0.0, but v5.0.0 -- I want to give enough time for people to migrate
	//
	// Deprecated in favor of Endpoints
	Services []*core.Endpoint `yaml:"services"`
	Services []*core.Endpoint `yaml:"services,omitempty"`

	// Storage is the configuration for how the data is stored
	Storage *storage.Config `yaml:"storage"`
	Storage *storage.Config `yaml:"storage,omitempty"`

	// Web is the web configuration for the application
	Web *web.Config `yaml:"web"`
	Web *web.Config `yaml:"web,omitempty"`

	// UI is the configuration for the UI
	UI *ui.Config `yaml:"ui"`
	UI *ui.Config `yaml:"ui,omitempty"`

	// Maintenance is the configuration for creating a maintenance window in which no alerts are sent
	Maintenance *maintenance.Config `yaml:"maintenance"`
	Maintenance *maintenance.Config `yaml:"maintenance,omitempty"`

	filePath string // path to the file from which config was loaded from
	lastFileModTime time.Time // last modification time
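The only change to the struct above is the addition of `,omitempty` to the YAML tags, which keeps zero-valued fields out of any marshalled output. A small hedged illustration of the effect, assuming `gopkg.in/yaml.v2` (any YAML library with the same tag semantics behaves identically); the `config` type below is a stand-in, not the real `Config`:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// config is a stand-in for the struct above, not the real Config type.
type config struct {
	Debug   bool   `yaml:"debug,omitempty"`
	Metrics bool   `yaml:"metrics,omitempty"`
	Title   string `yaml:"title,omitempty"`
}

func main() {
	out, err := yaml.Marshal(config{Metrics: true})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints only "metrics: true"; the zero-valued fields are omitted
}
```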
@@ -149,12 +149,12 @@ func readConfigurationFile(fileName string) (config *Config, err error) {
	return
}

// parseAndValidateConfigBytes parses a Gatus configuration file into a Config struct and validates its parameters
func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
	// Expand environment variables
	yamlBytes = []byte(os.ExpandEnv(string(yamlBytes)))
	// Parse configuration file
	err = yaml.Unmarshal(yamlBytes, &config)
	if err != nil {
	if err = yaml.Unmarshal(yamlBytes, &config); err != nil {
		return
	}
	if config != nil && len(config.Services) > 0 { // XXX: Remove this in v5.0.0
@@ -167,8 +167,6 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
	if config == nil || config.Endpoints == nil || len(config.Endpoints) == 0 {
		err = ErrNoEndpointInConfig
	} else {
		// Note that the functions below may panic, and this is on purpose to prevent Gatus from starting with
		// invalid configurations
		validateAlertingConfig(config.Alerting, config.Endpoints, config.Debug)
		if err := validateSecurityConfig(config); err != nil {
			return nil, err
@@ -197,19 +195,10 @@ func validateStorageConfig(config *Config) error {
		config.Storage = &storage.Config{
			Type: storage.TypeMemory,
		}
	}
	err := storage.Initialize(config.Storage)
	if err != nil {
		return err
	}
	// Remove all EndpointStatus that represent endpoints which no longer exist in the configuration
	var keys []string
	for _, endpoint := range config.Endpoints {
		keys = append(keys, endpoint.Key())
	}
	numberOfEndpointStatusesDeleted := storage.Get().DeleteAllEndpointStatusesNotInKeys(keys)
	if numberOfEndpointStatusesDeleted > 0 {
		log.Printf("[config][validateStorageConfig] Deleted %d endpoint statuses because their matching endpoints no longer existed", numberOfEndpointStatusesDeleted)
	} else {
	if err := config.Storage.ValidateAndSetDefaults(); err != nil {
		return err
	}
	}
	return nil
}

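The refactored `parseAndValidateConfigBytes` keeps the same two-step flow: expand environment variables in the raw bytes, then unmarshal. A minimal sketch of that flow, assuming `gopkg.in/yaml.v2` for the parsing step:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v2"
)

func main() {
	os.Setenv("ENDPOINT_URL", "https://twin.sh/health")
	raw := []byte("endpoints:\n  - name: website\n    url: ${ENDPOINT_URL}\n")
	// Environment variables are substituted before the YAML is parsed,
	// mirroring the os.ExpandEnv call in parseAndValidateConfigBytes.
	raw = []byte(os.ExpandEnv(string(raw)))
	var cfg map[string]interface{}
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(cfg["endpoints"]) // the url now contains the expanded value
}
```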
@@ -20,6 +20,7 @@ import (
	"github.com/TwiN/gatus/v3/config/ui"
	"github.com/TwiN/gatus/v3/config/web"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
)

func TestLoadFileThatDoesNotExist(t *testing.T) {
@@ -44,7 +45,8 @@ func TestParseAndValidateConfigBytes(t *testing.T) {
	}()
	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
storage:
  file: %s
  type: sqlite
  path: %s
maintenance:
  enabled: true
  start: 00:00
@@ -83,6 +85,9 @@ endpoints:
	if config == nil {
		t.Fatal("Config shouldn't have been nil")
	}
	if config.Storage == nil || config.Storage.Path != file || config.Storage.Type != storage.TypeSQLite {
		t.Error("expected storage to be set to sqlite, got", config.Storage)
	}
	if config.UI == nil || config.UI.Title != "Test" {
		t.Error("Expected Config.UI.Title to be Test")
	}
@@ -1297,3 +1302,53 @@ endpoints:
		t.Error("services should've been merged in endpoints")
	}
}

// XXX: Remove this in v4.0.0
func TestParseAndValidateConfigBytes_backwardCompatibleWithStorageFile(t *testing.T) {
	file := t.TempDir() + "/test.db"
	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
storage:
  type: sqlite
  file: %s

endpoints:
  - name: website
    url: https://twin.sh/actuator/health
    conditions:
      - "[STATUS] == 200"
`, file)))
	if err != nil {
		t.Error("expected no error, got", err.Error())
	}
	if config == nil {
		t.Fatal("Config shouldn't have been nil")
	}
	if config.Storage == nil || config.Storage.Path != file || config.Storage.Type != storage.TypeSQLite {
		t.Error("expected storage to be set to sqlite, got", config.Storage)
	}
}

// XXX: Remove this in v4.0.0
func TestParseAndValidateConfigBytes_backwardCompatibleWithStorageTypeMemoryAndFile(t *testing.T) {
	file := t.TempDir() + "/test.db"
	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
storage:
  type: memory
  file: %s

endpoints:
  - name: website
    url: https://twin.sh/actuator/health
    conditions:
      - "[STATUS] == 200"
`, file)))
	if err != nil {
		t.Error("expected no error, got", err.Error())
	}
	if config == nil {
		t.Fatal("Config shouldn't have been nil")
	}
	if config.Storage == nil || config.Storage.Path != file || config.Storage.Type != storage.TypeMemory {
		t.Error("expected storage to be set to memory, got", config.Storage)
	}
}

@@ -7,7 +7,7 @@ import (
	"strings"
	"time"

	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/storage/store/common"
	"github.com/gorilla/mux"
)
@@ -40,7 +40,7 @@ func UptimeBadge(writer http.ResponseWriter, request *http.Request) {
		return
	}
	key := variables["key"]
	uptime, err := storage.Get().GetUptimeByKey(key, from, time.Now())
	uptime, err := store.Get().GetUptimeByKey(key, from, time.Now())
	if err != nil {
		if err == common.ErrEndpointNotFound {
			writer.WriteHeader(http.StatusNotFound)
@@ -79,7 +79,7 @@ func ResponseTimeBadge(writer http.ResponseWriter, request *http.Request) {
		return
	}
	key := variables["key"]
	averageResponseTime, err := storage.Get().GetAverageResponseTimeByKey(key, from, time.Now())
	averageResponseTime, err := store.Get().GetAverageResponseTimeByKey(key, from, time.Now())
	if err != nil {
		if err == common.ErrEndpointNotFound {
			writer.WriteHeader(http.StatusNotFound)
@@ -9,12 +9,12 @@ import (

	"github.com/TwiN/gatus/v3/config"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/watchdog"
)

func TestUptimeBadge(t *testing.T) {
	defer storage.Get().Clear()
	defer store.Get().Clear()
	defer cache.Clear()
	cfg := &config.Config{
		Metrics: true,
@@ -7,7 +7,7 @@ import (
	"sort"
	"time"

	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/storage/store/common"
	"github.com/gorilla/mux"
	"github.com/wcharczuk/go-chart/v2"
@@ -42,7 +42,7 @@ func ResponseTimeChart(writer http.ResponseWriter, r *http.Request) {
		http.Error(writer, "Durations supported: 7d, 24h", http.StatusBadRequest)
		return
	}
	hourlyAverageResponseTime, err := storage.Get().GetHourlyAverageResponseTimeByKey(vars["key"], from, time.Now())
	hourlyAverageResponseTime, err := store.Get().GetHourlyAverageResponseTimeByKey(vars["key"], from, time.Now())
	if err != nil {
		if err == common.ErrEndpointNotFound {
			writer.WriteHeader(http.StatusNotFound)
@@ -8,12 +8,12 @@ import (

	"github.com/TwiN/gatus/v3/config"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/watchdog"
)

func TestResponseTimeChart(t *testing.T) {
	defer storage.Get().Clear()
	defer store.Get().Clear()
	defer cache.Clear()
	cfg := &config.Config{
		Metrics: true,
@@ -10,7 +10,7 @@ import (
	"strings"
	"time"

	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/storage/store/common"
	"github.com/TwiN/gatus/v3/storage/store/common/paging"
	"github.com/TwiN/gocache"
@@ -44,7 +44,7 @@ func EndpointStatuses(writer http.ResponseWriter, r *http.Request) {
	var err error
	buffer := &bytes.Buffer{}
	gzipWriter := gzip.NewWriter(buffer)
	endpointStatuses, err := storage.Get().GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(page, pageSize))
	endpointStatuses, err := store.Get().GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(page, pageSize))
	if err != nil {
		log.Printf("[handler][EndpointStatuses] Failed to retrieve endpoint statuses: %s", err.Error())
		http.Error(writer, err.Error(), http.StatusInternalServerError)
@@ -76,7 +76,7 @@ func EndpointStatuses(writer http.ResponseWriter, r *http.Request) {
func EndpointStatus(writer http.ResponseWriter, r *http.Request) {
	page, pageSize := extractPageAndPageSizeFromRequest(r)
	vars := mux.Vars(r)
	endpointStatus, err := storage.Get().GetEndpointStatusByKey(vars["key"], paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, common.MaximumNumberOfEvents))
	endpointStatus, err := store.Get().GetEndpointStatusByKey(vars["key"], paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, common.MaximumNumberOfEvents))
	if err != nil {
		if err == common.ErrEndpointNotFound {
			http.Error(writer, err.Error(), http.StatusNotFound)
@@ -8,7 +8,7 @@ import (

	"github.com/TwiN/gatus/v3/config"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/watchdog"
)

@@ -84,7 +84,7 @@ var (
)

func TestEndpointStatus(t *testing.T) {
	defer storage.Get().Clear()
	defer store.Get().Clear()
	defer cache.Clear()
	cfg := &config.Config{
		Metrics: true,
@@ -153,12 +153,12 @@ func TestEndpointStatus(t *testing.T) {
}

func TestEndpointStatuses(t *testing.T) {
	defer storage.Get().Clear()
	defer store.Get().Clear()
	defer cache.Clear()
	firstResult := &testSuccessfulResult
	secondResult := &testUnsuccessfulResult
	storage.Get().Insert(&testEndpoint, firstResult)
	storage.Get().Insert(&testEndpoint, secondResult)
	store.Get().Insert(&testEndpoint, firstResult)
	store.Get().Insert(&testEndpoint, secondResult)
	// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
	firstResult.Timestamp = time.Time{}
	secondResult.Timestamp = time.Time{}
@@ -175,37 +175,37 @@ func TestEndpointStatuses(t *testing.T) {
|
||||
Name: "no-pagination",
|
||||
Path: "/api/v1/endpoints/statuses",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}]`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-first-result",
|
||||
Path: "/api/v1/endpoints/statuses?page=1&pageSize=1",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}]`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-second-result",
|
||||
Path: "/api/v1/endpoints/statuses?page=2&pageSize=1",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"}],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"}]}]`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-no-results",
|
||||
Path: "/api/v1/endpoints/statuses?page=5&pageSize=20",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[]}]`,
|
||||
},
|
||||
{
|
||||
Name: "invalid-pagination-should-fall-back-to-default",
|
||||
Path: "/api/v1/endpoints/statuses?page=INVALID&pageSize=INVALID",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}]`,
|
||||
},
|
||||
{ // XXX: Remove this in v4.0.0
|
||||
Name: "backward-compatible-service-status",
|
||||
Path: "/api/v1/services/statuses",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"events":[]}]`,
|
||||
ExpectedBody: `[{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}]`,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -8,12 +8,12 @@ import (

	"github.com/TwiN/gatus/v3/config"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/watchdog"
)

func TestSinglePageApplication(t *testing.T) {
	defer storage.Get().Clear()
	defer store.Get().Clear()
	defer cache.Clear()
	cfg := &config.Config{
		Metrics: true,
@@ -80,7 +80,7 @@ const (
	maximumLengthBeforeTruncatingWhenComparedWithPattern = 25
)

// Condition is a condition that needs to be met in order for a Endpoint to be considered healthy.
// Condition is a condition that needs to be met in order for an Endpoint to be considered healthy.
type Condition string

// evaluate the Condition with the Result of the health check
@@ -283,7 +283,7 @@ func prettifyNumericalParameters(parameters []string, resolvedParameters []int64
	return prettify(parameters, []string{strconv.Itoa(int(resolvedParameters[0])), strconv.Itoa(int(resolvedParameters[1]))}, operator)
}

// XXX: make this configurable? i.e. show-resolved-conditions-on-failure
// prettify returns a string representation of a condition with its parameters resolved between parentheses
func prettify(parameters []string, resolvedParameters []string, operator string) string {
	// Since, in the event of an invalid path, the resolvedParameters also contain the condition itself,
	// we'll return the resolvedParameters as-is.
@@ -79,19 +79,19 @@ type Endpoint struct {
	Conditions []*Condition `yaml:"conditions"`

	// Alerts is the alerting configuration for the endpoint in case of failure
	Alerts []*alert.Alert `yaml:"alerts"`
	Alerts []*alert.Alert `yaml:"alerts,omitempty"`

	// ClientConfig is the configuration of the client used to communicate with the endpoint's target
	ClientConfig *client.Config `yaml:"client"`
	ClientConfig *client.Config `yaml:"client,omitempty"`

	// UIConfig is the configuration for the UI
	UIConfig *ui.Config `yaml:"ui"`
	UIConfig *ui.Config `yaml:"ui,omitempty"`

	// NumberOfFailuresInARow is the number of unsuccessful evaluations in a row
	NumberOfFailuresInARow int
	NumberOfFailuresInARow int `yaml:"-"`

	// NumberOfSuccessesInARow is the number of successful evaluations in a row
	NumberOfSuccessesInARow int
	NumberOfSuccessesInARow int `yaml:"-"`
}

// IsEnabled returns whether the endpoint is enabled or not

@@ -17,7 +17,7 @@ type EndpointStatus struct {
	Results []*Result `json:"results"`

	// Events is a list of events
	Events []*Event `json:"events"`
	Events []*Event `json:"events,omitempty"`

	// Uptime information on the endpoint's uptime
	//

@@ -13,7 +13,7 @@ type Result struct {
	DNSRCode string `json:"-"`

	// Hostname extracted from Endpoint.URL
	Hostname string `json:"hostname"`
	Hostname string `json:"hostname,omitempty"`

	// IP resolved from the Endpoint URL
	IP string `json:"-"`
@@ -25,7 +25,7 @@ type Result struct {
	Duration time.Duration `json:"duration"`

	// Errors encountered during the evaluation of the Endpoint's health
	Errors []string `json:"errors"`
	Errors []string `json:"errors,omitempty"`

	// ConditionResults results of the Endpoint's conditions
	ConditionResults []*ConditionResult `json:"conditionResults"`

main.go (27 changes)
@@ -9,7 +9,7 @@ import (

	"github.com/TwiN/gatus/v3/config"
	"github.com/TwiN/gatus/v3/controller"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/watchdog"
)

@@ -18,6 +18,7 @@ func main() {
	if err != nil {
		panic(err)
	}
	initializeStorage(cfg)
	start(cfg)
	// Wait for termination signal
	signalChannel := make(chan os.Signal, 1)
@@ -46,8 +47,7 @@ func stop() {
}

func save() {
	err := storage.Get().Save()
	if err != nil {
	if err := store.Get().Save(); err != nil {
		log.Println("Failed to save storage provider:", err.Error())
	}
}
@@ -62,6 +62,27 @@ func loadConfiguration() (cfg *config.Config, err error) {
	return
}

// initializeStorage initializes the storage provider
//
// Q: "TwiN, why are you putting this here? Wouldn't it make more sense to have this in the config?!"
// A: Yes. Yes it would make more sense to have it in the config package. But I don't want to import
// the massive SQL dependencies just because I want to import the config, so here we are.
func initializeStorage(cfg *config.Config) {
	err := store.Initialize(cfg.Storage)
	if err != nil {
		panic(err)
	}
	// Remove all EndpointStatus that represent endpoints which no longer exist in the configuration
	var keys []string
	for _, endpoint := range cfg.Endpoints {
		keys = append(keys, endpoint.Key())
	}
	numberOfEndpointStatusesDeleted := store.Get().DeleteAllEndpointStatusesNotInKeys(keys)
	if numberOfEndpointStatusesDeleted > 0 {
		log.Printf("[config][validateStorageConfig] Deleted %d endpoint statuses because their matching endpoints no longer existed", numberOfEndpointStatusesDeleted)
	}
}

func listenToConfigurationFileChanges(cfg *config.Config) {
	for {
		time.Sleep(30 * time.Second)

@@ -1,14 +1,59 @@
package storage

import (
	"errors"
	"log"
)

var (
	ErrSQLStorageRequiresFile          = errors.New("sql storage requires a non-empty file to be defined")
	ErrMemoryStorageDoesNotSupportFile = errors.New("memory storage does not support persistence, use sqlite if you want persistence on file")
	ErrCannotSetBothFileAndPath        = errors.New("file has been deprecated in favor of path: you cannot set both of them")
)

// Config is the configuration for storage
type Config struct {
	// Path is the path used by the store to achieve persistence
	// If blank, persistence is disabled.
	// Note that not all Type support persistence
	//
	// XXX: Rename to path for v4.0.0
	Path string `yaml:"path"`

	// File is the path of the file to use for persistence
	// If blank, persistence is disabled
	//
	// XXX: Rename to path for v4.0.0
	// Deprecated
	File string `yaml:"file"`

	// Type of store
	// If blank, uses the default in-memory store
	Type Type `yaml:"type"`
}

// ValidateAndSetDefaults validates the configuration and sets the default values (if applicable)
func (c *Config) ValidateAndSetDefaults() error {
	if len(c.File) > 0 && len(c.Path) > 0 { // XXX: Remove for v4.0.0
		return ErrCannotSetBothFileAndPath
	} else if len(c.File) > 0 { // XXX: Remove for v4.0.0
		log.Println("WARNING: Your configuration is using 'storage.file', which is deprecated in favor of 'storage.path'")
		log.Println("WARNING: storage.file will be completely removed in v4.0.0, so please update your configuration")
		log.Println("WARNING: See https://github.com/TwiN/gatus/issues/197")
		c.Path = c.File
	}
	if c.Type == "" {
		c.Type = TypeMemory
	}
	if (c.Type == TypePostgres || c.Type == TypeSQLite) && len(c.Path) == 0 {
		return ErrSQLStorageRequiresFile
	}
	if c.Type == TypeMemory && len(c.Path) > 0 {
		log.Println("WARNING: Your configuration is using a storage of type memory with persistence, which has been deprecated")
		log.Println("WARNING: As of v4.0.0, the default storage type (memory) will not support persistence.")
		log.Println("WARNING: If you want persistence, use 'storage.type: sqlite' instead of 'storage.type: memory'")
		log.Println("WARNING: See https://github.com/TwiN/gatus/issues/198")
		// XXX: Uncomment the following line for v4.0.0
		//return ErrMemoryStorageDoesNotSupportFile
	}
	return nil
}

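The new `ValidateAndSetDefaults` is what makes the `file` → `path` rename backward compatible: a config that still sets the deprecated `file` key is accepted (with warnings) and its value is copied into `Path`, while setting both keys is rejected. A quick usage sketch against the package layout shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/TwiN/gatus/v3/storage"
)

func main() {
	// Deprecated key only: the value is carried over to Path and a deprecation warning is logged.
	legacy := &storage.Config{Type: storage.TypeSQLite, File: "/data/data.db"}
	if err := legacy.ValidateAndSetDefaults(); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Println(legacy.Path) // /data/data.db

	// Both keys set: rejected outright with ErrCannotSetBothFileAndPath.
	both := &storage.Config{Type: storage.TypeSQLite, File: "/data/a.db", Path: "/data/b.db"}
	fmt.Println(both.ValidateAndSetDefaults())
}
```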
@@ -1,91 +0,0 @@
package storage

import (
	"context"
	"log"
	"time"

	"github.com/TwiN/gatus/v3/storage/store"
	"github.com/TwiN/gatus/v3/storage/store/memory"
	"github.com/TwiN/gatus/v3/storage/store/sql"
)

var (
	provider store.Store

	// initialized keeps track of whether the storage provider was initialized
	// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
	// every single time Get is called, we'll just lazily keep track of its existence through this variable
	initialized bool

	ctx        context.Context
	cancelFunc context.CancelFunc
)

// Get retrieves the storage provider
func Get() store.Store {
	if !initialized {
		log.Println("[storage][Get] Provider requested before it was initialized, automatically initializing")
		err := Initialize(nil)
		if err != nil {
			panic("failed to automatically initialize store: " + err.Error())
		}
	}
	return provider
}

// Initialize instantiates the storage provider based on the Config provider
func Initialize(cfg *Config) error {
	initialized = true
	var err error
	if cancelFunc != nil {
		// Stop the active autoSaveStore task, if there's already one
		cancelFunc()
	}
	if cfg == nil {
		cfg = &Config{}
	}
	if len(cfg.File) == 0 && cfg.Type != TypePostgres {
		log.Printf("[storage][Initialize] Creating storage provider with type=%s and file=%s", cfg.Type, cfg.File)
	} else {
		log.Printf("[storage][Initialize] Creating storage provider with type=%s", cfg.Type)
	}
	ctx, cancelFunc = context.WithCancel(context.Background())
	switch cfg.Type {
	case TypeSQLite, TypePostgres:
		provider, err = sql.NewStore(string(cfg.Type), cfg.File)
		if err != nil {
			return err
		}
	case TypeMemory:
		fallthrough
	default:
		if len(cfg.File) > 0 {
			provider, err = memory.NewStore(cfg.File)
			if err != nil {
				return err
			}
			go autoSaveStore(ctx, provider, 7*time.Minute)
		} else {
			provider, _ = memory.NewStore("")
		}
	}
	return nil
}

// autoSaveStore automatically calls the Save function of the provider at every interval
func autoSaveStore(ctx context.Context, provider store.Store, interval time.Duration) {
	for {
		select {
		case <-ctx.Done():
			log.Printf("[storage][autoSaveStore] Stopping active job")
			return
		case <-time.After(interval):
			log.Printf("[storage][autoSaveStore] Saving")
			err := provider.Save()
			if err != nil {
				log.Println("[storage][autoSaveStore] Save failed:", err.Error())
			}
		}
	}
}
@@ -1,94 +0,0 @@
package storage

import (
	"testing"
	"time"

	"github.com/TwiN/gatus/v3/storage/store/sql"
)

func TestGet(t *testing.T) {
	store := Get()
	if store == nil {
		t.Error("store should've been automatically initialized")
	}
}

func TestInitialize(t *testing.T) {
	type Scenario struct {
		Name        string
		Cfg         *Config
		ExpectedErr error
	}
	scenarios := []Scenario{
		{
			Name:        "nil",
			Cfg:         nil,
			ExpectedErr: nil,
		},
		{
			Name:        "blank",
			Cfg:         &Config{},
			ExpectedErr: nil,
		},
		{
			Name:        "memory-no-file",
			Cfg:         &Config{Type: TypeMemory},
			ExpectedErr: nil,
		},
		{
			Name:        "memory-with-file",
			Cfg:         &Config{Type: TypeMemory, File: t.TempDir() + "/TestInitialize_memory-with-file.db"},
			ExpectedErr: nil,
		},
		{
			Name:        "sqlite-no-file",
			Cfg:         &Config{Type: TypeSQLite},
			ExpectedErr: sql.ErrFilePathNotSpecified,
		},
		{
			Name:        "sqlite-with-file",
			Cfg:         &Config{Type: TypeSQLite, File: t.TempDir() + "/TestInitialize_sqlite-with-file.db"},
			ExpectedErr: nil,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			err := Initialize(scenario.Cfg)
			if err != scenario.ExpectedErr {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
			}
			if err != nil {
				return
			}
			if cancelFunc == nil {
				t.Error("cancelFunc shouldn't have been nil")
			}
			if ctx == nil {
				t.Error("ctx shouldn't have been nil")
			}
			if provider == nil {
				t.Fatal("provider shouldn't have been nit")
			}
			provider.Close()
			// Try to initialize it again
			err = Initialize(scenario.Cfg)
			if err != scenario.ExpectedErr {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
				return
			}
			provider.Close()
		})
	}
}

func TestAutoSave(t *testing.T) {
	file := t.TempDir() + "/TestAutoSave.db"
	if err := Initialize(&Config{File: file}); err != nil {
		t.Fatal("shouldn't have returned an error")
	}
	go autoSaveStore(ctx, provider, 3*time.Millisecond)
	time.Sleep(15 * time.Millisecond)
	cancelFunc()
	time.Sleep(50 * time.Millisecond)
}
@@ -2,7 +2,11 @@ package memory

import (
	"encoding/gob"
	"io/fs"
	"io/ioutil"
	"log"
	"sort"
	"strings"
	"sync"
	"time"

@@ -24,6 +28,10 @@ func init() {
// Store that leverages gocache
type Store struct {
	sync.RWMutex
	// Deprecated
	//
	// File persistence will no longer be supported as of v4.0.0
	// XXX: Remove me in v4.0.0
	file  string
	cache *gocache.Cache
}
@@ -37,9 +45,24 @@ func NewStore(file string) (*Store, error) {
		file:  file,
		cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
	}
	// XXX: Remove the block below in v4.0.0 because persistence with the memory store will no longer be supported
	// XXX: Make sure to also update gocache to v2.0.0
	if len(file) > 0 {
		_, err := store.cache.ReadFromFile(file)
		if err != nil {
			// XXX: Remove the block below in v4.0.0
			if data, err2 := ioutil.ReadFile(file); err2 == nil {
				isFromOldVersion := strings.Contains(string(data), "*core.ServiceStatus")
				if isFromOldVersion {
					log.Println("WARNING: Couldn't read file due to recent change in v3.3.0, see https://github.com/TwiN/gatus/issues/191")
					log.Println("WARNING: Will automatically rename old file to " + file + ".old and overwrite the current file")
					if err = ioutil.WriteFile(file+".old", data, fs.ModePerm); err != nil {
						log.Println("WARNING: Tried my best to keep the old file, but it wasn't enough. Sorry, your file will be overwritten :(")
					}
					// Return the store regardless of whether there was an error or not
					return store, nil
				}
			}
			return nil, err
		}
	}

@@ -34,8 +34,8 @@ const (
)

var (
	// ErrFilePathNotSpecified is the error returned when path parameter passed in NewStore is blank
	ErrFilePathNotSpecified = errors.New("file path cannot be empty")
	// ErrPathNotSpecified is the error returned when the path parameter passed in NewStore is blank
	ErrPathNotSpecified = errors.New("path cannot be empty")

	// ErrDatabaseDriverNotSpecified is the error returned when the driver parameter passed in NewStore is blank
	ErrDatabaseDriverNotSpecified = errors.New("database driver cannot be empty")
@@ -45,20 +45,20 @@ var (

// Store that leverages a database
type Store struct {
	driver, file string
	driver, path string

	db *sql.DB
}

// NewStore initializes the database and creates the schema if it doesn't already exist in the file specified
// NewStore initializes the database and creates the schema if it doesn't already exist in the path specified
func NewStore(driver, path string) (*Store, error) {
	if len(driver) == 0 {
		return nil, ErrDatabaseDriverNotSpecified
	}
	if len(path) == 0 {
		return nil, ErrFilePathNotSpecified
		return nil, ErrPathNotSpecified
	}
	store := &Store{driver: driver, file: path}
	store := &Store{driver: driver, path: path}
	var err error
	if store.db, err = sql.Open(driver, path); err != nil {
		return nil, err
@@ -342,7 +342,7 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
			query += fmt.Sprintf("$%d,", i+1)
			args = append(args, keys[i])
		}
		query = query[:len(query)-1] + ")" // Remove the last comma and close the parenthesis
		query = query[:len(query)-1] + ")" // Remove the last comma and add the closing parenthesis
		result, err = s.db.Exec(query, args...)
	}
	if err != nil {

@@ -84,7 +84,7 @@ func TestNewStore(t *testing.T) {
	if _, err := NewStore("", "TestNewStore.db"); err != ErrDatabaseDriverNotSpecified {
		t.Error("expected error due to blank driver parameter")
	}
	if _, err := NewStore("sqlite", ""); err != ErrFilePathNotSpecified {
	if _, err := NewStore("sqlite", ""); err != ErrPathNotSpecified {
		t.Error("expected error due to blank path parameter")
	}
	if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db"); err != nil {
@@ -169,8 +169,8 @@ func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
}

func TestStore_Persistence(t *testing.T) {
	file := t.TempDir() + "/TestStore_Persistence.db"
	store, _ := NewStore("sqlite", file)
	path := t.TempDir() + "/TestStore_Persistence.db"
	store, _ := NewStore("sqlite", path)
	store.Insert(&testEndpoint, &testSuccessfulResult)
	store.Insert(&testEndpoint, &testUnsuccessfulResult)
	if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); uptime != 0.5 {
@@ -188,7 +188,7 @@ func TestStore_Persistence(t *testing.T) {
		t.Fatal("sanity check failed")
	}
	store.Close()
	store, _ = NewStore("sqlite", file)
	store, _ = NewStore("sqlite", path)
	defer store.Close()
	ssFromNewStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents))
	if ssFromNewStore == nil || ssFromNewStore.Group != "group" || ssFromNewStore.Name != "name" || len(ssFromNewStore.Events) != 3 || len(ssFromNewStore.Results) != 2 {

@@ -1,9 +1,12 @@
package store

import (
	"context"
	"log"
	"time"

	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store/common/paging"
	"github.com/TwiN/gatus/v3/storage/store/memory"
	"github.com/TwiN/gatus/v3/storage/store/sql"
@@ -56,3 +59,82 @@ var (
	_ Store = (*memory.Store)(nil)
	_ Store = (*sql.Store)(nil)
)

var (
	store Store

	// initialized keeps track of whether the storage provider was initialized
	// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
	// every single time Get is called, we'll just lazily keep track of its existence through this variable
	initialized bool

	ctx        context.Context
	cancelFunc context.CancelFunc
)

func Get() Store {
	if !initialized {
		log.Println("[store][Get] Provider requested before it was initialized, automatically initializing")
		err := Initialize(nil)
		if err != nil {
			panic("failed to automatically initialize store: " + err.Error())
		}
	}
	return store
}

// Initialize instantiates the storage provider based on the Config provider
func Initialize(cfg *storage.Config) error {
	initialized = true
	var err error
	if cancelFunc != nil {
		// Stop the active autoSave task, if there's already one
		cancelFunc()
	}
	if cfg == nil {
		cfg = &storage.Config{}
	}
	if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres {
		log.Printf("[store][Initialize] Creating storage provider with type=%s and file=%s", cfg.Type, cfg.Path)
	} else {
		log.Printf("[store][Initialize] Creating storage provider with type=%s", cfg.Type)
	}
	ctx, cancelFunc = context.WithCancel(context.Background())
	switch cfg.Type {
	case storage.TypeSQLite, storage.TypePostgres:
		store, err = sql.NewStore(string(cfg.Type), cfg.Path)
		if err != nil {
			return err
		}
	case storage.TypeMemory:
		fallthrough
	default:
		if len(cfg.Path) > 0 {
			store, err = memory.NewStore(cfg.Path)
			if err != nil {
				return err
			}
			go autoSave(ctx, store, 7*time.Minute)
		} else {
			store, _ = memory.NewStore("")
		}
	}
	return nil
}

// autoSave automatically calls the Save function of the provider at every interval
func autoSave(ctx context.Context, store Store, interval time.Duration) {
	for {
		select {
		case <-ctx.Done():
			log.Printf("[store][autoSave] Stopping active job")
			return
		case <-time.After(interval):
			log.Printf("[store][autoSave] Saving")
			err := store.Save()
			if err != nil {
				log.Println("[store][autoSave] Save failed:", err.Error())
			}
		}
	}
}

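With the provider logic relocated from `storage` to `storage/store`, callers (as `main.go` now does) initialize the store once and retrieve it anywhere through `store.Get()`. A minimal usage sketch, assuming the package paths shown in this diff:

```go
package main

import (
	"log"

	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
)

func main() {
	// Initialize a sqlite-backed store; postgres works the same way, with a connection URL as Path.
	if err := store.Initialize(&storage.Config{Type: storage.TypeSQLite, Path: "/tmp/gatus.db"}); err != nil {
		log.Fatal(err)
	}
	defer store.Get().Close()
	// From here on, store.Get() returns the initialized provider from any package.
}
```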
@@ -5,6 +5,7 @@ import (
	"time"

	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store/common"
	"github.com/TwiN/gatus/v3/storage/store/common/paging"
	"github.com/TwiN/gatus/v3/storage/store/memory"
@@ -520,3 +521,89 @@ func TestStore_DeleteAllEndpointStatusesNotInKeys(t *testing.T) {
		})
	}
}

func TestGet(t *testing.T) {
	store := Get()
	if store == nil {
		t.Error("store should've been automatically initialized")
	}
}

func TestInitialize(t *testing.T) {
	type Scenario struct {
		Name        string
		Cfg         *storage.Config
		ExpectedErr error
	}
	scenarios := []Scenario{
		{
			Name:        "nil",
			Cfg:         nil,
			ExpectedErr: nil,
		},
		{
			Name:        "blank",
			Cfg:         &storage.Config{},
			ExpectedErr: nil,
		},
		{
			Name:        "memory-no-path",
			Cfg:         &storage.Config{Type: storage.TypeMemory},
			ExpectedErr: nil,
		},
		{ // XXX: Remove for v4.0.0. See https://github.com/TwiN/gatus/issues/198
			Name:        "memory-with-path",
			Cfg:         &storage.Config{Type: storage.TypeMemory, Path: t.TempDir() + "/TestInitialize_memory-with-path.db"},
			ExpectedErr: nil,
		},
		{
			Name:        "sqlite-no-path",
			Cfg:         &storage.Config{Type: storage.TypeSQLite},
			ExpectedErr: sql.ErrPathNotSpecified,
		},
		{
			Name:        "sqlite-with-path",
			Cfg:         &storage.Config{Type: storage.TypeSQLite, Path: t.TempDir() + "/TestInitialize_sqlite-with-path.db"},
			ExpectedErr: nil,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			err := Initialize(scenario.Cfg)
			if err != scenario.ExpectedErr {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
			}
			if err != nil {
				return
			}
			if cancelFunc == nil {
				t.Error("cancelFunc shouldn't have been nil")
			}
			if ctx == nil {
				t.Error("ctx shouldn't have been nil")
			}
			if store == nil {
				t.Fatal("provider shouldn't have been nit")
			}
			store.Close()
			// Try to initialize it again
			err = Initialize(scenario.Cfg)
			if err != scenario.ExpectedErr {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
				return
			}
			store.Close()
		})
	}
}

func TestAutoSave(t *testing.T) {
	file := t.TempDir() + "/TestAutoSave.db"
	if err := Initialize(&storage.Config{Path: file}); err != nil {
		t.Fatal("shouldn't have returned an error")
	}
	go autoSave(ctx, store, 3*time.Millisecond)
	time.Sleep(15 * time.Millisecond)
	cancelFunc()
	time.Sleep(50 * time.Millisecond)
}

@@ -11,7 +11,7 @@ import (
	"github.com/TwiN/gatus/v3/config/maintenance"
	"github.com/TwiN/gatus/v3/core"
	"github.com/TwiN/gatus/v3/metric"
	"github.com/TwiN/gatus/v3/storage"
	"github.com/TwiN/gatus/v3/storage/store"
)

var (
@@ -29,13 +29,13 @@ func Monitor(cfg *config.Config) {
	for _, endpoint := range cfg.Endpoints {
		if endpoint.IsEnabled() {
			// To prevent multiple requests from running at the same time, we'll wait for a little before each iteration
			time.Sleep(1111 * time.Millisecond)
			time.Sleep(777 * time.Millisecond)
			go monitor(endpoint, cfg.Alerting, cfg.Maintenance, cfg.DisableMonitoringLock, cfg.Metrics, cfg.Debug, ctx)
		}
	}
}

// monitor monitors a single endpoint in a loop
// monitor a single endpoint in a loop
func monitor(endpoint *core.Endpoint, alertingConfig *alerting.Config, maintenanceConfig *maintenance.Config, disableMonitoringLock, enabledMetrics, debug bool, ctx context.Context) {
	// Run it immediately on start
	execute(endpoint, alertingConfig, maintenanceConfig, disableMonitoringLock, enabledMetrics, debug)
@@ -88,7 +88,7 @@ func execute(endpoint *core.Endpoint, alertingConfig *alerting.Config, maintenan

// UpdateEndpointStatuses updates the slice of endpoint statuses
func UpdateEndpointStatuses(endpoint *core.Endpoint, result *core.Result) {
	if err := storage.Get().Insert(endpoint, result); err != nil {
	if err := store.Get().Insert(endpoint, result); err != nil {
		log.Println("[watchdog][UpdateEndpointStatuses] Failed to insert data in storage:", err.Error())
	}
}

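The watchdog change above only shortens the stagger between monitor goroutine launches from 1111ms to 777ms. The pattern itself is a plain sleep-before-spawn loop; here is a small standalone sketch of it (the endpoint names are made up):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	endpoints := []string{"website", "github", "cat-fact"}
	var wg sync.WaitGroup
	for _, name := range endpoints {
		// Spacing out the goroutine launches prevents all endpoints from being
		// checked at the exact same instant; the diff lowers this delay to 777ms.
		time.Sleep(777 * time.Millisecond)
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			fmt.Println("monitoring", n)
		}(name)
	}
	wg.Wait()
}
```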