Compare commits
111 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8ec256edbf | |
| | a48ec41bca | |
| | 541e0264ab | |
| | f945e4b8a2 | |
| | 076b92a2b4 | |
| | 02e9f74a04 | |
| | b37dd5e819 | |
| | 1775f80ffe | |
| | 3187db1e9a | |
| | 932eab00a0 | |
| | c842ac2343 | |
| | 6320237326 | |
| | 8fe9d013b5 | |
| | c094c06e56 | |
| | f961bf961e | |
| | 404a3cea64 | |
| | d000460a99 | |
| | 455fae05c1 | |
| | 85dff34350 | |
| | 99fa632021 | |
| | cafcc9d45b | |
| | dc929dac70 | |
| | 42825b62fb | |
| | a89bb392ed | |
| | e7c4d03c22 | |
| | 4e6bf91651 | |
| | de31a7a62e | |
| | 7f0543ebd2 | |
| | 9b893aa4e0 | |
| | 50435f4030 | |
| | 9649d80388 | |
| | 8c3ab1eac2 | |
| | cdb5ba080a | |
| | 0a145da912 | |
| | b603cdb0ea | |
| | 11d1f24ceb | |
| | c74472d332 | |
| | 78a1262e7c | |
| | 7ff8907eda | |
| | 1d21f5889d | |
| | d7d904ae5f | |
| | 7390895514 | |
| | 2873d96b9f | |
| | ea9623f695 | |
| | 9cdef02bdc | |
| | 16229592a2 | |
| | 52ad4ee9e5 | |
| | b47c6dc408 | |
| | 8e2a2c4dbc | |
| | 8698736e7d | |
| | cd1430f043 | |
| | 79bef8d391 | |
| | 9196f57487 | |
| | 1e0d9e184c | |
| | bc42497cb7 | |
| | b0b0ab574d | |
| | e369484e5f | |
| | d18618449f | |
| | 9186049589 | |
| | 4362831f71 | |
| | 99f558d43e | |
| | 4a3d5944b6 | |
| | 688456d7cf | |
| | 431fb3e9f2 | |
| | a1679ddc5e | |
| | d8d8e8720b | |
| | 4a0d9d058a | |
| | c8ccf9b352 | |
| | 45c966fbca | |
| | d8d4756ef3 | |
| | 1e9c54cc0f | |
| | 80570688e1 | |
| | 43e6e3e8f5 | |
| | 8b4c5c20f3 | |
| | 6f6db36b0f | |
| | 467874de10 | |
| | 8337f41425 | |
| | 601d676e34 | |
| | fbb5d48bf7 | |
| | 119b80edc0 | |
| | 99e8cfb1ce | |
| | dcbbec7931 | |
| | 2ccd656386 | |
| | 5755f3a699 | |
| | 911d809376 | |
| | 752c872d3b | |
| | 67a3e4e330 | |
| | 668ed3b1a2 | |
| | dc6cb8fc1d | |
| | f1aa5191bf | |
| | bc6ca2ebd0 | |
| | 30801938b2 | |
| | ddddd405bb | |
| | 2207dd9c32 | |
| | 3204a79eb6 | |
| | e9ac115a95 | |
| | c90c786f39 | |
| | f10e2ac639 | |
| | c2d899f2a3 | |
| | 7415d8e361 | |
| | 298dcc4790 | |
| | 2f2890c093 | |
| | e463aec5f6 | |
| | 6b3e11a47c | |
| | 0985e3bed8 | |
| | 6d8fd267de | |
| | e89bb932ea | |
| | 77737dbab6 | |
| | 271c3dc91d | |
| | 5860a27ab5 | |
| | 819093cb7e | |
@@ -3,3 +3,4 @@ Dockerfile
.github
.idea
.git
+web/app
.github/workflows/build.yml

@@ -15,16 +15,18 @@ jobs:
    timeout-minutes: 5
    steps:
      - name: Set up Go 1.15
-       uses: actions/setup-go@v1
+       uses: actions/setup-go@v2
        with:
          go-version: 1.15
        id: go
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
      - name: Build binary to make sure it works
        run: go build -mod vendor
      - name: Test
-       run: sudo go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
+       # We're using "sudo" because one of the tests leverages ping, which requires super-user privileges.
+       # As for the 'env "PATH=$PATH" "GOROOT=$GOROOT"', we need it to use the same "go" executable that
+       # was configured by the "Set up Go 1.15" step (otherwise, it'd use sudo's "go" executable)
+       run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
      - name: Codecov
        uses: codecov/codecov-action@v1.0.14
        with:
.gitignore

@@ -1,4 +1,4 @@
bin
.idea
.vscode
gatus
gatus
db.db
@@ -13,7 +13,7 @@ RUN CGO_ENABLED=0 GOOS=linux go build -mod vendor -a -installsuffix cgo -o gatus
FROM scratch
COPY --from=builder /app/gatus .
COPY --from=builder /app/config.yaml ./config/config.yaml
-COPY --from=builder /app/static static/
+COPY --from=builder /app/web/static ./web/static
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENV PORT=8080
EXPOSE ${PORT}
Makefile (new file)

@@ -0,0 +1,14 @@
+docker-build:
+	docker build -t twinproduction/gatus:latest .
+
+docker-build-and-run:
+	docker build -t twinproduction/gatus:latest . && docker run -p 8080:8080 --name gatus twinproduction/gatus:latest
+
+build-frontend:
+	npm --prefix web/app run build
+
+run-frontend:
+	npm --prefix web/app run serve
+
+test:
+	go test ./alerting/... ./client/... ./config/... ./controller/... ./core/... ./jsonpath/... ./pattern/... ./security/... ./storage/... ./util/... ./watchdog/... -cover
@@ -2,6 +2,7 @@ package alerting

import (
"github.com/TwinProduction/gatus/alerting/provider/custom"
+"github.com/TwinProduction/gatus/alerting/provider/discord"
"github.com/TwinProduction/gatus/alerting/provider/mattermost"
"github.com/TwinProduction/gatus/alerting/provider/messagebird"
"github.com/TwinProduction/gatus/alerting/provider/pagerduty"

@@ -11,8 +12,11 @@ import (

// Config is the configuration for alerting providers
type Config struct {
-// Slack is the configuration for the slack alerting provider
-Slack *slack.AlertProvider `yaml:"slack"`
+// Custom is the configuration for the custom alerting provider
+Custom *custom.AlertProvider `yaml:"custom"`

+// Discord is the configuration for the discord alerting provider
+Discord *discord.AlertProvider `yaml:"discord"`
+
// Mattermost is the configuration for the mattermost alerting provider
Mattermost *mattermost.AlertProvider `yaml:"mattermost"`

@@ -20,12 +24,12 @@ type Config struct {
// Messagebird is the configuration for the messagebird alerting provider
Messagebird *messagebird.AlertProvider `yaml:"messagebird"`

-// Pagerduty is the configuration for the pagerduty alerting provider
+// PagerDuty is the configuration for the pagerduty alerting provider
PagerDuty *pagerduty.AlertProvider `yaml:"pagerduty"`

+// Slack is the configuration for the slack alerting provider
+Slack *slack.AlertProvider `yaml:"slack"`

// Twilio is the configuration for the twilio alerting provider
Twilio *twilio.AlertProvider `yaml:"twilio"`

-// Custom is the configuration for the custom alerting provider
-Custom *custom.AlertProvider `yaml:"custom"`
}
@@ -2,6 +2,7 @@ package custom

import (
"bytes"
+"errors"
"fmt"
"io/ioutil"
"net/http"

@@ -15,11 +16,12 @@ import (
// AlertProvider is the configuration necessary for sending an alert using a custom HTTP request
// Technically, all alert providers should be reachable using the custom alert provider
type AlertProvider struct {
URL string `yaml:"url"`
Method string `yaml:"method,omitempty"`
Insecure bool `yaml:"insecure,omitempty"`
Body string `yaml:"body,omitempty"`
Headers map[string]string `yaml:"headers,omitempty"`
+Placeholders map[string]map[string]string `yaml:"placeholders,omitempty"`
}

// IsValid returns whether the provider's configuration is valid

@@ -32,10 +34,25 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
return provider
}

+// GetAlertStatePlaceholderValue returns the Placeholder value for ALERT_TRIGGERED_OR_RESOLVED if configured
+func (provider *AlertProvider) GetAlertStatePlaceholderValue(resolved bool) string {
+status := "TRIGGERED"
+if resolved {
+status = "RESOLVED"
+}
+if _, ok := provider.Placeholders["ALERT_TRIGGERED_OR_RESOLVED"]; ok {
+if val, ok := provider.Placeholders["ALERT_TRIGGERED_OR_RESOLVED"][status]; ok {
+return val
+}
+}
+return status
+}
+
func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription string, resolved bool) *http.Request {
body := provider.Body
providerURL := provider.URL
method := provider.Method

if strings.Contains(body, "[ALERT_DESCRIPTION]") {
body = strings.ReplaceAll(body, "[ALERT_DESCRIPTION]", alertDescription)
}

@@ -44,9 +61,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
}
if strings.Contains(body, "[ALERT_TRIGGERED_OR_RESOLVED]") {
if resolved {
-body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", "RESOLVED")
+body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))
} else {
-body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", "TRIGGERED")
+body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(false))
}
}
if strings.Contains(providerURL, "[ALERT_DESCRIPTION]") {

@@ -57,9 +74,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
}
if strings.Contains(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]") {
if resolved {
-providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", "RESOLVED")
+providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))
} else {
-providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", "TRIGGERED")
+providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(false))
}
}
if len(method) == 0 {

@@ -76,6 +93,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
// Send a request to the alert provider and return the body
func (provider *AlertProvider) Send(serviceName, alertDescription string, resolved bool) ([]byte, error) {
if os.Getenv("MOCK_ALERT_PROVIDER") == "true" {
+if os.Getenv("MOCK_ALERT_PROVIDER_ERROR") == "true" {
+return nil, errors.New("error")
+}
return []byte("{}"), nil
}
request := provider.buildHTTPRequest(serviceName, alertDescription, resolved)
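The new `placeholders` map lets the `[ALERT_TRIGGERED_OR_RESOLVED]` token resolve to user-defined values instead of the default `TRIGGERED`/`RESOLVED`. A minimal sketch of how that plays out, using only the fields and the GetAlertStatePlaceholderValue method shown above (the URL and body values are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/alerting/provider/custom"
)

func main() {
	provider := custom.AlertProvider{
		URL:  "https://status.example.com/[ALERT_TRIGGERED_OR_RESOLVED]", // hypothetical endpoint
		Body: "[SERVICE_NAME] - [ALERT_DESCRIPTION]",
		// Only RESOLVED is overridden; TRIGGERED falls back to the default value.
		Placeholders: map[string]map[string]string{
			"ALERT_TRIGGERED_OR_RESOLVED": {
				"RESOLVED": "operational",
			},
		},
	}
	fmt.Println(provider.GetAlertStatePlaceholderValue(true))  // "operational" (configured override)
	fmt.Println(provider.GetAlertStatePlaceholderValue(false)) // "TRIGGERED" (default)
}
```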
@@ -62,9 +62,49 @@ func TestAlertProvider_ToCustomAlertProvider(t *testing.T) {
provider := AlertProvider{URL: "http://example.com"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
-if customAlertProvider != customAlertProvider {
-t.Error("customAlertProvider should've been equal to customAlertProvider")
+if customAlertProvider.URL != "http://example.com" {
+t.Error("expected URL to be http://example.com, got", customAlertProvider.URL)
}
}

+func TestAlertProvider_buildHTTPRequestWithCustomPlaceholder(t *testing.T) {
+const (
+ExpectedURL = "http://example.com/service-name?event=test&description=alert-description"
+ExpectedBody = "service-name,alert-description,test"
+)
+customAlertProvider := &AlertProvider{
+URL: "http://example.com/[SERVICE_NAME]?event=[ALERT_TRIGGERED_OR_RESOLVED]&description=[ALERT_DESCRIPTION]",
+Body: "[SERVICE_NAME],[ALERT_DESCRIPTION],[ALERT_TRIGGERED_OR_RESOLVED]",
+Headers: nil,
+Placeholders: map[string]map[string]string{
+"ALERT_TRIGGERED_OR_RESOLVED": {
+"RESOLVED": "test",
+},
+},
+}
+request := customAlertProvider.buildHTTPRequest("service-name", "alert-description", true)
+if request.URL.String() != ExpectedURL {
+t.Error("expected URL to be", ExpectedURL, "was", request.URL.String())
+}
+body, _ := ioutil.ReadAll(request.Body)
+if string(body) != ExpectedBody {
+t.Error("expected body to be", ExpectedBody, "was", string(body))
+}
+}
+
+func TestAlertProvider_GetAlertStatePlaceholderValueDefaults(t *testing.T) {
+customAlertProvider := &AlertProvider{
+URL: "http://example.com/[SERVICE_NAME]?event=[ALERT_TRIGGERED_OR_RESOLVED]&description=[ALERT_DESCRIPTION]",
+Body: "[SERVICE_NAME],[ALERT_DESCRIPTION],[ALERT_TRIGGERED_OR_RESOLVED]",
+Headers: nil,
+Placeholders: nil,
+}
+if customAlertProvider.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
+t.Error("expected RESOLVED, got", customAlertProvider.GetAlertStatePlaceholderValue(true))
+}
+if customAlertProvider.GetAlertStatePlaceholderValue(false) != "TRIGGERED" {
+t.Error("expected TRIGGERED, got", customAlertProvider.GetAlertStatePlaceholderValue(false))
+}
+}
alerting/provider/discord/discord.go (new file)

@@ -0,0 +1,63 @@
+package discord
+
+import (
+"fmt"
+"net/http"
+
+"github.com/TwinProduction/gatus/alerting/provider/custom"
+"github.com/TwinProduction/gatus/core"
+)
+
+// AlertProvider is the configuration necessary for sending an alert using Discord
+type AlertProvider struct {
+WebhookURL string `yaml:"webhook-url"`
+}
+
+// IsValid returns whether the provider's configuration is valid
+func (provider *AlertProvider) IsValid() bool {
+return len(provider.WebhookURL) > 0
+}
+
+// ToCustomAlertProvider converts the provider into a custom.AlertProvider
+func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
+var message, results string
+var colorCode int
+if resolved {
+message = fmt.Sprintf("An alert for **%s** has been resolved after passing successfully %d time(s) in a row", service.Name, alert.SuccessThreshold)
+colorCode = 3066993
+} else {
+message = fmt.Sprintf("An alert for **%s** has been triggered due to having failed %d time(s) in a row", service.Name, alert.FailureThreshold)
+colorCode = 15158332
+}
+for _, conditionResult := range result.ConditionResults {
+var prefix string
+if conditionResult.Success {
+prefix = ":white_check_mark:"
+} else {
+prefix = ":x:"
+}
+results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
+}
+return &custom.AlertProvider{
+URL: provider.WebhookURL,
+Method: http.MethodPost,
+Body: fmt.Sprintf(`{
+"content": "",
+"embeds": [
+{
+"title": ":helmet_with_white_cross: Gatus",
+"description": "%s:\n> %s",
+"color": %d,
+"fields": [
+{
+"name": "Condition results",
+"value": "%s",
+"inline": false
+}
+]
+}
+]
+}`, message, alert.Description, colorCode, results),
+Headers: map[string]string{"Content-Type": "application/json"},
+}
+}
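For context, a small sketch of how this new Discord provider would be exercised, using only the WebhookURL field, IsValid, and ToCustomAlertProvider shown above (the webhook URL, service name, and alert description are placeholders invented for the example):

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/alerting/provider/discord"
	"github.com/TwinProduction/gatus/core"
)

func main() {
	provider := discord.AlertProvider{WebhookURL: "http://example.com"} // placeholder webhook URL
	if !provider.IsValid() {
		panic("a webhook-url is required")
	}
	// Convert the Discord-specific configuration into the generic custom provider,
	// which is the type that actually builds and sends the HTTP request.
	customAlertProvider := provider.ToCustomAlertProvider(
		&core.Service{Name: "back-end"},
		&core.Alert{Description: "healthcheck failed"},
		&core.Result{},
		false, // triggered rather than resolved
	)
	fmt.Println(customAlertProvider.Method, customAlertProvider.URL)
}
```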
alerting/provider/discord/discord_test.go (new file)

@@ -0,0 +1,65 @@
+package discord
+
+import (
+"encoding/json"
+"net/http"
+"strings"
+"testing"
+
+"github.com/TwinProduction/gatus/core"
+)
+
+func TestAlertProvider_IsValid(t *testing.T) {
+invalidProvider := AlertProvider{WebhookURL: ""}
+if invalidProvider.IsValid() {
+t.Error("provider shouldn't have been valid")
+}
+validProvider := AlertProvider{WebhookURL: "http://example.com"}
+if !validProvider.IsValid() {
+t.Error("provider should've been valid")
+}
+}
+
+func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
+provider := AlertProvider{WebhookURL: "http://example.com"}
+customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
+if customAlertProvider == nil {
+t.Fatal("customAlertProvider shouldn't have been nil")
+}
+if !strings.Contains(customAlertProvider.Body, "resolved") {
+t.Error("customAlertProvider.Body should've contained the substring resolved")
+}
+if customAlertProvider.URL != "http://example.com" {
+t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
+}
+
+func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
+provider := AlertProvider{WebhookURL: "http://example.com"}
+customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
+if customAlertProvider == nil {
+t.Fatal("customAlertProvider shouldn't have been nil")
+}
+if !strings.Contains(customAlertProvider.Body, "triggered") {
+t.Error("customAlertProvider.Body should've contained the substring triggered")
+}
+if customAlertProvider.URL != "http://example.com" {
+t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
+}
@@ -38,7 +38,7 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
} else {
prefix = ":x:"
}
-results += fmt.Sprintf("%s - `%s`\n", prefix, conditionResult.Condition)
+results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
}
return &custom.AlertProvider{
URL: provider.WebhookURL,
@@ -1,6 +1,8 @@
package mattermost

import (
+"encoding/json"
+"net/http"
"strings"
"testing"

@@ -19,23 +21,45 @@ func TestAlertProvider_IsValid(t *testing.T) {
}

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
-provider := AlertProvider{WebhookURL: "http://example.com"}
+provider := AlertProvider{WebhookURL: "http://example.org"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "resolved") {
t.Error("customAlertProvider.Body should've contained the substring resolved")
}
+if customAlertProvider.URL != "http://example.org" {
+t.Errorf("expected URL to be %s, got %s", "http://example.org", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
-provider := AlertProvider{WebhookURL: "http://example.com"}
+provider := AlertProvider{WebhookURL: "http://example.org"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "triggered") {
t.Error("customAlertProvider.Body should've contained the substring triggered")
}
+if customAlertProvider.URL != "http://example.org" {
+t.Errorf("expected URL to be %s, got %s", "http://example.org", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}
@@ -1,6 +1,8 @@
package messagebird

import (
+"encoding/json"
+"net/http"
"strings"
"testing"

@@ -30,11 +32,22 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
}
+if customAlertProvider.URL != "https://rest.messagebird.com/messages" {
+t.Errorf("expected URL to be %s, got %s", "https://rest.messagebird.com/messages", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {

@@ -45,9 +58,20 @@ func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
}
+if customAlertProvider.URL != "https://rest.messagebird.com/messages" {
+t.Errorf("expected URL to be %s, got %s", "https://rest.messagebird.com/messages", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}
@@ -1,6 +1,8 @@
package pagerduty

import (
+"encoding/json"
+"net/http"
"strings"
"testing"

@@ -22,20 +24,42 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
}
+if customAlertProvider.URL != "https://events.pagerduty.com/v2/enqueue" {
+t.Errorf("expected URL to be %s, got %s", "https://events.pagerduty.com/v2/enqueue", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
}
+if customAlertProvider.URL != "https://events.pagerduty.com/v2/enqueue" {
+t.Errorf("expected URL to be %s, got %s", "https://events.pagerduty.com/v2/enqueue", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}
@@ -2,6 +2,7 @@ package provider

import (
"github.com/TwinProduction/gatus/alerting/provider/custom"
+"github.com/TwinProduction/gatus/alerting/provider/discord"
"github.com/TwinProduction/gatus/alerting/provider/mattermost"
"github.com/TwinProduction/gatus/alerting/provider/messagebird"
"github.com/TwinProduction/gatus/alerting/provider/pagerduty"

@@ -22,9 +23,10 @@ type AlertProvider interface {
var (
// Validate interface implementation on compile
_ AlertProvider = (*custom.AlertProvider)(nil)
-_ AlertProvider = (*twilio.AlertProvider)(nil)
-_ AlertProvider = (*slack.AlertProvider)(nil)
+_ AlertProvider = (*discord.AlertProvider)(nil)
_ AlertProvider = (*mattermost.AlertProvider)(nil)
_ AlertProvider = (*messagebird.AlertProvider)(nil)
_ AlertProvider = (*pagerduty.AlertProvider)(nil)
+_ AlertProvider = (*slack.AlertProvider)(nil)
+_ AlertProvider = (*twilio.AlertProvider)(nil)
)
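The `var _ AlertProvider = (*discord.AlertProvider)(nil)` lines above rely on a common Go idiom: assigning a typed nil pointer to a blank, interface-typed variable makes the compiler verify the implementation, so a provider that drifts from the interface breaks the build instead of failing at runtime. A self-contained sketch of the same idiom with hypothetical names:

```go
package main

// Notifier stands in for the AlertProvider interface used above.
type Notifier interface {
	IsValid() bool
}

type emailNotifier struct {
	address string
}

func (n *emailNotifier) IsValid() bool {
	return n.address != ""
}

// Compile-time check: if *emailNotifier ever stops satisfying Notifier,
// this declaration fails to compile.
var _ Notifier = (*emailNotifier)(nil)

func main() {}
```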
@@ -31,11 +31,11 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
for _, conditionResult := range result.ConditionResults {
var prefix string
if conditionResult.Success {
-prefix = ":heavy_check_mark:"
+prefix = ":white_check_mark:"
} else {
prefix = ":x:"
}
-results += fmt.Sprintf("%s - `%s`\n", prefix, conditionResult.Condition)
+results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
}
return &custom.AlertProvider{
URL: provider.WebhookURL,
@@ -1,6 +1,8 @@
package slack

import (
+"encoding/json"
+"net/http"
"strings"
"testing"

@@ -22,20 +24,42 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
provider := AlertProvider{WebhookURL: "http://example.com"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "resolved") {
t.Error("customAlertProvider.Body should've contained the substring resolved")
}
+if customAlertProvider.URL != "http://example.com" {
+t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
provider := AlertProvider{WebhookURL: "http://example.com"}
customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "triggered") {
t.Error("customAlertProvider.Body should've contained the substring triggered")
}
+if customAlertProvider.URL != "http://example.com" {
+t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+body := make(map[string]interface{})
+err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+if err != nil {
+t.Error("expected body to be valid JSON, got error:", err.Error())
+}
}
@@ -1,6 +1,7 @@
package twilio

import (
+"net/http"
"strings"
"testing"

@@ -26,31 +27,49 @@ func TestTwilioAlertProvider_IsValid(t *testing.T) {
func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
provider := AlertProvider{
SID: "1",
-Token: "1",
-From: "1",
-To: "1",
+Token: "2",
+From: "3",
+To: "4",
}
-customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
+customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, true)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
}
+if customAlertProvider.URL != "https://api.twilio.com/2010-04-01/Accounts/1/Messages.json" {
+t.Errorf("expected URL to be %s, got %s", "https://api.twilio.com/2010-04-01/Accounts/1/Messages.json", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+if customAlertProvider.Body != "Body=RESOLVED%3A+service-name+-+alert-description&From=3&To=4" {
+t.Errorf("expected body to be %s, got %s", "Body=RESOLVED%3A+service-name+-+alert-description&From=3&To=4", customAlertProvider.Body)
+}
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
provider := AlertProvider{
-SID: "1",
-Token: "1",
-From: "1",
+SID: "4",
+Token: "3",
+From: "2",
To: "1",
}
-customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
+customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, false)
if customAlertProvider == nil {
-t.Error("customAlertProvider shouldn't have been nil")
+t.Fatal("customAlertProvider shouldn't have been nil")
}
if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
}
+if customAlertProvider.URL != "https://api.twilio.com/2010-04-01/Accounts/4/Messages.json" {
+t.Errorf("expected URL to be %s, got %s", "https://api.twilio.com/2010-04-01/Accounts/4/Messages.json", customAlertProvider.URL)
+}
+if customAlertProvider.Method != http.MethodPost {
+t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+}
+if customAlertProvider.Body != "Body=TRIGGERED%3A+service-name+-+alert-description&From=2&To=1" {
+t.Errorf("expected body to be %s, got %s", "Body=TRIGGERED%3A+service-name+-+alert-description&From=2&To=1", customAlertProvider.Body)
+}
}
@@ -27,6 +27,7 @@ func GetHTTPClient(insecure bool) *http.Client {
Transport: &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 20,
+Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},

@@ -41,6 +42,7 @@ func GetHTTPClient(insecure bool) *http.Client {
Transport: &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 20,
+Proxy: http.ProxyFromEnvironment,
},
}
}
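Both branches of GetHTTPClient now set Proxy: http.ProxyFromEnvironment, so outbound requests respect HTTP_PROXY, HTTPS_PROXY, and NO_PROXY. A standard-library-only sketch of the insecure variant this hunk describes (the helper name and the timeout are illustrative, not part of the change):

```go
package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

// newInsecureHTTPClient mirrors the insecure branch shown above: proxy settings
// come from the environment and TLS certificate verification is skipped.
func newInsecureHTTPClient() *http.Client {
	return &http.Client{
		Timeout: 10 * time.Second, // not part of the diff; added so the sketch is self-contained
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 20,
			Proxy:               http.ProxyFromEnvironment,
			TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		},
	}
}

func main() {
	_ = newInsecureHTTPClient()
}
```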
@@ -1,14 +1,14 @@
services:
-  - name: frontend
+  - name: front-end
    group: core
    url: "https://twinnation.org/health"
    interval: 1m
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
-      - "[RESPONSE_TIME] < 70"
+      - "[RESPONSE_TIME] < 150"

-  - name: backend
+  - name: back-end
    group: core
    url: "http://example.org/"
    interval: 5m
@@ -11,6 +11,8 @@ import (
"github.com/TwinProduction/gatus/core"
"github.com/TwinProduction/gatus/k8s"
"github.com/TwinProduction/gatus/security"
+"github.com/TwinProduction/gatus/storage"
+"github.com/TwinProduction/gatus/util"
"gopkg.in/yaml.v2"
)

@@ -28,9 +30,6 @@ const (

// DefaultPort is the default port the service will listen on
DefaultPort = 8080

-// DefaultContextRoot is the default context root of the web application
-DefaultContextRoot = "/"
)

var (

@@ -74,8 +73,11 @@ type Config struct {
// Kubernetes is the Kubernetes configuration
Kubernetes *k8s.Config `yaml:"kubernetes"`

+// Storage is the configuration for how the data is stored
+Storage *storage.Config `yaml:"storage"`
+
// Web is the configuration for the web listener
-Web *webConfig `yaml:"web"`
+Web *WebConfig `yaml:"web"`
}

// Get returns the configuration, or panics if the configuration hasn't loaded yet
@@ -147,13 +149,33 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
validateServicesConfig(config)
validateKubernetesConfig(config)
validateWebConfig(config)
+validateStorageConfig(config)
}
return
}

+func validateStorageConfig(config *Config) {
+if config.Storage == nil {
+config.Storage = &storage.Config{}
+}
+err := storage.Initialize(config.Storage)
+if err != nil {
+panic(err)
+}
+// Remove all ServiceStatus that represent services which no longer exist in the configuration
+var keys []string
+for _, service := range config.Services {
+keys = append(keys, util.ConvertGroupAndServiceToKey(service.Group, service.Name))
+}
+numberOfServiceStatusesDeleted := storage.Get().DeleteAllServiceStatusesNotInKeys(keys)
+if numberOfServiceStatusesDeleted > 0 {
+log.Printf("[config][validateStorageConfig] Deleted %d service statuses because their matching services no longer existed", numberOfServiceStatusesDeleted)
+}
+}
+
func validateWebConfig(config *Config) {
if config.Web == nil {
-config.Web = &webConfig{Address: DefaultAddress, Port: DefaultPort, ContextRoot: DefaultContextRoot}
+config.Web = &WebConfig{Address: DefaultAddress, Port: DefaultPort}
} else {
config.Web.validateAndSetDefaults()
}

@@ -206,12 +228,13 @@ func validateAlertingConfig(config *Config) {
return
}
alertTypes := []core.AlertType{
-core.SlackAlert,
+core.CustomAlert,
+core.DiscordAlert,
core.MattermostAlert,
core.MessagebirdAlert,
-core.TwilioAlert,
core.PagerDutyAlert,
-core.CustomAlert,
+core.SlackAlert,
+core.TwilioAlert,
}
var validProviders, invalidProviders []core.AlertType
for _, alertType := range alertTypes {

@@ -233,12 +256,18 @@ func validateAlertingConfig(config *Config) {
// GetAlertingProviderByAlertType returns an provider.AlertProvider by its corresponding core.AlertType
func GetAlertingProviderByAlertType(config *Config, alertType core.AlertType) provider.AlertProvider {
switch alertType {
-case core.SlackAlert:
-if config.Alerting.Slack == nil {
+case core.CustomAlert:
+if config.Alerting.Custom == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
-return config.Alerting.Slack
+return config.Alerting.Custom
+case core.DiscordAlert:
+if config.Alerting.Discord == nil {
+// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
+return nil
+}
+return config.Alerting.Discord
case core.MattermostAlert:
if config.Alerting.Mattermost == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil

@@ -251,24 +280,24 @@ func GetAlertingProviderByAlertType(config *Config, alertType core.AlertType) pr
return nil
}
return config.Alerting.Messagebird
-case core.TwilioAlert:
-if config.Alerting.Twilio == nil {
-// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
-return nil
-}
-return config.Alerting.Twilio
case core.PagerDutyAlert:
if config.Alerting.PagerDuty == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.PagerDuty
-case core.CustomAlert:
-if config.Alerting.Custom == nil {
+case core.SlackAlert:
+if config.Alerting.Slack == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
-return config.Alerting.Custom
+return config.Alerting.Slack
+case core.TwilioAlert:
+if config.Alerting.Twilio == nil {
+// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
+return nil
+}
+return config.Alerting.Twilio
}
return nil
}
@@ -6,6 +6,14 @@ import (
"testing"
"time"

+"github.com/TwinProduction/gatus/alerting"
+"github.com/TwinProduction/gatus/alerting/provider/custom"
+"github.com/TwinProduction/gatus/alerting/provider/discord"
+"github.com/TwinProduction/gatus/alerting/provider/mattermost"
+"github.com/TwinProduction/gatus/alerting/provider/messagebird"
+"github.com/TwinProduction/gatus/alerting/provider/pagerduty"
+"github.com/TwinProduction/gatus/alerting/provider/slack"
+"github.com/TwinProduction/gatus/alerting/provider/twilio"
"github.com/TwinProduction/gatus/core"
"github.com/TwinProduction/gatus/k8stest"
v1 "k8s.io/api/core/v1"

@@ -42,7 +50,10 @@ func TestLoadDefaultConfigurationFile(t *testing.T) {
}

func TestParseAndValidateConfigBytes(t *testing.T) {
-config, err := parseAndValidateConfigBytes([]byte(`
+file := t.TempDir() + "/test.db"
+config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
+storage:
+  file: %s
services:
  - name: twinnation
    url: https://twinnation.org/health

@@ -54,9 +65,9 @@ services:
    conditions:
      - "[STATUS] != 400"
      - "[STATUS] != 500"
-`))
+`, file)))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -99,7 +110,7 @@ services:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -119,9 +130,6 @@ services:
if config.Web.Port != DefaultPort {
t.Errorf("Port should have been %d, because it is the default value", DefaultPort)
}
-if config.Web.ContextRoot != DefaultContextRoot {
-t.Errorf("ContextRoot should have been %s, because it is the default value", DefaultContextRoot)
-}
}

func TestParseAndValidateConfigBytesWithAddress(t *testing.T) {

@@ -135,7 +143,7 @@ services:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -165,12 +173,12 @@ web:
  port: 12345
services:
  - name: twinnation
-    url: https://twinnation.org/actuator/health
+    url: https://twinnation.org/health
    conditions:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -178,17 +186,15 @@ services:
if config.Metrics {
t.Error("Metrics should've been false by default")
}
-if config.Services[0].URL != "https://twinnation.org/actuator/health" {
-t.Errorf("URL should have been %s", "https://twinnation.org/actuator/health")
+if config.Services[0].URL != "https://twinnation.org/health" {
+t.Errorf("URL should have been %s", "https://twinnation.org/health")
}
if config.Services[0].Interval != 60*time.Second {
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
}

if config.Web.Address != DefaultAddress {
t.Errorf("Bind address should have been %s, because it is the default value", DefaultAddress)
}

if config.Web.Port != 12345 {
t.Errorf("Port should have been %d, because it is specified in config", 12345)
}

@@ -206,7 +212,7 @@ services:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")
@@ -228,44 +234,6 @@ services:
}
}

-func TestParseAndValidateConfigBytesWithPortAndHostAndContextRoot(t *testing.T) {
-config, err := parseAndValidateConfigBytes([]byte(`
-web:
-  port: 12345
-  address: 127.0.0.1
-  context-root: /deeply/nested/down=/their
-services:
-  - name: twinnation
-    url: https://twinnation.org/health
-    conditions:
-      - "[STATUS] == 200"
-`))
-if err != nil {
-t.Error("No error should've been returned")
-}
-if config == nil {
-t.Fatal("Config shouldn't have been nil")
-}
-if config.Metrics {
-t.Error("Metrics should've been false by default")
-}
-if config.Services[0].URL != "https://twinnation.org/health" {
-t.Errorf("URL should have been %s", "https://twinnation.org/health")
-}
-if config.Services[0].Interval != 60*time.Second {
-t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
-}
-if config.Web.Address != "127.0.0.1" {
-t.Errorf("Bind address should have been %s, because it is specified in config", "127.0.0.1")
-}
-if config.Web.Port != 12345 {
-t.Errorf("Port should have been %d, because it is specified in config", 12345)
-}
-if config.Web.ContextRoot != "/deeply/nested/down=/their/" {
-t.Errorf("Port should have been %s, because it is specified in config", "/deeply/nested/down=/their/")
-}
-}

func TestParseAndValidateConfigBytesWithInvalidPort(t *testing.T) {
defer func() { recover() }()
_, _ = parseAndValidateConfigBytes([]byte(`

@@ -293,7 +261,7 @@ services:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -313,9 +281,6 @@ services:
if config.Web.Port != DefaultPort {
t.Errorf("Port should have been %d, because it is the default value", DefaultPort)
}
-if config.Web.ContextRoot != DefaultContextRoot {
-t.Errorf("ContextRoot should have been %s, because it is the default value", DefaultContextRoot)
-}
if userAgent := config.Services[0].Headers["User-Agent"]; userAgent != "Test/2.0" {
t.Errorf("User-Agent should've been %s, got %s", "Test/2.0", userAgent)
}

@@ -334,7 +299,7 @@ services:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")
@@ -378,10 +343,11 @@ badconfig:
func TestParseAndValidateConfigBytesWithAlerting(t *testing.T) {
config, err := parseAndValidateConfigBytes([]byte(`
debug: true

alerting:
  slack:
    webhook-url: "http://example.com"
+  discord:
+    webhook-url: "http://example.org"
  pagerduty:
    integration-key: "00000000000000000000000000000000"
  messagebird:

@@ -400,12 +366,14 @@ services:
        success-threshold: 5
        description: "Healthcheck failed 7 times in a row"
      - type: messagebird
+      - type: discord
+        enabled: true
+        failure-threshold: 10
    conditions:
      - "[STATUS] == 200"
`))
if err != nil {
-t.Error("No error should've been returned")
+t.Error("expected no error, got", err.Error())
}
if config == nil {
t.Fatal("Config shouldn't have been nil")

@@ -413,6 +381,7 @@ services:
if config.Metrics {
t.Error("Metrics should've been false by default")
}
+// Alerting providers
if config.Alerting == nil {
t.Fatal("config.Alerting shouldn't have been nil")
}

@@ -440,6 +409,16 @@ services:
if config.Alerting.Messagebird.Recipients != "31619191919" {
t.Errorf("Messagebird to recipients should've been %s, but was %s", "31619191919", config.Alerting.Messagebird.Recipients)
}
+if config.Alerting.Discord == nil || !config.Alerting.Discord.IsValid() {
+t.Fatal("Discord alerting config should've been valid")
+}
+if config.Alerting.Discord.WebhookURL != "http://example.org" {
+t.Errorf("Discord webhook should've been %s, but was %s", "http://example.org", config.Alerting.Discord.WebhookURL)
+}
+if GetAlertingProviderByAlertType(config, core.DiscordAlert) != config.Alerting.Discord {
+t.Error("expected discord configuration")
+}
+// Services
if len(config.Services) != 1 {
t.Error("There should've been 1 service")
}

@@ -449,11 +428,12 @@ services:
if config.Services[0].Interval != 60*time.Second {
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
}
-if config.Services[0].Alerts == nil {
-t.Fatal("The service alerts shouldn't have been nil")
+if len(config.Services[0].Alerts) != 4 {
+t.Fatal("There should've been 4 alerts configured")
}
-if len(config.Services[0].Alerts) != 3 {
-t.Fatal("There should've been 3 alert configured")

if config.Services[0].Alerts[0].Type != core.SlackAlert {
t.Errorf("The type of the alert should've been %s, but it was %s", core.SlackAlert, config.Services[0].Alerts[0].Type)
}
if !config.Services[0].Alerts[0].Enabled {
t.Error("The alert should've been enabled")

@@ -464,23 +444,35 @@ services:
if config.Services[0].Alerts[0].SuccessThreshold != 2 {
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[0].SuccessThreshold)
}

-if config.Services[0].Alerts[1].Type != core.PagerDutyAlert {
-t.Errorf("The type of the alert should've been %s, but it was %s", core.PagerDutyAlert, config.Services[0].Alerts[1].Type)
-}
-if config.Services[0].Alerts[1].Description != "Healthcheck failed 7 times in a row" {
-t.Errorf("The description of the alert should've been %s, but it was %s", "Healthcheck failed 7 times in a row", config.Services[0].Alerts[1].Description)
-}
if config.Services[0].Alerts[1].FailureThreshold != 7 {
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 7, config.Services[0].Alerts[1].FailureThreshold)
}
if config.Services[0].Alerts[1].SuccessThreshold != 5 {
t.Errorf("The success threshold of the alert should've been %d, but it was %d", 5, config.Services[0].Alerts[1].SuccessThreshold)
}
+if config.Services[0].Alerts[0].Type != core.SlackAlert {
+t.Errorf("The type of the alert should've been %s, but it was %s", core.SlackAlert, config.Services[0].Alerts[0].Type)
+}
+if config.Services[0].Alerts[1].Type != core.PagerDutyAlert {
+t.Errorf("The type of the alert should've been %s, but it was %s", core.PagerDutyAlert, config.Services[0].Alerts[1].Type)
+}
+if config.Services[0].Alerts[1].Description != "Healthcheck failed 7 times in a row" {
+t.Errorf("The description of the alert should've been %s, but it was %s", "Healthcheck failed 7 times in a row", config.Services[0].Alerts[0].Description)
+}

if config.Services[0].Alerts[2].Type != core.MessagebirdAlert {
-t.Errorf("The type of the alert should've been %s, but it was %s", core.MessagebirdAlert, config.Services[0].Alerts[1].Type)
+t.Errorf("The type of the alert should've been %s, but it was %s", core.MessagebirdAlert, config.Services[0].Alerts[2].Type)
}
if config.Services[0].Alerts[2].Enabled {
t.Error("The alert should've been disabled")
}

+if config.Services[0].Alerts[3].Type != core.DiscordAlert {
+t.Errorf("The type of the alert should've been %s, but it was %s", core.DiscordAlert, config.Services[0].Alerts[3].Type)
+}
+if config.Services[0].Alerts[3].FailureThreshold != 10 {
+t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Services[0].Alerts[3].FailureThreshold)
+}
+if config.Services[0].Alerts[3].SuccessThreshold != 2 {
+t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[3].SuccessThreshold)
+}
}
@@ -498,7 +490,7 @@ services:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("No error should've been returned")
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
@@ -532,7 +524,7 @@ services:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("No error should've been returned")
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
@@ -546,11 +538,106 @@ services:
|
||||
if !config.Alerting.Custom.IsValid() {
|
||||
t.Fatal("Custom alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'RESOLVED', got", config.Alerting.Custom.GetAlertStatePlaceholderValue(true))
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "TRIGGERED" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'TRIGGERED', got", config.Alerting.Custom.GetAlertStatePlaceholderValue(false))
|
||||
}
|
||||
if config.Alerting.Custom.Insecure {
|
||||
t.Fatal("config.Alerting.Custom.Insecure shouldn't have been true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithCustomAlertingConfigAndCustomPlaceholderValues(t *testing.T) {
|
||||
config, err := parseAndValidateConfigBytes([]byte(`
|
||||
alerting:
|
||||
custom:
|
||||
placeholders:
|
||||
ALERT_TRIGGERED_OR_RESOLVED:
|
||||
TRIGGERED: "partial_outage"
|
||||
RESOLVED: "operational"
|
||||
url: "https://example.com"
|
||||
insecure: true
|
||||
body: "[ALERT_TRIGGERED_OR_RESOLVED]: [SERVICE_NAME] - [ALERT_DESCRIPTION]"
|
||||
services:
|
||||
- name: twinnation
|
||||
url: https://twinnation.org/health
|
||||
alerts:
|
||||
- type: custom
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting == nil {
|
||||
t.Fatal("config.Alerting shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting.Custom == nil {
|
||||
t.Fatal("PagerDuty alerting config shouldn't have been nil")
|
||||
}
|
||||
if !config.Alerting.Custom.IsValid() {
|
||||
t.Fatal("Custom alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "operational" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'operational'")
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "partial_outage" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'partial_outage'")
|
||||
}
|
||||
if !config.Alerting.Custom.Insecure {
|
||||
t.Fatal("config.Alerting.Custom.Insecure shouldn't have been true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithCustomAlertingConfigAndOneCustomPlaceholderValue(t *testing.T) {
|
||||
config, err := parseAndValidateConfigBytes([]byte(`
|
||||
alerting:
|
||||
custom:
|
||||
placeholders:
|
||||
ALERT_TRIGGERED_OR_RESOLVED:
|
||||
TRIGGERED: "partial_outage"
|
||||
url: "https://example.com"
|
||||
insecure: true
|
||||
body: "[ALERT_TRIGGERED_OR_RESOLVED]: [SERVICE_NAME] - [ALERT_DESCRIPTION]"
|
||||
services:
|
||||
- name: twinnation
|
||||
url: https://twinnation.org/health
|
||||
alerts:
|
||||
- type: custom
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting == nil {
|
||||
t.Fatal("config.Alerting shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting.Custom == nil {
|
||||
t.Fatal("PagerDuty alerting config shouldn't have been nil")
|
||||
}
|
||||
if !config.Alerting.Custom.IsValid() {
|
||||
t.Fatal("Custom alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'RESOLVED'")
|
||||
}
|
||||
if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "partial_outage" {
|
||||
t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'partial_outage'")
|
||||
}
|
||||
if !config.Alerting.Custom.Insecure {
|
||||
t.Fatal("config.Alerting.Custom.Insecure shouldn't have been true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithCustomAlertingConfigThatHasInsecureSetToTrue(t *testing.T) {
|
||||
config, err := parseAndValidateConfigBytes([]byte(`
|
||||
alerting:
|
||||
@@ -571,7 +658,7 @@ services:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("No error should've been returned")
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
@@ -624,7 +711,7 @@ services:
|
||||
- "[STATUS] == 200"
|
||||
`, expectedUsername, expectedPasswordHash)))
|
||||
if err != nil {
|
||||
t.Error("No error should've been returned")
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
@@ -691,7 +778,7 @@ kubernetes:
|
||||
target-path: "/health"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("No error should've been returned")
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
@@ -758,3 +845,38 @@ kubernetes:
|
||||
// TODO: find a way to test this?
|
||||
t.Error("Function should've panicked because testing with ClusterModeIn isn't supported")
|
||||
}
|
||||
|
||||
func TestGetAlertingProviderByAlertType(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Alerting: &alerting.Config{
|
||||
Custom: &custom.AlertProvider{},
|
||||
Discord: &discord.AlertProvider{},
|
||||
Mattermost: &mattermost.AlertProvider{},
|
||||
Messagebird: &messagebird.AlertProvider{},
|
||||
PagerDuty: &pagerduty.AlertProvider{},
|
||||
Slack: &slack.AlertProvider{},
|
||||
Twilio: &twilio.AlertProvider{},
|
||||
},
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.CustomAlert) != cfg.Alerting.Custom {
|
||||
t.Error("expected Custom configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.DiscordAlert) != cfg.Alerting.Discord {
|
||||
t.Error("expected Discord configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.MattermostAlert) != cfg.Alerting.Mattermost {
|
||||
t.Error("expected Mattermost configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.MessagebirdAlert) != cfg.Alerting.Messagebird {
|
||||
t.Error("expected Messagebird configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.PagerDutyAlert) != cfg.Alerting.PagerDuty {
|
||||
t.Error("expected PagerDuty configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.SlackAlert) != cfg.Alerting.Slack {
|
||||
t.Error("expected Slack configuration")
|
||||
}
|
||||
if GetAlertingProviderByAlertType(cfg, core.TwilioAlert) != cfg.Alerting.Twilio {
|
||||
t.Error("expected Twilio configuration")
|
||||
}
|
||||
}
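The implementation of GetAlertingProviderByAlertType itself is not shown in this diff. What follows is a minimal sketch of the lookup the test above verifies, assuming it simply switches on the core.AlertType; the Config and Alerting field names come from the test, while the return type is left as interface{} because the real signature is not visible here.

// Hypothetical sketch, not part of this change.
func getAlertingProviderByAlertTypeSketch(cfg *Config, alertType core.AlertType) interface{} {
	switch alertType {
	case core.CustomAlert:
		return cfg.Alerting.Custom
	case core.DiscordAlert:
		return cfg.Alerting.Discord
	case core.MattermostAlert:
		return cfg.Alerting.Mattermost
	case core.MessagebirdAlert:
		return cfg.Alerting.Messagebird
	case core.PagerDutyAlert:
		return cfg.Alerting.PagerDuty
	case core.SlackAlert:
		return cfg.Alerting.Slack
	case core.TwilioAlert:
		return cfg.Alerting.Twilio
	}
	return nil
}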
|
||||
|
||||
@@ -3,25 +3,20 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// webConfig is the structure which supports the configuration of the endpoint
|
||||
// WebConfig is the structure which supports the configuration of the endpoint
|
||||
// which provides access to the web frontend
|
||||
type webConfig struct {
|
||||
type WebConfig struct {
|
||||
// Address to listen on (defaults to 0.0.0.0 specified by DefaultAddress)
|
||||
Address string `yaml:"address"`
|
||||
|
||||
// Port to listen on (default to 8080 specified by DefaultPort)
|
||||
Port int `yaml:"port"`
|
||||
|
||||
// ContextRoot set the root context for the web application
|
||||
ContextRoot string `yaml:"context-root"`
|
||||
}
|
||||
|
||||
// validateAndSetDefaults checks and sets the default values for fields that are not set
|
||||
func (web *webConfig) validateAndSetDefaults() {
|
||||
func (web *WebConfig) validateAndSetDefaults() {
|
||||
// Validate the Address
|
||||
if len(web.Address) == 0 {
|
||||
web.Address = DefaultAddress
|
||||
@@ -32,32 +27,9 @@ func (web *webConfig) validateAndSetDefaults() {
|
||||
} else if web.Port < 0 || web.Port > math.MaxUint16 {
|
||||
panic(fmt.Sprintf("invalid port: value should be between %d and %d", 0, math.MaxUint16))
|
||||
}
|
||||
// Validate the ContextRoot
|
||||
if len(web.ContextRoot) == 0 {
|
||||
web.ContextRoot = DefaultContextRoot
|
||||
} else {
|
||||
trimmedContextRoot := strings.Trim(web.ContextRoot, "/")
|
||||
if len(trimmedContextRoot) == 0 {
|
||||
web.ContextRoot = DefaultContextRoot
|
||||
return
|
||||
}
|
||||
rootContextURL, err := url.Parse(trimmedContextRoot)
|
||||
if err != nil {
|
||||
panic("invalid context root:" + err.Error())
|
||||
}
|
||||
if rootContextURL.Path != trimmedContextRoot {
|
||||
panic("invalid context root: too complex")
|
||||
}
|
||||
web.ContextRoot = "/" + strings.Trim(rootContextURL.Path, "/") + "/"
|
||||
}
|
||||
}
|
||||
|
||||
// SocketAddress returns the combination of the Address and the Port
|
||||
func (web *webConfig) SocketAddress() string {
|
||||
func (web *WebConfig) SocketAddress() string {
|
||||
return fmt.Sprintf("%s:%d", web.Address, web.Port)
|
||||
}
|
||||
|
||||
// PrependWithContextRoot appends the given path to the ContextRoot
|
||||
func (web *webConfig) PrependWithContextRoot(path string) string {
|
||||
return web.ContextRoot + strings.Trim(path, "/")
|
||||
}
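This change appears to drop the context-root handling (the new CreateRouter later in this diff registers plain paths), leaving WebConfig with address/port defaulting and SocketAddress. A small usage sketch, assuming DefaultAddress is "0.0.0.0" and DefaultPort is 8080 as the field comments above state:

web := &WebConfig{} // no address or port configured
web.validateAndSetDefaults()
addr := web.SocketAddress() // "0.0.0.0:8080"
_ = addr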
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWebConfig_SocketAddress(t *testing.T) {
|
||||
web := &webConfig{
|
||||
web := &WebConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: 8081,
|
||||
}
|
||||
@@ -14,86 +13,3 @@ func TestWebConfig_SocketAddress(t *testing.T) {
|
||||
t.Errorf("expected %s, got %s", "0.0.0.0:8081", web.SocketAddress())
|
||||
}
|
||||
}
|
||||
|
||||
func TestWebConfig_PrependWithContextRoot(t *testing.T) {
|
||||
web := &webConfig{ContextRoot: "/status/"}
|
||||
if result := web.PrependWithContextRoot("/api/v1/results"); result != "/status/api/v1/results" {
|
||||
t.Errorf("expected %s, got %s", "/status/api/v1/results", result)
|
||||
}
|
||||
if result := web.PrependWithContextRoot("/health"); result != "/status/health" {
|
||||
t.Errorf("expected %s, got %s", "/status/health", result)
|
||||
}
|
||||
if result := web.PrependWithContextRoot("/health/"); result != "/status/health" {
|
||||
t.Errorf("expected %s, got %s", "/status/health", result)
|
||||
}
|
||||
}
|
||||
|
||||
// validContextRootTests specifies all test cases which should end up in
|
||||
// a valid context root used to bind the web interface to
|
||||
var validContextRootTests = []struct {
|
||||
name string
|
||||
path string
|
||||
expectedPath string
|
||||
}{
|
||||
{"Empty", "", "/"},
|
||||
{"/", "/", "/"},
|
||||
{"///", "///", "/"},
|
||||
{"Single character 'a'", "a", "/a/"},
|
||||
{"Slash at the beginning", "/status", "/status/"},
|
||||
{"Slashes at start and end", "/status/", "/status/"},
|
||||
{"Multiple slashes at start", "//status", "/status/"},
|
||||
{"Multiple slashes at start and end", "///status////", "/status/"},
|
||||
{"Contains '@' in path'", "me@/status/gatus", "/me@/status/gatus/"},
|
||||
{"Nested context with trailing slash", "/status/gatus/", "/status/gatus/"},
|
||||
{"Nested context without trailing slash", "/status/gatus/system", "/status/gatus/system/"},
|
||||
}
|
||||
|
||||
func TestWebConfig_ValidContextRoots(t *testing.T) {
|
||||
for idx, test := range validContextRootTests {
|
||||
t.Run(fmt.Sprintf("%d: %s", idx, test.name), func(t *testing.T) {
|
||||
expectValidResultForContextRoot(t, test.path, test.expectedPath)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func expectValidResultForContextRoot(t *testing.T, path string, expected string) {
|
||||
web := &webConfig{
|
||||
ContextRoot: path,
|
||||
}
|
||||
web.validateAndSetDefaults()
|
||||
if web.ContextRoot != expected {
|
||||
t.Errorf("expected %s, got %s", expected, web.ContextRoot)
|
||||
}
|
||||
}
|
||||
|
||||
// invalidContextRootTests contains all tests for context root which are
|
||||
// expected to fail and stop program execution
|
||||
var invalidContextRootTests = []struct {
|
||||
name string
|
||||
path string
|
||||
}{
|
||||
{"Only a fragment identifier", "#"},
|
||||
{"Invalid character in path", "/invalid" + string([]byte{0x7F})},
|
||||
{"Starts with protocol", "http://status/gatus"},
|
||||
{"Path with fragment", "/status/gatus#here"},
|
||||
{"Starts with '://'", "://status"},
|
||||
{"Contains query parameter", "/status/h?ello=world"},
|
||||
{"Contains '?'", "/status?"},
|
||||
}
|
||||
|
||||
func TestWebConfig_InvalidContextRoots(t *testing.T) {
|
||||
for idx, test := range invalidContextRootTests {
|
||||
t.Run(fmt.Sprintf("%d: %s", idx, test.name), func(t *testing.T) {
|
||||
expectInvalidResultForContextRoot(t, test.path)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func expectInvalidResultForContextRoot(t *testing.T, path string) {
|
||||
defer func() { recover() }()
|
||||
|
||||
web := &webConfig{ContextRoot: path}
|
||||
web.validateAndSetDefaults()
|
||||
|
||||
t.Fatal(fmt.Sprintf("Should've panicked because the configuration specifies an invalid context root: %s", path))
|
||||
}
|
||||
|
||||
@@ -7,44 +7,41 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/watchdog"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// badgeHandler handles the automatic generation of badge based on the group name and service name passed.
|
||||
//
|
||||
// Valid values for {duration}: 7d, 24h, 1h
|
||||
// Pattern for {identifier}: group-<GROUP_NAME>-service-<SERVICE_NAME>.svg
|
||||
// Pattern for {identifier}: <KEY>.svg
|
||||
func badgeHandler(writer http.ResponseWriter, request *http.Request) {
|
||||
variables := mux.Vars(request)
|
||||
duration := variables["duration"]
|
||||
// group-<GROUP_NAME>-service-<SERVICE_NAME>.svg
|
||||
identifier := variables["identifier"]
|
||||
if duration != "7d" && duration != "24h" && duration != "1h" {
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, _ = writer.Write([]byte("Durations supported: 7d, 24h, 1h"))
|
||||
return
|
||||
}
|
||||
parts := strings.Split(identifier, "-service-")
|
||||
if len(parts) != 2 || !strings.HasPrefix(identifier, "group-") || !strings.HasSuffix(identifier, ".svg") {
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, _ = writer.Write([]byte("Invalid path: Pattern should look like /group-<GROUP_NAME>-service-<SERVICE_NAME>.svg"))
|
||||
return
|
||||
}
|
||||
groupName := strings.TrimPrefix(parts[0], "group-")
|
||||
serviceName := strings.TrimSuffix(parts[1], ".svg")
|
||||
uptime := watchdog.GetUptimeByServiceGroupAndName(groupName, serviceName)
|
||||
if uptime == nil {
|
||||
identifier := variables["identifier"]
|
||||
key := strings.TrimSuffix(identifier, ".svg")
|
||||
serviceStatus := storage.Get().GetServiceStatusByKey(key)
|
||||
if serviceStatus == nil {
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
_, _ = writer.Write([]byte("Requested service not found"))
|
||||
return
|
||||
}
|
||||
if serviceStatus.Uptime == nil {
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = writer.Write([]byte("Failed to compute uptime"))
|
||||
return
|
||||
}
|
||||
formattedDate := time.Now().Format(http.TimeFormat)
|
||||
writer.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||
writer.Header().Set("Date", formattedDate)
|
||||
writer.Header().Set("Expires", formattedDate)
|
||||
writer.Header().Set("Content-Type", "image/svg+xml")
|
||||
_, _ = writer.Write(generateSVG(duration, uptime))
|
||||
_, _ = writer.Write(generateSVG(duration, serviceStatus.Uptime))
|
||||
}
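With the new key-based lookup, a badge is requested as /api/v1/badges/uptime/{duration}/{key}.svg. Below is a hypothetical client-side sketch; the key format <group>_<service> (e.g. core_frontend) is inferred from the test cases in controller_test.go later in this diff, and the localhost URL is an assumption.

package main

import (
	"log"
	"net/http"
)

func main() {
	// 24h uptime badge for the "frontend" service in the "core" group (assumed local instance)
	resp, err := http.Get("http://localhost:8080/api/v1/badges/uptime/24h/core_frontend.svg")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status, resp.Header.Get("Content-Type")) // expect "200 OK" and "image/svg+xml"
}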
|
||||
|
||||
func generateSVG(duration string, uptime *core.Uptime) []byte {
|
||||
|
||||
@@ -3,16 +3,20 @@ package controller
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/config"
|
||||
"github.com/TwinProduction/gatus/security"
|
||||
"github.com/TwinProduction/gatus/watchdog"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/TwinProduction/gocache"
|
||||
"github.com/TwinProduction/health"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
@@ -22,7 +26,15 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.FirstInFirstOut)
|
||||
|
||||
// staticFolder is the path to the location of the static folder from the root path of the project
|
||||
// The only reason this is exposed is to allow running tests from a different path than the root path of the project
|
||||
staticFolder = "./web/static"
|
||||
|
||||
// server is the http.Server created by Handle.
|
||||
// The only reason it exists is for testing purposes.
|
||||
server *http.Server
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -34,54 +46,79 @@ func init() {
|
||||
// Handle creates the router and starts the server
|
||||
func Handle() {
|
||||
cfg := config.Get()
|
||||
router := CreateRouter(cfg)
|
||||
server := &http.Server{
|
||||
var router http.Handler = CreateRouter(cfg)
|
||||
if os.Getenv("ENVIRONMENT") == "dev" {
|
||||
router = developmentCorsHandler(router)
|
||||
}
|
||||
server = &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%d", cfg.Web.Address, cfg.Web.Port),
|
||||
Handler: router,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
IdleTimeout: 15 * time.Second,
|
||||
}
|
||||
log.Printf("[controller][Handle] Listening on %s%s\n", cfg.Web.SocketAddress(), cfg.Web.ContextRoot)
|
||||
log.Fatal(server.ListenAndServe())
|
||||
log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress())
|
||||
if os.Getenv("ROUTER_TEST") == "true" {
|
||||
return
|
||||
}
|
||||
log.Println("[controller][Handle]", server.ListenAndServe())
|
||||
}
|
||||
|
||||
// Shutdown stops the server
|
||||
func Shutdown() {
|
||||
if server != nil {
|
||||
_ = server.Shutdown(context.TODO())
|
||||
server = nil
|
||||
}
|
||||
}
|
||||
|
||||
// CreateRouter creates the router for the http server
|
||||
func CreateRouter(cfg *config.Config) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
statusesHandler := serviceStatusesHandler
|
||||
if cfg.Security != nil && cfg.Security.IsValid() {
|
||||
statusesHandler = security.Handler(serviceStatusesHandler, cfg.Security)
|
||||
}
|
||||
router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET") // favicon needs to be always served from the root
|
||||
router.HandleFunc(cfg.Web.PrependWithContextRoot("/api/v1/statuses"), statusesHandler).Methods("GET")
|
||||
router.HandleFunc(cfg.Web.PrependWithContextRoot("/api/v1/badges/uptime/{duration}/{identifier}"), badgeHandler).Methods("GET")
|
||||
router.HandleFunc(cfg.Web.PrependWithContextRoot("/health"), healthHandler).Methods("GET")
|
||||
router.PathPrefix(cfg.Web.ContextRoot).Handler(GzipHandler(http.StripPrefix(cfg.Web.ContextRoot, http.FileServer(http.Dir("./static")))))
|
||||
if cfg.Metrics {
|
||||
router.Handle(cfg.Web.PrependWithContextRoot("/metrics"), promhttp.Handler()).Methods("GET")
|
||||
router.Handle("/metrics", promhttp.Handler()).Methods("GET")
|
||||
}
|
||||
router.Handle("/health", health.Handler().WithJSON(true)).Methods("GET")
|
||||
router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET")
|
||||
router.HandleFunc("/api/v1/statuses", secureIfNecessary(cfg, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content
|
||||
router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(cfg, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
|
||||
router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET")
|
||||
// SPA
|
||||
router.HandleFunc("/services/{service}", spaHandler).Methods("GET")
|
||||
// Everything else falls back on static content
|
||||
router.PathPrefix("/").Handler(GzipHandler(http.FileServer(http.Dir(staticFolder))))
|
||||
return router
|
||||
}
|
||||
|
||||
func secureIfNecessary(cfg *config.Config, handler http.HandlerFunc) http.HandlerFunc {
|
||||
if cfg.Security != nil && cfg.Security.IsValid() {
|
||||
return security.Handler(handler, cfg.Security)
|
||||
}
|
||||
return handler
|
||||
}
|
||||
|
||||
// serviceStatusesHandler handles requests to retrieve all service statuses
|
||||
// Due to the size of the response, this function leverages a cache.
|
||||
// Must not be wrapped by GzipHandler
|
||||
func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
|
||||
page, pageSize := extractPageAndPageSizeFromRequest(r)
|
||||
gzipped := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip")
|
||||
var exists bool
|
||||
var value interface{}
|
||||
if gzipped {
|
||||
writer.Header().Set("Content-Encoding", "gzip")
|
||||
value, exists = cache.Get("service-status-gzipped")
|
||||
value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize))
|
||||
} else {
|
||||
value, exists = cache.Get("service-status")
|
||||
value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d", page, pageSize))
|
||||
}
|
||||
var data []byte
|
||||
if !exists {
|
||||
var err error
|
||||
buffer := &bytes.Buffer{}
|
||||
gzipWriter := gzip.NewWriter(buffer)
|
||||
data, err = watchdog.GetServiceStatusesAsJSON()
|
||||
data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize))
|
||||
if err != nil {
|
||||
log.Printf("[main][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
|
||||
log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = writer.Write([]byte("Unable to marshal object to JSON"))
|
||||
return
|
||||
@@ -89,8 +126,8 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
|
||||
_, _ = gzipWriter.Write(data)
|
||||
_ = gzipWriter.Close()
|
||||
gzippedData := buffer.Bytes()
|
||||
cache.SetWithTTL("service-status", data, cacheTTL)
|
||||
cache.SetWithTTL("service-status-gzipped", gzippedData, cacheTTL)
|
||||
cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d", page, pageSize), data, cacheTTL)
|
||||
cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize), gzippedData, cacheTTL)
|
||||
if gzipped {
|
||||
data = gzippedData
|
||||
}
|
||||
@@ -102,13 +139,33 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
|
||||
_, _ = writer.Write(data)
|
||||
}
|
||||
|
||||
func healthHandler(writer http.ResponseWriter, _ *http.Request) {
|
||||
// serviceStatusHandler retrieves a single ServiceStatus by the key built from its group name and service name
|
||||
func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
|
||||
page, pageSize := extractPageAndPageSizeFromRequest(r)
|
||||
vars := mux.Vars(r)
|
||||
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"])
|
||||
if serviceStatus == nil {
|
||||
log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
_, _ = writer.Write([]byte("not found"))
|
||||
return
|
||||
}
|
||||
data := map[string]interface{}{
|
||||
"serviceStatus": serviceStatus.WithResultPagination(page, pageSize),
|
||||
// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
|
||||
// expose only the necessary data on /api/v1/statuses.
|
||||
// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
|
||||
"events": serviceStatus.Events,
|
||||
"uptime": serviceStatus.Uptime,
|
||||
}
|
||||
output, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
log.Printf("[controller][serviceStatusHandler] Unable to marshal object to JSON: %s", err.Error())
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = writer.Write([]byte("unable to marshal object to JSON"))
|
||||
return
|
||||
}
|
||||
writer.Header().Add("Content-Type", "application/json")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, _ = writer.Write([]byte("{\"status\":\"UP\"}"))
|
||||
}
|
||||
|
||||
// favIconHandler handles requests for /favicon.ico
|
||||
func favIconHandler(writer http.ResponseWriter, request *http.Request) {
|
||||
http.ServeFile(writer, request, "./static/favicon.ico")
|
||||
_, _ = writer.Write(output)
|
||||
}
|
||||
|
||||
331
controller/controller_test.go
Normal file
@@ -0,0 +1,331 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/config"
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/TwinProduction/gatus/watchdog"
|
||||
)
|
||||
|
||||
var (
|
||||
firstCondition = core.Condition("[STATUS] == 200")
|
||||
secondCondition = core.Condition("[RESPONSE_TIME] < 500")
|
||||
thirdCondition = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")
|
||||
|
||||
timestamp = time.Now()
|
||||
|
||||
testService = core.Service{
|
||||
Name: "name",
|
||||
Group: "group",
|
||||
URL: "https://example.org/what/ever",
|
||||
Method: "GET",
|
||||
Body: "body",
|
||||
Interval: 30 * time.Second,
|
||||
Conditions: []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
|
||||
Alerts: nil,
|
||||
Insecure: false,
|
||||
NumberOfFailuresInARow: 0,
|
||||
NumberOfSuccessesInARow: 0,
|
||||
}
|
||||
testSuccessfulResult = core.Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
Timestamp: timestamp,
|
||||
Duration: 150 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*core.ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
testUnsuccessfulResult = core.Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
Timestamp: timestamp,
|
||||
Duration: 750 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*core.ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: false,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestCreateRouter(t *testing.T) {
|
||||
defer storage.Get().Clear()
|
||||
defer cache.Clear()
|
||||
staticFolder = "../web/static"
|
||||
cfg := &config.Config{
|
||||
Metrics: true,
|
||||
Services: []*core.Service{
|
||||
{
|
||||
Name: "frontend",
|
||||
Group: "core",
|
||||
},
|
||||
{
|
||||
Name: "backend",
|
||||
Group: "core",
|
||||
},
|
||||
},
|
||||
}
|
||||
watchdog.UpdateServiceStatuses(cfg.Services[0], &core.Result{Success: true, Duration: time.Millisecond, Timestamp: time.Now()})
|
||||
watchdog.UpdateServiceStatuses(cfg.Services[1], &core.Result{Success: false, Duration: time.Second, Timestamp: time.Now()})
|
||||
router := CreateRouter(cfg)
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Path string
|
||||
ExpectedCode int
|
||||
Gzip bool
|
||||
}
|
||||
scenarios := []Scenario{
|
||||
{
|
||||
Name: "health",
|
||||
Path: "/health",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "metrics",
|
||||
Path: "/metrics",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "badges-1h",
|
||||
Path: "/api/v1/badges/uptime/1h/core_frontend.svg",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "badges-24h",
|
||||
Path: "/api/v1/badges/uptime/24h/core_backend.svg",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "badges-7d",
|
||||
Path: "/api/v1/badges/uptime/7d/core_frontend.svg",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "badges-with-invalid-duration",
|
||||
Path: "/api/v1/badges/uptime/3d/core_backend.svg",
|
||||
ExpectedCode: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
Name: "badges-for-invalid-key",
|
||||
Path: "/api/v1/badges/uptime/7d/invalid_key.svg",
|
||||
ExpectedCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
Name: "service-statuses",
|
||||
Path: "/api/v1/statuses",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "service-statuses-gzip",
|
||||
Path: "/api/v1/statuses",
|
||||
ExpectedCode: http.StatusOK,
|
||||
Gzip: true,
|
||||
},
|
||||
{
|
||||
Name: "service-statuses-pagination",
|
||||
Path: "/api/v1/statuses?page=1&pageSize=20",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "service-status",
|
||||
Path: "/api/v1/statuses/core_frontend",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "service-status-gzip",
|
||||
Path: "/api/v1/statuses/core_frontend",
|
||||
ExpectedCode: http.StatusOK,
|
||||
Gzip: true,
|
||||
},
|
||||
{
|
||||
Name: "service-status-for-invalid-key",
|
||||
Path: "/api/v1/statuses/invalid_key",
|
||||
ExpectedCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
Name: "favicon",
|
||||
Path: "/favicon.ico",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "frontend-home",
|
||||
Path: "/",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "frontend-assets",
|
||||
Path: "/js/app.js",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
{
|
||||
Name: "frontend-service",
|
||||
Path: "/services/core_frontend",
|
||||
ExpectedCode: http.StatusOK,
|
||||
},
|
||||
}
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.Name, func(t *testing.T) {
|
||||
request, _ := http.NewRequest("GET", scenario.Path, nil)
|
||||
if scenario.Gzip {
|
||||
request.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
router.ServeHTTP(responseRecorder, request)
|
||||
if responseRecorder.Code != scenario.ExpectedCode {
|
||||
t.Errorf("%s %s should have returned %d, but returned %d instead", request.Method, request.URL, scenario.ExpectedCode, responseRecorder.Code)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandle(t *testing.T) {
|
||||
defer storage.Get().Clear()
|
||||
defer cache.Clear()
|
||||
cfg := &config.Config{
|
||||
Web: &config.WebConfig{
|
||||
Address: "0.0.0.0",
|
||||
Port: rand.Intn(65534),
|
||||
},
|
||||
Services: []*core.Service{
|
||||
{
|
||||
Name: "frontend",
|
||||
Group: "core",
|
||||
},
|
||||
{
|
||||
Name: "backend",
|
||||
Group: "core",
|
||||
},
|
||||
},
|
||||
}
|
||||
config.Set(cfg)
|
||||
defer config.Set(nil)
|
||||
_ = os.Setenv("ROUTER_TEST", "true")
|
||||
_ = os.Setenv("ENVIRONMENT", "dev")
|
||||
defer os.Clearenv()
|
||||
Handle()
|
||||
defer Shutdown()
|
||||
request, _ := http.NewRequest("GET", "/health", nil)
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
server.Handler.ServeHTTP(responseRecorder, request)
|
||||
if responseRecorder.Code != http.StatusOK {
|
||||
t.Error("expected GET /health to return status code 200")
|
||||
}
|
||||
if server == nil {
|
||||
t.Fatal("server should've been set (but because we set ROUTER_TEST, it shouldn't have been started)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestShutdown(t *testing.T) {
|
||||
// Pretend that we called controller.Handle(), which initializes the server variable
|
||||
server = &http.Server{}
|
||||
Shutdown()
|
||||
if server != nil {
|
||||
t.Error("server should've been shut down")
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceStatusesHandler(t *testing.T) {
|
||||
defer storage.Get().Clear()
|
||||
defer cache.Clear()
|
||||
staticFolder = "../web/static"
|
||||
firstResult := &testSuccessfulResult
|
||||
secondResult := &testUnsuccessfulResult
|
||||
storage.Get().Insert(&testService, firstResult)
|
||||
storage.Get().Insert(&testService, secondResult)
|
||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||
firstResult.Timestamp = time.Time{}
|
||||
secondResult.Timestamp = time.Time{}
|
||||
router := CreateRouter(&config.Config{})
|
||||
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Path string
|
||||
ExpectedCode int
|
||||
ExpectedBody string
|
||||
}
|
||||
scenarios := []Scenario{
|
||||
{
|
||||
Name: "no-pagination",
|
||||
Path: "/api/v1/statuses",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-first-result",
|
||||
Path: "/api/v1/statuses?page=1&pageSize=1",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-second-result",
|
||||
Path: "/api/v1/statuses?page=2&pageSize=1",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
|
||||
},
|
||||
{
|
||||
Name: "pagination-no-results",
|
||||
Path: "/api/v1/statuses?page=5&pageSize=20",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[]}}`,
|
||||
},
|
||||
{
|
||||
Name: "invalid-pagination-should-fall-back-to-default",
|
||||
Path: "/api/v1/statuses?page=INVALID&pageSize=INVALID",
|
||||
ExpectedCode: http.StatusOK,
|
||||
ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.Name, func(t *testing.T) {
|
||||
request, _ := http.NewRequest("GET", scenario.Path, nil)
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
router.ServeHTTP(responseRecorder, request)
|
||||
if responseRecorder.Code != scenario.ExpectedCode {
|
||||
t.Errorf("%s %s should have returned %d, but returned %d instead", request.Method, request.URL, scenario.ExpectedCode, responseRecorder.Code)
|
||||
}
|
||||
output := responseRecorder.Body.String()
|
||||
if output != scenario.ExpectedBody {
|
||||
t.Errorf("expected:\n %s\n\ngot:\n %s", scenario.ExpectedBody, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
10
controller/cors.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package controller
|
||||
|
||||
import "net/http"
|
||||
|
||||
func developmentCorsHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "http://localhost:8081")
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
8
controller/favicon.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package controller
|
||||
|
||||
import "net/http"
|
||||
|
||||
// favIconHandler handles requests for /favicon.ico
|
||||
func favIconHandler(writer http.ResponseWriter, request *http.Request) {
|
||||
http.ServeFile(writer, request, staticFolder+"/favicon.ico")
|
||||
}
|
||||
@@ -32,10 +32,18 @@ func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
// GzipHandler compresses the response of a given handler if the request's headers specify that the client
|
||||
// GzipHandler compresses the response of a given http.Handler if the request's headers specify that the client
|
||||
// supports gzip encoding
|
||||
func GzipHandler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(writer http.ResponseWriter, r *http.Request) {
|
||||
return GzipHandlerFunc(func(writer http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(writer, r)
|
||||
})
|
||||
}
|
||||
|
||||
// GzipHandlerFunc compresses the response of a given http.HandlerFunc if the request's headers specify that the client
|
||||
// supports gzip encoding
|
||||
func GzipHandlerFunc(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(writer http.ResponseWriter, r *http.Request) {
|
||||
// If the request doesn't specify that it supports gzip, then don't compress it
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
next.ServeHTTP(writer, r)
|
||||
@@ -47,5 +55,5 @@ func GzipHandler(next http.Handler) http.Handler {
|
||||
gz.Reset(writer)
|
||||
defer gz.Close()
|
||||
next.ServeHTTP(&gzipResponseWriter{ResponseWriter: writer, Writer: gz}, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
8
controller/spa.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package controller
|
||||
|
||||
import "net/http"
|
||||
|
||||
// spaHandler handles requests for /
|
||||
func spaHandler(writer http.ResponseWriter, request *http.Request) {
|
||||
http.ServeFile(writer, request, staticFolder+"/index.html")
|
||||
}
|
||||
46
controller/util.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultPage is the default page to use if none is specified or an invalid value is provided
|
||||
DefaultPage = 1
|
||||
|
||||
// DefaultPageSize is the default page size to use if none is specified or an invalid value is provided
|
||||
DefaultPageSize = 20
|
||||
|
||||
// MaximumPageSize is the maximum page size allowed
|
||||
MaximumPageSize = 100
|
||||
)
|
||||
|
||||
func extractPageAndPageSizeFromRequest(r *http.Request) (page int, pageSize int) {
|
||||
var err error
|
||||
if pageParameter := r.URL.Query().Get("page"); len(pageParameter) == 0 {
|
||||
page = DefaultPage
|
||||
} else {
|
||||
page, err = strconv.Atoi(pageParameter)
|
||||
if err != nil {
|
||||
page = DefaultPage
|
||||
}
|
||||
if page < 1 {
|
||||
page = DefaultPage
|
||||
}
|
||||
}
|
||||
if pageSizeParameter := r.URL.Query().Get("pageSize"); len(pageSizeParameter) == 0 {
|
||||
pageSize = DefaultPageSize
|
||||
} else {
|
||||
pageSize, err = strconv.Atoi(pageSizeParameter)
|
||||
if err != nil {
|
||||
pageSize = DefaultPageSize
|
||||
}
|
||||
if pageSize > MaximumPageSize {
|
||||
pageSize = MaximumPageSize
|
||||
} else if pageSize < 1 {
|
||||
pageSize = DefaultPageSize
|
||||
}
|
||||
}
|
||||
return
|
||||
}
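In short, anything that is not a positive integer falls back to the defaults, and pageSize is additionally capped at MaximumPageSize. For example (mirroring the test cases in util_test.go below):

request, _ := http.NewRequest("GET", "/api/v1/statuses?page=abc&pageSize=999999", nil)
page, pageSize := extractPageAndPageSizeFromRequest(request)
// page == DefaultPage (1): "abc" is not a valid integer
// pageSize == MaximumPageSize (100): 999999 exceeds the cap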
|
||||
67
controller/util_test.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExtractPageAndPageSizeFromRequest(t *testing.T) {
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Page string
|
||||
PageSize string
|
||||
ExpectedPage int
|
||||
ExpectedPageSize int
|
||||
}
|
||||
scenarios := []Scenario{
|
||||
{
|
||||
Page: "1",
|
||||
PageSize: "20",
|
||||
ExpectedPage: 1,
|
||||
ExpectedPageSize: 20,
|
||||
},
|
||||
{
|
||||
Page: "2",
|
||||
PageSize: "10",
|
||||
ExpectedPage: 2,
|
||||
ExpectedPageSize: 10,
|
||||
},
|
||||
{
|
||||
Page: "2",
|
||||
PageSize: "10",
|
||||
ExpectedPage: 2,
|
||||
ExpectedPageSize: 10,
|
||||
},
|
||||
{
|
||||
Page: "1",
|
||||
PageSize: "999999",
|
||||
ExpectedPage: 1,
|
||||
ExpectedPageSize: MaximumPageSize,
|
||||
},
|
||||
{
|
||||
Page: "-1",
|
||||
PageSize: "-1",
|
||||
ExpectedPage: DefaultPage,
|
||||
ExpectedPageSize: DefaultPageSize,
|
||||
},
|
||||
{
|
||||
Page: "invalid",
|
||||
PageSize: "invalid",
|
||||
ExpectedPage: DefaultPage,
|
||||
ExpectedPageSize: DefaultPageSize,
|
||||
},
|
||||
}
|
||||
for _, scenario := range scenarios {
|
||||
t.Run("page-"+scenario.Page+"-pageSize-"+scenario.PageSize, func(t *testing.T) {
|
||||
request, _ := http.NewRequest("GET", fmt.Sprintf("/api/v1/statuses?page=%s&pageSize=%s", scenario.Page, scenario.PageSize), nil)
|
||||
actualPage, actualPageSize := extractPageAndPageSizeFromRequest(request)
|
||||
if actualPage != scenario.ExpectedPage {
|
||||
t.Errorf("expected %d, got %d", scenario.ExpectedPage, actualPage)
|
||||
}
|
||||
if actualPageSize != scenario.ExpectedPageSize {
|
||||
t.Errorf("expected %d, got %d", scenario.ExpectedPageSize, actualPageSize)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,12 @@ type Alert struct {
|
||||
|
||||
// Triggered is used to determine whether an alert has been triggered. When an alert is resolved, this value
|
||||
// should be set back to false. It is used to prevent the same alert from going out twice.
|
||||
//
|
||||
// This value should only be modified if the provider.AlertProvider's Send function does not return an error for an
|
||||
// alert that hasn't been triggered yet. This doubles as a lazy retry. The reason why this behavior isn't also
|
||||
// applied for alerts that are already triggered and have become "healthy" again is to prevent a case where, for
|
||||
// some reason, the alert provider always returns errors when trying to send the resolved notification
|
||||
// (SendOnResolved).
|
||||
Triggered bool
|
||||
}
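The comment on Triggered implies a specific send/retry flow in the caller, which is not part of this diff. A hedged sketch of that flow, purely to illustrate the behaviour described above; sendAlert and sendResolved stand in for the configured provider.AlertProvider's Send call, whose real signature is not shown here.

func handleTriggeredAlertSketch(alert *Alert, sendAlert func() error) {
	if !alert.Triggered {
		// Lazy retry: Triggered only flips to true once sending succeeds,
		// so a failed Send is simply attempted again on the next evaluation.
		if err := sendAlert(); err == nil {
			alert.Triggered = true
		}
	}
}

func handleResolvedAlertSketch(alert *Alert, sendResolved func() error) {
	if alert.Triggered {
		// Triggered is reset even if sending fails, to avoid retrying forever
		// against a provider that always errors on resolved notifications.
		_ = sendResolved()
		alert.Triggered = false
	}
}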
|
||||
|
||||
@@ -34,8 +40,11 @@ type Alert struct {
|
||||
type AlertType string
|
||||
|
||||
const (
|
||||
// SlackAlert is the AlertType for the slack alerting provider
|
||||
SlackAlert AlertType = "slack"
|
||||
// CustomAlert is the AlertType for the custom alerting provider
|
||||
CustomAlert AlertType = "custom"
|
||||
|
||||
// DiscordAlert is the AlertType for the discord alerting provider
|
||||
DiscordAlert AlertType = "discord"
|
||||
|
||||
// MattermostAlert is the AlertType for the mattermost alerting provider
|
||||
MattermostAlert AlertType = "mattermost"
|
||||
@@ -46,9 +55,9 @@ const (
|
||||
// PagerDutyAlert is the AlertType for the pagerduty alerting provider
|
||||
PagerDutyAlert AlertType = "pagerduty"
|
||||
|
||||
// SlackAlert is the AlertType for the slack alerting provider
|
||||
SlackAlert AlertType = "slack"
|
||||
|
||||
// TwilioAlert is the AlertType for the twilio alerting provider
|
||||
TwilioAlert AlertType = "twilio"
|
||||
|
||||
// CustomAlert is the AlertType for the custom alerting provider
|
||||
CustomAlert AlertType = "custom"
|
||||
)
|
||||
|
||||
@@ -2,7 +2,6 @@ package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -24,7 +23,7 @@ const (
|
||||
|
||||
// DNSRCodePlaceholder is a placeholder for DNS_RCODE
|
||||
//
|
||||
// Values that could be NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP and REFUSED
|
||||
// Values that could replace the placeholder: NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED
|
||||
DNSRCodePlaceholder = "[DNS_RCODE]"
|
||||
|
||||
// ResponseTimePlaceholder is a placeholder for the request response time, in milliseconds.
|
||||
@@ -48,11 +47,25 @@ const (
|
||||
CertificateExpirationPlaceholder = "[CERTIFICATE_EXPIRATION]"
|
||||
|
||||
// LengthFunctionPrefix is the prefix for the length function
|
||||
//
|
||||
// Usage: len([BODY].articles) == 10, len([BODY].name) > 5
|
||||
LengthFunctionPrefix = "len("
|
||||
|
||||
// HasFunctionPrefix is the prefix for the has function
|
||||
//
|
||||
// Usage: has([BODY].errors) == true
|
||||
HasFunctionPrefix = "has("
|
||||
|
||||
// PatternFunctionPrefix is the prefix for the pattern function
|
||||
//
|
||||
// Usage: [IP] == pat(192.168.*.*)
|
||||
PatternFunctionPrefix = "pat("
|
||||
|
||||
// AnyFunctionPrefix is the prefix for the any function
|
||||
//
|
||||
// Usage: [IP] == any(1.1.1.1, 1.0.0.1)
|
||||
AnyFunctionPrefix = "any("
|
||||
|
||||
// FunctionSuffix is the suffix for all functions
|
||||
FunctionSuffix = ")"
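Putting the placeholders and function prefixes together, here are a few example conditions of the kind these constants enable, taken directly from the usage comments above:

conditions := []Condition{
	"[STATUS] == 200",
	"len([BODY].articles) == 10",    // LengthFunctionPrefix
	"has([BODY].errors) == true",    // HasFunctionPrefix
	"[IP] == pat(192.168.*.*)",      // PatternFunctionPrefix
	"[IP] == any(1.1.1.1, 1.0.0.1)", // AnyFunctionPrefix
}
_ = conditions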
|
||||
|
||||
@@ -64,85 +77,125 @@ const (
|
||||
type Condition string
|
||||
|
||||
// evaluate the Condition with the Result of the health check
|
||||
func (c *Condition) evaluate(result *Result) bool {
|
||||
condition := string(*c)
|
||||
func (c Condition) evaluate(result *Result) bool {
|
||||
condition := string(c)
|
||||
success := false
|
||||
var resolvedCondition string
|
||||
conditionToDisplay := condition
|
||||
if strings.Contains(condition, "==") {
|
||||
parts := sanitizeAndResolve(strings.Split(condition, "=="), result)
|
||||
success = isEqual(parts[0], parts[1])
|
||||
resolvedCondition = fmt.Sprintf("%v == %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolve(strings.Split(condition, "=="), result)
|
||||
success = isEqual(resolvedParameters[0], resolvedParameters[1])
|
||||
if !success {
|
||||
conditionToDisplay = prettify(parameters, resolvedParameters, "==")
|
||||
}
|
||||
} else if strings.Contains(condition, "!=") {
|
||||
parts := sanitizeAndResolve(strings.Split(condition, "!="), result)
|
||||
success = !isEqual(parts[0], parts[1])
|
||||
resolvedCondition = fmt.Sprintf("%v != %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolve(strings.Split(condition, "!="), result)
|
||||
success = !isEqual(resolvedParameters[0], resolvedParameters[1])
|
||||
if !success {
|
||||
conditionToDisplay = prettify(parameters, resolvedParameters, "!=")
|
||||
}
|
||||
} else if strings.Contains(condition, "<=") {
|
||||
parts := sanitizeAndResolveNumerical(strings.Split(condition, "<="), result)
|
||||
success = parts[0] <= parts[1]
|
||||
resolvedCondition = fmt.Sprintf("%v <= %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolveNumerical(strings.Split(condition, "<="), result)
|
||||
success = resolvedParameters[0] <= resolvedParameters[1]
|
||||
if !success {
|
||||
conditionToDisplay = prettifyNumericalParameters(parameters, resolvedParameters, "<=")
|
||||
}
|
||||
} else if strings.Contains(condition, ">=") {
|
||||
parts := sanitizeAndResolveNumerical(strings.Split(condition, ">="), result)
|
||||
success = parts[0] >= parts[1]
|
||||
resolvedCondition = fmt.Sprintf("%v >= %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolveNumerical(strings.Split(condition, ">="), result)
|
||||
success = resolvedParameters[0] >= resolvedParameters[1]
|
||||
if !success {
|
||||
conditionToDisplay = prettifyNumericalParameters(parameters, resolvedParameters, ">=")
|
||||
}
|
||||
} else if strings.Contains(condition, ">") {
|
||||
parts := sanitizeAndResolveNumerical(strings.Split(condition, ">"), result)
|
||||
success = parts[0] > parts[1]
|
||||
resolvedCondition = fmt.Sprintf("%v > %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolveNumerical(strings.Split(condition, ">"), result)
|
||||
success = resolvedParameters[0] > resolvedParameters[1]
|
||||
if !success {
|
||||
conditionToDisplay = prettifyNumericalParameters(parameters, resolvedParameters, ">")
|
||||
}
|
||||
} else if strings.Contains(condition, "<") {
|
||||
parts := sanitizeAndResolveNumerical(strings.Split(condition, "<"), result)
|
||||
success = parts[0] < parts[1]
|
||||
resolvedCondition = fmt.Sprintf("%v < %v", parts[0], parts[1])
|
||||
parameters, resolvedParameters := sanitizeAndResolveNumerical(strings.Split(condition, "<"), result)
|
||||
success = resolvedParameters[0] < resolvedParameters[1]
|
||||
if !success {
|
||||
conditionToDisplay = prettifyNumericalParameters(parameters, resolvedParameters, "<")
|
||||
}
|
||||
} else {
|
||||
result.Errors = append(result.Errors, fmt.Sprintf("invalid condition '%s' has been provided", condition))
|
||||
return false
|
||||
}
|
||||
conditionToDisplay := condition
|
||||
// If the condition isn't a success, return what the resolved condition was too
|
||||
if !success {
|
||||
log.Printf("[Condition][evaluate] Condition '%s' did not succeed because '%s' is false", condition, resolvedCondition)
|
||||
// Check if the resolved condition was an invalid path
|
||||
isResolvedConditionInvalidPath := strings.ReplaceAll(resolvedCondition, fmt.Sprintf("%s ", InvalidConditionElementSuffix), "") == condition
|
||||
if isResolvedConditionInvalidPath {
|
||||
// Since, in the event of an invalid path, the resolvedCondition contains the condition itself,
|
||||
// we'll only display the resolvedCondition
|
||||
conditionToDisplay = resolvedCondition
|
||||
} else {
|
||||
conditionToDisplay = fmt.Sprintf("%s (%s)", condition, resolvedCondition)
|
||||
}
|
||||
//log.Printf("[Condition][evaluate] Condition '%s' did not succeed because '%s' is false", condition, condition)
|
||||
}
|
||||
result.ConditionResults = append(result.ConditionResults, &ConditionResult{Condition: conditionToDisplay, Success: success})
|
||||
return success
|
||||
}
|
||||
|
||||
// isEqual compares two strings.
|
||||
//
|
||||
// It also supports the pattern function. That is to say, if one of the strings starts with PatternFunctionPrefix
|
||||
// and ends with FunctionSuffix, it will be treated like a pattern.
|
||||
func isEqual(first, second string) bool {
|
||||
var isFirstPattern, isSecondPattern bool
|
||||
if strings.HasPrefix(first, PatternFunctionPrefix) && strings.HasSuffix(first, FunctionSuffix) {
|
||||
isFirstPattern = true
|
||||
first = strings.TrimSuffix(strings.TrimPrefix(first, PatternFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if strings.HasPrefix(second, PatternFunctionPrefix) && strings.HasSuffix(second, FunctionSuffix) {
|
||||
isSecondPattern = true
|
||||
second = strings.TrimSuffix(strings.TrimPrefix(second, PatternFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if isFirstPattern && !isSecondPattern {
|
||||
return pattern.Match(first, second)
|
||||
} else if !isFirstPattern && isSecondPattern {
|
||||
return pattern.Match(second, first)
|
||||
} else {
|
||||
return first == second
|
||||
}
|
||||
// hasBodyPlaceholder checks whether the condition has a BodyPlaceholder
|
||||
// Used for determining whether the response body should be read or not
|
||||
func (c Condition) hasBodyPlaceholder() bool {
|
||||
return strings.Contains(string(c), BodyPlaceholder)
|
||||
}
|
||||
|
||||
// sanitizeAndResolve sanitizes and resolves a list of element and returns the list of resolved elements
|
||||
func sanitizeAndResolve(list []string, result *Result) []string {
|
||||
var sanitizedList []string
|
||||
body := strings.TrimSpace(string(result.Body))
|
||||
for _, element := range list {
|
||||
// isEqual compares two strings.
|
||||
//
|
||||
// Supports the pattern and the any functions.
|
||||
// i.e. if one of the parameters starts with PatternFunctionPrefix and ends with FunctionSuffix, it will be treated like
|
||||
// a pattern.
|
||||
func isEqual(first, second string) bool {
|
||||
firstHasFunctionSuffix := strings.HasSuffix(first, FunctionSuffix)
|
||||
secondHasFunctionSuffix := strings.HasSuffix(second, FunctionSuffix)
|
||||
if firstHasFunctionSuffix || secondHasFunctionSuffix {
|
||||
var isFirstPattern, isSecondPattern bool
|
||||
if strings.HasPrefix(first, PatternFunctionPrefix) && firstHasFunctionSuffix {
|
||||
isFirstPattern = true
|
||||
first = strings.TrimSuffix(strings.TrimPrefix(first, PatternFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if strings.HasPrefix(second, PatternFunctionPrefix) && secondHasFunctionSuffix {
|
||||
isSecondPattern = true
|
||||
second = strings.TrimSuffix(strings.TrimPrefix(second, PatternFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if isFirstPattern && !isSecondPattern {
|
||||
return pattern.Match(first, second)
|
||||
} else if !isFirstPattern && isSecondPattern {
|
||||
return pattern.Match(second, first)
|
||||
}
|
||||
var isFirstAny, isSecondAny bool
|
||||
if strings.HasPrefix(first, AnyFunctionPrefix) && firstHasFunctionSuffix {
|
||||
isFirstAny = true
|
||||
first = strings.TrimSuffix(strings.TrimPrefix(first, AnyFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if strings.HasPrefix(second, AnyFunctionPrefix) && secondHasFunctionSuffix {
|
||||
isSecondAny = true
|
||||
second = strings.TrimSuffix(strings.TrimPrefix(second, AnyFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
if isFirstAny && !isSecondAny {
|
||||
options := strings.Split(first, ",")
|
||||
for _, option := range options {
|
||||
if strings.TrimSpace(option) == second {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
} else if !isFirstAny && isSecondAny {
|
||||
options := strings.Split(second, ",")
|
||||
for _, option := range options {
|
||||
if strings.TrimSpace(option) == first {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
return first == second
|
||||
}
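A few concrete calls illustrating the rewritten isEqual, with values mirroring the pat() and any() usage comments:

_ = isEqual("pat(192.168.*.*)", "192.168.0.1")  // true: the first parameter is a pattern
_ = isEqual("any(1.1.1.1, 1.0.0.1)", "1.0.0.1") // true: the second value matches one of the options
_ = isEqual("1.1.1.1", "any(1.1.1.1, 1.0.0.1)") // true: any() works on either side
_ = isEqual("200", "404")                       // false: plain string comparison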
|
||||
|
||||
// sanitizeAndResolve sanitizes and resolves a list of elements and returns the list of parameters as well as a list
|
||||
// of resolved parameters
|
||||
func sanitizeAndResolve(elements []string, result *Result) ([]string, []string) {
|
||||
parameters := make([]string, len(elements))
|
||||
resolvedParameters := make([]string, len(elements))
|
||||
body := strings.TrimSpace(string(result.body))
|
||||
for i, element := range elements {
|
||||
element = strings.TrimSpace(element)
|
||||
parameters[i] = element
|
||||
switch strings.ToUpper(element) {
|
||||
case StatusPlaceholder:
|
||||
element = strconv.Itoa(result.HTTPStatus)
|
||||
@@ -161,47 +214,86 @@ func sanitizeAndResolve(list []string, result *Result) []string {
|
||||
default:
|
||||
// if contains the BodyPlaceholder, then evaluate json path
|
||||
if strings.Contains(element, BodyPlaceholder) {
|
||||
wantLength := false
|
||||
checkingForLength := false
|
||||
checkingForExistence := false
|
||||
if strings.HasPrefix(element, LengthFunctionPrefix) && strings.HasSuffix(element, FunctionSuffix) {
|
||||
wantLength = true
|
||||
checkingForLength = true
|
||||
element = strings.TrimSuffix(strings.TrimPrefix(element, LengthFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
resolvedElement, resolvedElementLength, err := jsonpath.Eval(strings.Replace(element, fmt.Sprintf("%s.", BodyPlaceholder), "", 1), result.Body)
|
||||
if err != nil {
|
||||
if err.Error() != "unexpected end of JSON input" {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
if wantLength {
|
||||
element = fmt.Sprintf("%s%s%s %s", LengthFunctionPrefix, element, FunctionSuffix, InvalidConditionElementSuffix)
|
||||
if strings.HasPrefix(element, HasFunctionPrefix) && strings.HasSuffix(element, FunctionSuffix) {
|
||||
checkingForExistence = true
|
||||
element = strings.TrimSuffix(strings.TrimPrefix(element, HasFunctionPrefix), FunctionSuffix)
|
||||
}
|
||||
resolvedElement, resolvedElementLength, err := jsonpath.Eval(strings.TrimPrefix(element, BodyPlaceholder+"."), result.body)
|
||||
if checkingForExistence {
|
||||
if err != nil {
|
||||
element = "false"
|
||||
} else {
|
||||
element = fmt.Sprintf("%s %s", element, InvalidConditionElementSuffix)
|
||||
element = "true"
|
||||
}
|
||||
} else {
|
||||
if wantLength {
|
||||
element = fmt.Sprintf("%d", resolvedElementLength)
|
||||
if err != nil {
|
||||
if err.Error() != "unexpected end of JSON input" {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
if checkingForLength {
|
||||
element = LengthFunctionPrefix + element + FunctionSuffix + " " + InvalidConditionElementSuffix
|
||||
} else {
|
||||
element = element + " " + InvalidConditionElementSuffix
|
||||
}
|
||||
} else {
|
||||
element = resolvedElement
|
||||
if checkingForLength {
|
||||
element = strconv.Itoa(resolvedElementLength)
|
||||
} else {
|
||||
element = resolvedElement
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sanitizedList = append(sanitizedList, element)
|
||||
resolvedParameters[i] = element
|
||||
}
|
||||
return sanitizedList
|
||||
return parameters, resolvedParameters
|
||||
}
|
||||
|
||||
func sanitizeAndResolveNumerical(list []string, result *Result) []int64 {
|
||||
var sanitizedNumbers []int64
|
||||
sanitizedList := sanitizeAndResolve(list, result)
|
||||
for _, element := range sanitizedList {
|
||||
func sanitizeAndResolveNumerical(list []string, result *Result) (parameters []string, resolvedNumericalParameters []int64) {
|
||||
parameters, resolvedParameters := sanitizeAndResolve(list, result)
|
||||
for _, element := range resolvedParameters {
|
||||
if duration, err := time.ParseDuration(element); duration != 0 && err == nil {
|
||||
sanitizedNumbers = append(sanitizedNumbers, duration.Milliseconds())
|
||||
resolvedNumericalParameters = append(resolvedNumericalParameters, duration.Milliseconds())
|
||||
} else if number, err := strconv.ParseInt(element, 10, 64); err != nil {
|
||||
// Default to 0 if the string couldn't be converted to an integer
|
||||
sanitizedNumbers = append(sanitizedNumbers, 0)
|
||||
resolvedNumericalParameters = append(resolvedNumericalParameters, 0)
|
||||
} else {
|
||||
sanitizedNumbers = append(sanitizedNumbers, number)
|
||||
resolvedNumericalParameters = append(resolvedNumericalParameters, number)
|
||||
}
|
||||
}
|
||||
return sanitizedNumbers
|
||||
return parameters, resolvedNumericalParameters
|
||||
}
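As a rough sketch of the numerical variant, assuming the same package: durations are converted to milliseconds, and values that are neither durations nor integers fall back to 0, which is what produces outputs like "[RESPONSE_TIME] (50) < potato (0)" in the tests below.

// Hypothetical usage (not part of the diff).
result := &Result{Duration: 50 * time.Millisecond}
_, resolved := sanitizeAndResolveNumerical([]string{"[RESPONSE_TIME]", "1s"}, result)
// resolved -> [50, 1000] (milliseconds)
_, fallback := sanitizeAndResolveNumerical([]string{"[RESPONSE_TIME]", "potato"}, result)
// fallback -> [50, 0] ("potato" is neither a duration nor an integer)
_, _ = resolved, fallback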
func prettifyNumericalParameters(parameters []string, resolvedParameters []int64, operator string) string {
    return prettify(parameters, []string{strconv.Itoa(int(resolvedParameters[0])), strconv.Itoa(int(resolvedParameters[1]))}, operator)
}

// XXX: make this configurable? i.e. show-resolved-conditions-on-failure
func prettify(parameters []string, resolvedParameters []string, operator string) string {
    // Since, in the event of an invalid path, the resolvedParameters also contain the condition itself,
    // we'll return the resolvedParameters as-is.
    if strings.HasSuffix(resolvedParameters[0], InvalidConditionElementSuffix) || strings.HasSuffix(resolvedParameters[1], InvalidConditionElementSuffix) {
        return resolvedParameters[0] + " " + operator + " " + resolvedParameters[1]
    }
    // First element is a placeholder
    if parameters[0] != resolvedParameters[0] && parameters[1] == resolvedParameters[1] {
        return parameters[0] + " (" + resolvedParameters[0] + ") " + operator + " " + parameters[1]
    }
    // Second element is a placeholder
    if parameters[0] == resolvedParameters[0] && parameters[1] != resolvedParameters[1] {
        return parameters[0] + " " + operator + " " + parameters[1] + " (" + resolvedParameters[1] + ")"
    }
    // Both elements are placeholders
    if parameters[0] != resolvedParameters[0] && parameters[1] != resolvedParameters[1] {
        return parameters[0] + " (" + resolvedParameters[0] + ") " + operator + " " + parameters[1] + " (" + resolvedParameters[1] + ")"
    }
    // Neither element is a placeholder
    return parameters[0] + " " + operator + " " + parameters[1]
}
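A minimal sketch of the expected behaviour, assuming it runs next to prettify in the core package: only the elements that were placeholders get their resolved value appended in parentheses, which is where failure outputs such as "[STATUS] (404) < 300" come from.

// Hypothetical usage (not part of the diff).
out := prettify([]string{"[STATUS]", "300"}, []string{"404", "300"}, "<")
// out -> "[STATUS] (404) < 300"
_ = out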
|
||||
|
||||
75
core/condition_bench_test.go
Normal file
@@ -0,0 +1,75 @@
package core

import "testing"

func BenchmarkCondition_evaluateWithBodyStringAny(b *testing.B) {
    condition := Condition("[BODY].name == any(john.doe, jane.doe)")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithBodyStringAnyFailure(b *testing.B) {
    condition := Condition("[BODY].name == any(john.doe, jane.doe)")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithBodyString(b *testing.B) {
    condition := Condition("[BODY].name == john.doe")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithBodyStringFailure(b *testing.B) {
    condition := Condition("[BODY].name == john.doe")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithBodyStringLen(b *testing.B) {
    condition := Condition("len([BODY].name) == 8")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithBodyStringLenFailure(b *testing.B) {
    condition := Condition("len([BODY].name) == 8")
    for n := 0; n < b.N; n++ {
        result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithStatus(b *testing.B) {
    condition := Condition("[STATUS] == 200")
    for n := 0; n < b.N; n++ {
        result := &Result{HTTPStatus: 200}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}

func BenchmarkCondition_evaluateWithStatusFailure(b *testing.B) {
    condition := Condition("[STATUS] == 200")
    for n := 0; n < b.N; n++ {
        result := &Result{HTTPStatus: 400}
        condition.evaluate(result)
    }
    b.ReportAllocs()
}
@@ -6,383 +6,443 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestCondition_evaluateWithIP(t *testing.T) {
|
||||
condition := Condition("[IP] == 127.0.0.1")
|
||||
result := &Result{IP: "127.0.0.1"}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
func TestCondition_evaluate(t *testing.T) {
|
||||
type scenario struct {
|
||||
Name string
|
||||
Condition Condition
|
||||
Result *Result
|
||||
ExpectedSuccess bool
|
||||
ExpectedOutput string
|
||||
}
|
||||
scenarios := []scenario{
|
||||
{
|
||||
Name: "ip",
|
||||
Condition: Condition("[IP] == 127.0.0.1"),
|
||||
Result: &Result{IP: "127.0.0.1"},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[IP] == 127.0.0.1",
|
||||
},
|
||||
{
|
||||
Name: "status",
|
||||
Condition: Condition("[STATUS] == 200"),
|
||||
Result: &Result{HTTPStatus: 200},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[STATUS] == 200",
|
||||
},
|
||||
{
|
||||
Name: "status-failure",
|
||||
Condition: Condition("[STATUS] == 200"),
|
||||
Result: &Result{HTTPStatus: 500},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[STATUS] (500) == 200",
|
||||
},
|
||||
{
|
||||
Name: "status-using-less-than",
|
||||
Condition: Condition("[STATUS] < 300"),
|
||||
Result: &Result{HTTPStatus: 201},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[STATUS] < 300",
|
||||
},
|
||||
{
|
||||
Name: "status-using-less-than-failure",
|
||||
Condition: Condition("[STATUS] < 300"),
|
||||
Result: &Result{HTTPStatus: 404},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[STATUS] (404) < 300",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than",
|
||||
Condition: Condition("[RESPONSE_TIME] < 500"),
|
||||
Result: &Result{Duration: 50 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] < 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than-with-duration",
|
||||
Condition: Condition("[RESPONSE_TIME] < 1s"),
|
||||
Result: &Result{Duration: 50 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] < 1s",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than-invalid",
|
||||
Condition: Condition("[RESPONSE_TIME] < potato"),
|
||||
Result: &Result{Duration: 50 * time.Millisecond},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[RESPONSE_TIME] (50) < potato (0)", // Non-numerical values automatically resolve to 0
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-greater-than",
|
||||
Condition: Condition("[RESPONSE_TIME] > 500"),
|
||||
Result: &Result{Duration: 750 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] > 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-greater-than-with-duration",
|
||||
Condition: Condition("[RESPONSE_TIME] > 1s"),
|
||||
Result: &Result{Duration: 2 * time.Second},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] > 1s",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-greater-than-or-equal-to-equal",
|
||||
Condition: Condition("[RESPONSE_TIME] >= 500"),
|
||||
Result: &Result{Duration: 500 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] >= 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-greater-than-or-equal-to-greater",
|
||||
Condition: Condition("[RESPONSE_TIME] >= 500"),
|
||||
Result: &Result{Duration: 499 * time.Millisecond},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[RESPONSE_TIME] (499) >= 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-greater-than-or-equal-to-failure",
|
||||
Condition: Condition("[RESPONSE_TIME] >= 500"),
|
||||
Result: &Result{Duration: 499 * time.Millisecond},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[RESPONSE_TIME] (499) >= 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than-or-equal-to-equal",
|
||||
Condition: Condition("[RESPONSE_TIME] <= 500"),
|
||||
Result: &Result{Duration: 500 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] <= 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than-or-equal-to-less",
|
||||
Condition: Condition("[RESPONSE_TIME] <= 500"),
|
||||
Result: &Result{Duration: 25 * time.Millisecond},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[RESPONSE_TIME] <= 500",
|
||||
},
|
||||
{
|
||||
Name: "response-time-using-less-than-or-equal-to-failure",
|
||||
Condition: Condition("[RESPONSE_TIME] <= 500"),
|
||||
Result: &Result{Duration: 750 * time.Millisecond},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[RESPONSE_TIME] (750) <= 500",
|
||||
},
|
||||
{
|
||||
Name: "body",
|
||||
Condition: Condition("[BODY] == test"),
|
||||
Result: &Result{body: []byte("test")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY] == test",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath",
|
||||
Condition: Condition("[BODY].status == UP"),
|
||||
Result: &Result{body: []byte("{\"status\":\"UP\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].status == UP",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex",
|
||||
Condition: Condition("[BODY].data.name == john"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1, \"name\": \"john\"}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].data.name == john",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-invalid",
|
||||
Condition: Condition("[BODY].data.name == john"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1}}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].data.name (INVALID) == john",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-len",
|
||||
Condition: Condition("len([BODY].data.name) == 4"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"name\": \"john\"}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "len([BODY].data.name) == 4",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-len-invalid",
|
||||
Condition: Condition("len([BODY].data.name) == john"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1}}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "len([BODY].data.name) (INVALID) == john",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-double-placeholder",
|
||||
Condition: Condition("[BODY].user.firstName != [BODY].user.lastName"),
|
||||
Result: &Result{body: []byte("{\"user\": {\"firstName\": \"john\", \"lastName\": \"doe\"}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].user.firstName != [BODY].user.lastName",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-double-placeholder-failure",
|
||||
Condition: Condition("[BODY].user.firstName == [BODY].user.lastName"),
|
||||
Result: &Result{body: []byte("{\"user\": {\"firstName\": \"john\", \"lastName\": \"doe\"}}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].user.firstName (john) == [BODY].user.lastName (doe)",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-int",
|
||||
Condition: Condition("[BODY].data.id == 1"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].data.id == 1",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-array-int",
|
||||
Condition: Condition("[BODY].data[1].id == 2"),
|
||||
Result: &Result{body: []byte("{\"data\": [{\"id\": 1}, {\"id\": 2}, {\"id\": 3}]}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].data[1].id == 2",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-int-using-greater-than",
|
||||
Condition: Condition("[BODY].data.id > 0"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].data.id > 0",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-int-using-greater-than-failure",
|
||||
Condition: Condition("[BODY].data.id > 5"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 1}}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].data.id (1) > 5",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-int-using-less-than",
|
||||
Condition: Condition("[BODY].data.id < 5"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 2}}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].data.id < 5",
|
||||
},
|
||||
{
|
||||
Name: "body-jsonpath-complex-int-using-less-than-failure",
|
||||
Condition: Condition("[BODY].data.id < 5"),
|
||||
Result: &Result{body: []byte("{\"data\": {\"id\": 10}}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].data.id (10) < 5",
|
||||
},
|
||||
{
|
||||
Name: "body-len-array",
|
||||
Condition: Condition("len([BODY].data) == 3"),
|
||||
Result: &Result{body: []byte("{\"data\": [{\"id\": 1}, {\"id\": 2}, {\"id\": 3}]}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "len([BODY].data) == 3",
|
||||
},
|
||||
{
|
||||
Name: "body-len-array-invalid",
|
||||
Condition: Condition("len([BODY].data) == 8"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "len([BODY].data) (INVALID) == 8",
|
||||
},
|
||||
{
|
||||
Name: "body-len-string",
|
||||
Condition: Condition("len([BODY].name) == 8"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "len([BODY].name) == 8",
|
||||
},
|
||||
{
|
||||
Name: "body-pattern",
|
||||
Condition: Condition("[BODY] == pat(*john*)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY] == pat(*john*)",
|
||||
},
|
||||
{
|
||||
Name: "body-pattern-2",
|
||||
Condition: Condition("[BODY].name == pat(john*)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].name == pat(john*)",
|
||||
},
|
||||
{
|
||||
Name: "body-pattern-failure",
|
||||
Condition: Condition("[BODY].name == pat(bob*)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].name (john.doe) == pat(bob*)",
|
||||
},
|
||||
{
|
||||
Name: "body-pattern-html",
|
||||
Condition: Condition("[BODY] == pat(*<div id=\"user\">john.doe</div>*)"),
|
||||
Result: &Result{body: []byte(`<!DOCTYPE html><html lang="en"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /></head><body><div id="user">john.doe</div></body></html>`)},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY] == pat(*<div id=\"user\">john.doe</div>*)",
|
||||
},
|
||||
{
|
||||
Name: "ip-pattern",
|
||||
Condition: Condition("[IP] == pat(10.*)"),
|
||||
Result: &Result{IP: "10.0.0.0"},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[IP] == pat(10.*)",
|
||||
},
|
||||
{
|
||||
Name: "ip-pattern-failure",
|
||||
Condition: Condition("[IP] == pat(10.*)"),
|
||||
Result: &Result{IP: "255.255.255.255"},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[IP] (255.255.255.255) == pat(10.*)",
|
||||
},
|
||||
{
|
||||
Name: "status-pattern",
|
||||
Condition: Condition("[STATUS] == pat(4*)"),
|
||||
Result: &Result{HTTPStatus: 404},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[STATUS] == pat(4*)",
|
||||
},
|
||||
{
|
||||
Name: "status-pattern-failure",
|
||||
Condition: Condition("[STATUS] == pat(4*)"),
|
||||
Result: &Result{HTTPStatus: 200},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[STATUS] (200) == pat(4*)",
|
||||
},
|
||||
{
|
||||
Name: "body-any",
|
||||
Condition: Condition("[BODY].name == any(john.doe, jane.doe)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"john.doe\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].name == any(john.doe, jane.doe)",
|
||||
},
|
||||
{
|
||||
Name: "body-any-2",
|
||||
Condition: Condition("[BODY].name == any(john.doe, jane.doe)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"jane.doe\"}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[BODY].name == any(john.doe, jane.doe)",
|
||||
},
|
||||
{
|
||||
Name: "body-any-failure",
|
||||
Condition: Condition("[BODY].name == any(john.doe, jane.doe)"),
|
||||
Result: &Result{body: []byte("{\"name\": \"bob\"}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[BODY].name (bob) == any(john.doe, jane.doe)",
|
||||
},
|
||||
{
|
||||
Name: "status-any",
|
||||
Condition: Condition("[STATUS] == any(200, 429)"),
|
||||
Result: &Result{HTTPStatus: 200},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[STATUS] == any(200, 429)",
|
||||
},
|
||||
{
|
||||
Name: "status-any-2",
|
||||
Condition: Condition("[STATUS] == any(200, 429)"),
|
||||
Result: &Result{HTTPStatus: 429},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[STATUS] == any(200, 429)",
|
||||
},
|
||||
{
|
||||
Name: "status-any-reverse",
|
||||
Condition: Condition("any(200, 429) == [STATUS]"),
|
||||
Result: &Result{HTTPStatus: 429},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "any(200, 429) == [STATUS]",
|
||||
},
|
||||
{
|
||||
Name: "status-any-failure",
|
||||
Condition: Condition("[STATUS] == any(200, 429)"),
|
||||
Result: &Result{HTTPStatus: 404},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[STATUS] (404) == any(200, 429)",
|
||||
},
|
||||
{
|
||||
Name: "connected",
|
||||
Condition: Condition("[CONNECTED] == true"),
|
||||
Result: &Result{Connected: true},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[CONNECTED] == true",
|
||||
},
|
||||
{
|
||||
Name: "connected-failure",
|
||||
Condition: Condition("[CONNECTED] == true"),
|
||||
Result: &Result{Connected: false},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[CONNECTED] (false) == true",
|
||||
},
|
||||
{
|
||||
Name: "certificate-expiration-not-set",
|
||||
Condition: Condition("[CERTIFICATE_EXPIRATION] == 0"),
|
||||
Result: &Result{},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[CERTIFICATE_EXPIRATION] == 0",
|
||||
},
|
||||
{
|
||||
Name: "certificate-expiration-greater-than-numerical",
|
||||
Condition: Condition("[CERTIFICATE_EXPIRATION] > " + strconv.FormatInt((time.Hour*24*28).Milliseconds(), 10)),
|
||||
Result: &Result{CertificateExpiration: time.Hour * 24 * 60},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[CERTIFICATE_EXPIRATION] > 2419200000",
|
||||
},
|
||||
{
|
||||
Name: "certificate-expiration-greater-than-numerical-failure",
|
||||
Condition: Condition("[CERTIFICATE_EXPIRATION] > " + strconv.FormatInt((time.Hour*24*28).Milliseconds(), 10)),
|
||||
Result: &Result{CertificateExpiration: time.Hour * 24 * 14},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[CERTIFICATE_EXPIRATION] (1209600000) > 2419200000",
|
||||
},
|
||||
{
|
||||
Name: "certificate-expiration-greater-than-duration",
|
||||
Condition: Condition("[CERTIFICATE_EXPIRATION] > 12h"),
|
||||
Result: &Result{CertificateExpiration: 24 * time.Hour},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "[CERTIFICATE_EXPIRATION] > 12h",
|
||||
},
|
||||
{
|
||||
Name: "certificate-expiration-greater-than-duration",
|
||||
Condition: Condition("[CERTIFICATE_EXPIRATION] > 48h"),
|
||||
Result: &Result{CertificateExpiration: 24 * time.Hour},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "[CERTIFICATE_EXPIRATION] (86400000) > 48h (172800000)",
|
||||
},
|
||||
{
|
||||
Name: "has",
|
||||
Condition: Condition("has([BODY].errors) == false"),
|
||||
Result: &Result{body: []byte("{}")},
|
||||
ExpectedSuccess: true,
|
||||
ExpectedOutput: "has([BODY].errors) == false",
|
||||
},
|
||||
{
|
||||
Name: "has-failure",
|
||||
Condition: Condition("has([BODY].errors) == false"),
|
||||
Result: &Result{body: []byte("{\"errors\": [\"1\"]}")},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "has([BODY].errors) (true) == false",
|
||||
},
|
||||
{
|
||||
Name: "no-placeholders",
|
||||
Condition: Condition("1 == 2"),
|
||||
Result: &Result{},
|
||||
ExpectedSuccess: false,
|
||||
ExpectedOutput: "1 == 2",
|
||||
},
|
||||
}
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.Name, func(t *testing.T) {
|
||||
scenario.Condition.evaluate(scenario.Result)
|
||||
if scenario.Result.ConditionResults[0].Success != scenario.ExpectedSuccess {
|
||||
t.Errorf("Condition '%s' should have been success=%v", scenario.Condition, scenario.ExpectedSuccess)
|
||||
}
|
||||
if scenario.Result.ConditionResults[0].Condition != scenario.ExpectedOutput {
|
||||
t.Errorf("Condition '%s' should have resolved to '%s', got '%s'", scenario.Condition, scenario.ExpectedOutput, scenario.Result.ConditionResults[0].Condition)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatus(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 201")
|
||||
func TestCondition_evaluateWithInvalidOperator(t *testing.T) {
|
||||
condition := Condition("[STATUS] ? 201")
|
||||
result := &Result{HTTPStatus: 201}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatusFailure(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
result := &Result{HTTPStatus: 500}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatusUsingLessThan(t *testing.T) {
|
||||
condition := Condition("[STATUS] < 300")
|
||||
result := &Result{HTTPStatus: 201}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatusFailureUsingLessThan(t *testing.T) {
|
||||
condition := Condition("[STATUS] < 300")
|
||||
result := &Result{HTTPStatus: 404}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingLessThan(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] < 500")
|
||||
result := &Result{Duration: time.Millisecond * 50}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingLessThanDuration(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] < 1s")
|
||||
result := &Result{Duration: time.Millisecond * 50}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingLessThanInvalid(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] < potato")
|
||||
result := &Result{Duration: time.Millisecond * 50}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have failed because the condition has an invalid numerical value that should've automatically resolved to 0", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingGreaterThan(t *testing.T) {
|
||||
// Not exactly sure why you'd want to have a condition that checks if the response time is too fast,
|
||||
// but hey, who am I to judge?
|
||||
condition := Condition("[RESPONSE_TIME] > 500")
|
||||
result := &Result{Duration: time.Millisecond * 750}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingGreaterThanDuration(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] > 1s")
|
||||
result := &Result{Duration: time.Second * 2}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingGreaterThanOrEqualTo(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] >= 500")
|
||||
result := &Result{Duration: time.Millisecond * 500}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithResponseTimeUsingLessThanOrEqualTo(t *testing.T) {
|
||||
condition := Condition("[RESPONSE_TIME] <= 500")
|
||||
result := &Result{Duration: time.Millisecond * 500}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBody(t *testing.T) {
|
||||
condition := Condition("[BODY] == test")
|
||||
result := &Result{Body: []byte("test")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPath(t *testing.T) {
|
||||
condition := Condition("[BODY].status == UP")
|
||||
result := &Result{Body: []byte("{\"status\":\"UP\"}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplex(t *testing.T) {
|
||||
condition := Condition("[BODY].data.name == john")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1, \"name\": \"john\"}}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithInvalidBodyJSONPathComplex(t *testing.T) {
|
||||
expectedResolvedCondition := "[BODY].data.name (INVALID) == john"
|
||||
condition := Condition("[BODY].data.name == john")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1}}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure, because the path was invalid", condition)
|
||||
}
|
||||
if result.ConditionResults[0].Condition != expectedResolvedCondition {
|
||||
t.Errorf("Condition '%s' should have resolved to '%s', but resolved to '%s' instead", condition, expectedResolvedCondition, result.ConditionResults[0].Condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithInvalidBodyJSONPathComplexWithLengthFunction(t *testing.T) {
|
||||
expectedResolvedCondition := "len([BODY].data.name) (INVALID) == john"
|
||||
condition := Condition("len([BODY].data.name) == john")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1}}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure, because the path was invalid", condition)
|
||||
}
|
||||
if result.ConditionResults[0].Condition != expectedResolvedCondition {
|
||||
t.Errorf("Condition '%s' should have resolved to '%s', but resolved to '%s' instead", condition, expectedResolvedCondition, result.ConditionResults[0].Condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathDoublePlaceholders(t *testing.T) {
|
||||
condition := Condition("[BODY].user.firstName != [BODY].user.lastName")
|
||||
result := &Result{Body: []byte("{\"user\": {\"firstName\": \"john\", \"lastName\": \"doe\"}}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathDoublePlaceholdersFailure(t *testing.T) {
|
||||
condition := Condition("[BODY].user.firstName == [BODY].user.lastName")
|
||||
result := &Result{Body: []byte("{\"user\": {\"firstName\": \"john\", \"lastName\": \"doe\"}}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathLongInt(t *testing.T) {
|
||||
condition := Condition("[BODY].data.id == 1")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1}}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplexInt(t *testing.T) {
|
||||
condition := Condition("[BODY].data[1].id == 2")
|
||||
result := &Result{Body: []byte("{\"data\": [{\"id\": 1}, {\"id\": 2}, {\"id\": 3}]}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplexIntUsingGreaterThan(t *testing.T) {
|
||||
condition := Condition("[BODY].data.id > 0")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1}}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplexIntFailureUsingGreaterThan(t *testing.T) {
|
||||
condition := Condition("[BODY].data.id > 5")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 1}}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplexIntUsingLessThan(t *testing.T) {
|
||||
condition := Condition("[BODY].data.id < 5")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 2}}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyJSONPathComplexIntFailureUsingLessThan(t *testing.T) {
|
||||
condition := Condition("[BODY].data.id < 5")
|
||||
result := &Result{Body: []byte("{\"data\": {\"id\": 10}}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodySliceLength(t *testing.T) {
|
||||
condition := Condition("len([BODY].data) == 3")
|
||||
result := &Result{Body: []byte("{\"data\": [{\"id\": 1}, {\"id\": 2}, {\"id\": 3}]}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyStringLength(t *testing.T) {
|
||||
condition := Condition("len([BODY].name) == 8")
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyStringPattern(t *testing.T) {
|
||||
condition := Condition("[BODY].name == pat(*ohn*)")
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyStringPatternFailure(t *testing.T) {
|
||||
condition := Condition("[BODY].name == pat(bob*)")
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithBodyPatternFailure(t *testing.T) {
|
||||
condition := Condition("[BODY] == pat(*john*)")
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithIPPattern(t *testing.T) {
|
||||
condition := Condition("[IP] == pat(10.*)")
|
||||
result := &Result{IP: "10.0.0.0"}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithIPPatternFailure(t *testing.T) {
|
||||
condition := Condition("[IP] == pat(10.*)")
|
||||
result := &Result{IP: "255.255.255.255"}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatusPattern(t *testing.T) {
|
||||
condition := Condition("[STATUS] == pat(4*)")
|
||||
result := &Result{HTTPStatus: 404}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithStatusPatternFailure(t *testing.T) {
|
||||
condition := Condition("[STATUS] != pat(4*)")
|
||||
result := &Result{HTTPStatus: 404}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithConnected(t *testing.T) {
|
||||
condition := Condition("[CONNECTED] == true")
|
||||
result := &Result{Connected: true}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithConnectedFailure(t *testing.T) {
|
||||
condition := Condition("[CONNECTED] == true")
|
||||
result := &Result{Connected: false}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithUnsetCertificateExpiration(t *testing.T) {
|
||||
condition := Condition("[CERTIFICATE_EXPIRATION] == 0")
|
||||
result := &Result{}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithCertificateExpirationGreaterThanNumerical(t *testing.T) {
|
||||
acceptable := (time.Hour * 24 * 28).Milliseconds()
|
||||
condition := Condition("[CERTIFICATE_EXPIRATION] > " + strconv.FormatInt(acceptable, 10))
|
||||
result := &Result{CertificateExpiration: time.Hour * 24 * 60}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithCertificateExpirationGreaterThanNumericalFailure(t *testing.T) {
|
||||
acceptable := (time.Hour * 24 * 28).Milliseconds()
|
||||
condition := Condition("[CERTIFICATE_EXPIRATION] > " + strconv.FormatInt(acceptable, 10))
|
||||
result := &Result{CertificateExpiration: time.Hour * 24 * 14}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithCertificateExpirationGreaterThanDuration(t *testing.T) {
|
||||
condition := Condition("[CERTIFICATE_EXPIRATION] > 12h")
|
||||
result := &Result{CertificateExpiration: 24 * time.Hour}
|
||||
condition.evaluate(result)
|
||||
if !result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a success", condition)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCondition_evaluateWithCertificateExpirationGreaterThanDurationFailure(t *testing.T) {
|
||||
condition := Condition("[CERTIFICATE_EXPIRATION] > 48h")
|
||||
result := &Result{CertificateExpiration: 24 * time.Hour}
|
||||
condition.evaluate(result)
|
||||
if result.ConditionResults[0].Success {
|
||||
t.Errorf("Condition '%s' should have been a failure", condition)
|
||||
if result.Success {
|
||||
t.Error("condition was invalid, result should've been a failure")
|
||||
}
|
||||
if len(result.Errors) != 1 {
|
||||
t.Error("condition was invalid, result should've had an error")
|
||||
}
|
||||
}
|
||||
|
||||
12
core/dns.go
@@ -60,26 +60,26 @@ func (d *DNS) query(url string, result *Result) {
|
||||
switch rr.Header().Rrtype {
|
||||
case dns.TypeA:
|
||||
if a, ok := rr.(*dns.A); ok {
|
||||
result.Body = []byte(a.A.String())
|
||||
result.body = []byte(a.A.String())
|
||||
}
|
||||
case dns.TypeAAAA:
|
||||
if aaaa, ok := rr.(*dns.AAAA); ok {
|
||||
result.Body = []byte(aaaa.AAAA.String())
|
||||
result.body = []byte(aaaa.AAAA.String())
|
||||
}
|
||||
case dns.TypeCNAME:
|
||||
if cname, ok := rr.(*dns.CNAME); ok {
|
||||
result.Body = []byte(cname.Target)
|
||||
result.body = []byte(cname.Target)
|
||||
}
|
||||
case dns.TypeMX:
|
||||
if mx, ok := rr.(*dns.MX); ok {
|
||||
result.Body = []byte(mx.Mx)
|
||||
result.body = []byte(mx.Mx)
|
||||
}
|
||||
case dns.TypeNS:
|
||||
if ns, ok := rr.(*dns.NS); ok {
|
||||
result.Body = []byte(ns.Ns)
|
||||
result.body = []byte(ns.Ns)
|
||||
}
|
||||
default:
|
||||
result.Body = []byte("query type is not supported yet")
|
||||
result.body = []byte("query type is not supported yet")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,12 +91,12 @@ func TestIntegrationQuery(t *testing.T) {
|
||||
|
||||
if test.inputDNS.QueryType == "NS" {
|
||||
// Because there are often multiple nameservers backing a single domain, we'll only look at the suffix
|
||||
if !pattern.Match(test.expectedBody, string(result.Body)) {
|
||||
t.Errorf("got %s, expected result %s,", string(result.Body), test.expectedBody)
|
||||
if !pattern.Match(test.expectedBody, string(result.body)) {
|
||||
t.Errorf("got %s, expected result %s,", string(result.body), test.expectedBody)
|
||||
}
|
||||
} else {
|
||||
if string(result.Body) != test.expectedBody {
|
||||
t.Errorf("got %s, expected result %s,", string(result.Body), test.expectedBody)
|
||||
if string(result.body) != test.expectedBody {
|
||||
t.Errorf("got %s, expected result %s,", string(result.body), test.expectedBody)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
26
core/event.go
Normal file
@@ -0,0 +1,26 @@
package core

import "time"

// Event is something that happens at a specific time
type Event struct {
    // Type is the kind of event
    Type EventType `json:"type"`

    // Timestamp is the moment at which the event happened
    Timestamp time.Time `json:"timestamp"`
}

// EventType is the type of an Event
type EventType string

var (
    // EventStart is a type of event that represents when a service starts being monitored
    EventStart EventType = "START"

    // EventHealthy is a type of event that represents a service passing all of its conditions
    EventHealthy EventType = "HEALTHY"

    // EventUnhealthy is a type of event that represents a service failing one or more of its conditions
    EventUnhealthy EventType = "UNHEALTHY"
)
@@ -12,10 +12,7 @@ type Result struct {
|
||||
// DNSRCode is the response code of a DNS query in a human readable format
|
||||
DNSRCode string `json:"-"`
|
||||
|
||||
// Body is the response body
|
||||
Body []byte `json:"-"`
|
||||
|
||||
// Hostname extracted from the Service URL
|
||||
// Hostname extracted from Service.URL
|
||||
Hostname string `json:"hostname"`
|
||||
|
||||
// IP resolved from the Service URL
|
||||
@@ -31,7 +28,7 @@ type Result struct {
|
||||
Errors []string `json:"errors"`
|
||||
|
||||
// ConditionResults results of the service's conditions
|
||||
ConditionResults []*ConditionResult `json:"condition-results"`
|
||||
ConditionResults []*ConditionResult `json:"conditionResults"`
|
||||
|
||||
// Success whether the result signifies a success or not
|
||||
Success bool `json:"success"`
|
||||
@@ -41,4 +38,11 @@ type Result struct {
|
||||
|
||||
// CertificateExpiration is the duration before the certificate expires
|
||||
CertificateExpiration time.Duration `json:"-"`
|
||||
|
||||
// body is the response body
|
||||
//
|
||||
// Note that this variable is only used during the evaluation of a service's health.
|
||||
// This means that a call to Service.EvaluateHealth both populates the body (if necessary)
|
||||
// and sets it to nil after the evaluation has been completed.
|
||||
body []byte
|
||||
}
|
||||
|
||||
@@ -1,5 +1,19 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaximumNumberOfResults is the maximum number of results that ServiceStatus.Results can have
|
||||
MaximumNumberOfResults = 100
|
||||
|
||||
// MaximumNumberOfEvents is the maximum number of events that ServiceStatus.Events can have
|
||||
MaximumNumberOfEvents = 50
|
||||
)
|
||||
|
||||
// ServiceStatus contains the evaluation Results of a Service
|
||||
type ServiceStatus struct {
|
||||
// Name of the service
|
||||
@@ -8,11 +22,25 @@ type ServiceStatus struct {
|
||||
// Group the service is a part of. Used for grouping multiple services together on the front end.
|
||||
Group string `json:"group,omitempty"`
|
||||
|
||||
// Key is the key representing the ServiceStatus
|
||||
Key string `json:"key"`
|
||||
|
||||
// Results is the list of service evaluation results
|
||||
Results []*Result `json:"results"`
|
||||
|
||||
// Events is a list of events
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it in a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Events []*Event `json:"-"`
|
||||
|
||||
// Uptime information on the service's uptime
|
||||
Uptime *Uptime `json:"uptime"`
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it in a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Uptime *Uptime `json:"-"`
|
||||
}
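A hypothetical sketch of what the two comments above describe; the handler name and map keys below are assumptions, not part of this change. The detailed service page can re-attach the fields that are excluded from the default JSON output before marshalling.

// Hypothetical sketch (keys and variable names are assumptions); assumes "encoding/json" is imported.
detailed := map[string]interface{}{
    "serviceStatus": serviceStatus,
    "events":        serviceStatus.Events,
    "uptime":        serviceStatus.Uptime,
}
payload, _ := json.Marshal(detailed)
_ = payload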
|
||||
|
||||
// NewServiceStatus creates a new ServiceStatus
|
||||
@@ -20,17 +48,67 @@ func NewServiceStatus(service *Service) *ServiceStatus {
|
||||
return &ServiceStatus{
|
||||
Name: service.Name,
|
||||
Group: service.Group,
|
||||
Key: util.ConvertGroupAndServiceToKey(service.Group, service.Name),
|
||||
Results: make([]*Result, 0),
|
||||
Uptime: NewUptime(),
|
||||
Events: []*Event{{
|
||||
Type: EventStart,
|
||||
Timestamp: time.Now(),
|
||||
}},
|
||||
Uptime: NewUptime(),
|
||||
}
|
||||
}
|
||||
|
||||
// WithResultPagination returns a shallow copy of the ServiceStatus with only the results
|
||||
// within the range defined by the page and pageSize parameters
|
||||
func (ss ServiceStatus) WithResultPagination(page, pageSize int) *ServiceStatus {
|
||||
shallowCopy := ss
|
||||
numberOfResults := len(shallowCopy.Results)
|
||||
start := numberOfResults - (page * pageSize)
|
||||
end := numberOfResults - ((page - 1) * pageSize)
|
||||
if start > numberOfResults {
|
||||
start = -1
|
||||
} else if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
if end > numberOfResults {
|
||||
end = numberOfResults
|
||||
}
|
||||
if start < 0 || end < 0 {
|
||||
shallowCopy.Results = []*Result{}
|
||||
} else {
|
||||
shallowCopy.Results = shallowCopy.Results[start:end]
|
||||
}
|
||||
return &shallowCopy
|
||||
}
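As a usage sketch, assuming a ServiceStatus populated with 25 results (this mirrors the pagination test further down): page 1 returns the most recent results, and the last non-empty page returns whatever remains.

// Hypothetical usage (not part of the diff).
serviceStatus := NewServiceStatus(&Service{Name: "name", Group: "group"})
for i := 0; i < 25; i++ {
    serviceStatus.AddResult(&Result{Timestamp: time.Now()})
}
latestTen := serviceStatus.WithResultPagination(1, 10)  // the 10 most recent results
oldestFive := serviceStatus.WithResultPagination(3, 10) // the remaining 5 results
_, _ = latestTen, oldestFive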
|
||||
|
||||
// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
|
||||
// no more than MaximumNumberOfResults results in the Results slice
|
||||
func (ss *ServiceStatus) AddResult(result *Result) {
|
||||
if len(ss.Results) > 0 {
|
||||
// Check if there's any change since the last result
|
||||
// OR there's only 1 event, which only happens when there's a start event
|
||||
if ss.Results[len(ss.Results)-1].Success != result.Success || len(ss.Events) == 1 {
|
||||
event := &Event{Timestamp: result.Timestamp}
|
||||
if result.Success {
|
||||
event.Type = EventHealthy
|
||||
} else {
|
||||
event.Type = EventUnhealthy
|
||||
}
|
||||
ss.Events = append(ss.Events, event)
|
||||
if len(ss.Events) > MaximumNumberOfEvents {
|
||||
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
|
||||
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
|
||||
// length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
|
||||
ss.Events = ss.Events[len(ss.Events)-MaximumNumberOfEvents:]
|
||||
}
|
||||
}
|
||||
}
|
||||
ss.Results = append(ss.Results, result)
|
||||
if len(ss.Results) > 20 {
|
||||
ss.Results = ss.Results[1:]
|
||||
if len(ss.Results) > MaximumNumberOfResults {
|
||||
// Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
|
||||
// than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
|
||||
// MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
|
||||
ss.Results = ss.Results[len(ss.Results)-MaximumNumberOfResults:]
|
||||
}
|
||||
ss.Uptime.ProcessResult(result)
|
||||
}
|
||||
|
||||
92
core/service-status_bench_test.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
firstCondition = Condition("[STATUS] == 200")
|
||||
secondCondition = Condition("[RESPONSE_TIME] < 500")
|
||||
thirdCondition = Condition("[CERTIFICATE_EXPIRATION] < 72h")
|
||||
|
||||
timestamp = time.Now()
|
||||
|
||||
testService = Service{
|
||||
Name: "name",
|
||||
Group: "group",
|
||||
URL: "https://example.org/what/ever",
|
||||
Method: "GET",
|
||||
Body: "body",
|
||||
Interval: 30 * time.Second,
|
||||
Conditions: []*Condition{&firstCondition, &secondCondition, &thirdCondition},
|
||||
Alerts: nil,
|
||||
Insecure: false,
|
||||
NumberOfFailuresInARow: 0,
|
||||
NumberOfSuccessesInARow: 0,
|
||||
}
|
||||
testSuccessfulResult = Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
body: []byte("body"),
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
Timestamp: timestamp,
|
||||
Duration: 150 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
testUnsuccessfulResult = Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
body: []byte("body"),
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
Timestamp: timestamp,
|
||||
Duration: 750 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: false,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func BenchmarkServiceStatus_WithResultPagination(b *testing.B) {
|
||||
service := &testService
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
for i := 0; i < MaximumNumberOfResults; i++ {
|
||||
serviceStatus.AddResult(&testSuccessfulResult)
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
serviceStatus.WithResultPagination(1, 20)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
}
|
||||
@@ -14,15 +14,53 @@ func TestNewServiceStatus(t *testing.T) {
|
||||
if serviceStatus.Group != service.Group {
|
||||
t.Errorf("expected %s, got %s", service.Group, serviceStatus.Group)
|
||||
}
|
||||
if serviceStatus.Key != "group_name" {
|
||||
t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceStatus_AddResult(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
for i := 0; i < 50; i++ {
|
||||
for i := 0; i < MaximumNumberOfResults+10; i++ {
|
||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
||||
}
|
||||
if len(serviceStatus.Results) != 20 {
|
||||
t.Errorf("expected serviceStatus.Results to not exceed a length of 20")
|
||||
if len(serviceStatus.Results) != MaximumNumberOfResults {
|
||||
t.Errorf("expected serviceStatus.Results to not exceed a length of %d", MaximumNumberOfResults)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceStatus_WithResultPagination(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
for i := 0; i < 25; i++ {
|
||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 1).Results) != 1 {
|
||||
t.Errorf("expected to have 1 result")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(5, 0).Results) != 0 {
|
||||
t.Errorf("expected to have 0 results")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(-1, 20).Results) != 0 {
|
||||
t.Errorf("expected to have 0 result, because the page was invalid")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, -1).Results) != 0 {
|
||||
t.Errorf("expected to have 0 result, because the page size was invalid")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 10).Results) != 10 {
|
||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(2, 10).Results) != 10 {
|
||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(3, 10).Results) != 5 {
|
||||
t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(4, 10).Results) != 0 {
|
||||
t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 50).Results) != 25 {
|
||||
t.Errorf("expected to have 25 results, because there's only 25 results")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ type Service struct {
|
||||
// NumberOfFailuresInARow is the number of unsuccessful evaluations in a row
|
||||
NumberOfFailuresInARow int
|
||||
|
||||
// NumberOfFailuresInARow is the number of successful evaluations in a row
|
||||
// NumberOfSuccessesInARow is the number of successful evaluations in a row
|
||||
NumberOfSuccessesInARow int
|
||||
}
|
||||
|
||||
@@ -149,6 +149,8 @@ func (service *Service) EvaluateHealth() *Result {
|
||||
}
|
||||
}
|
||||
result.Timestamp = time.Now()
|
||||
// No need to keep the body after the service has been evaluated
|
||||
result.body = nil
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -213,15 +215,19 @@ func (service *Service) call(result *Result) {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
return
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.TLS != nil && len(response.TLS.PeerCertificates) > 0 {
|
||||
certificate := response.TLS.PeerCertificates[0]
|
||||
result.CertificateExpiration = certificate.NotAfter.Sub(time.Now())
|
||||
result.CertificateExpiration = time.Until(certificate.NotAfter)
|
||||
}
|
||||
result.HTTPStatus = response.StatusCode
|
||||
result.Connected = response.StatusCode > 0
|
||||
result.Body, err = ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
// Only read the body if there's a condition that uses the BodyPlaceholder
|
||||
if service.needsToReadBody() {
|
||||
result.body, err = ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -246,3 +252,13 @@ func (service *Service) buildHTTPRequest() *http.Request {
|
||||
}
|
||||
return request
|
||||
}
|
||||
|
||||
// needsToReadBody checks whether any condition requires the response body to be read
|
||||
func (service *Service) needsToReadBody() bool {
|
||||
for _, condition := range service.Conditions {
|
||||
if condition.hasBodyPlaceholder() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
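The hasBodyPlaceholder helper is referenced but not shown in this hunk; a plausible minimal implementation, assuming it only needs to detect the [BODY] placeholder in the condition string, would be:

// Hypothetical sketch; the actual helper lives in condition.go and may differ.
func (c Condition) hasBodyPlaceholder() bool {
    return strings.Contains(string(c), BodyPlaceholder)
}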
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
func TestService_ValidateAndSetDefaults(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Alerts: []*Alert{{Type: PagerDutyAlert}},
|
||||
@@ -94,7 +94,7 @@ func TestService_ValidateAndSetDefaultsWithDNS(t *testing.T) {
|
||||
func TestService_GetAlertsTriggered(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Alerts: []*Alert{{Type: PagerDutyAlert, Enabled: true}},
|
||||
@@ -118,7 +118,7 @@ func TestService_GetAlertsTriggered(t *testing.T) {
|
||||
func TestService_buildHTTPRequest(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
@@ -138,7 +138,7 @@ func TestService_buildHTTPRequest(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithCustomUserAgent(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Headers: map[string]string{
|
||||
@@ -161,7 +161,7 @@ func TestService_buildHTTPRequestWithCustomUserAgent(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithHostHeader(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Method: "POST",
|
||||
Conditions: []*Condition{&condition},
|
||||
@@ -182,13 +182,13 @@ func TestService_buildHTTPRequestWithHostHeader(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithGraphQLEnabled(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-graphql",
|
||||
URL: "https://twinnation.org/graphql",
|
||||
Method: "POST",
|
||||
Conditions: []*Condition{&condition},
|
||||
GraphQL: true,
|
||||
Body: `{
|
||||
user(gender: "female") {
|
||||
users(gender: "female") {
|
||||
id
|
||||
name
|
||||
gender
|
||||
@@ -206,16 +206,17 @@ func TestService_buildHTTPRequestWithGraphQLEnabled(t *testing.T) {
|
||||
}
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
if !strings.HasPrefix(string(body), "{\"query\":") {
|
||||
t.Error("request.Body should've started with '{\"query\":', but it didn't:", string(body))
|
||||
t.Error("request.body should've started with '{\"query\":', but it didn't:", string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegrationEvaluateHealth(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
bodyCondition := Condition("[BODY].status == UP")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Conditions: []*Condition{&condition, &bodyCondition},
|
||||
}
|
||||
result := service.EvaluateHealth()
|
||||
if !result.ConditionResults[0].Success {
|
||||
@@ -232,7 +233,7 @@ func TestIntegrationEvaluateHealth(t *testing.T) {
|
||||
func TestIntegrationEvaluateHealthWithFailure(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 500")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
@@ -252,7 +253,7 @@ func TestIntegrationEvaluateHealthForDNS(t *testing.T) {
|
||||
conditionSuccess := Condition("[DNS_RCODE] == NOERROR")
|
||||
conditionBody := Condition("[BODY] == 93.184.216.34")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "example",
|
||||
URL: "8.8.8.8",
|
||||
DNS: &DNS{
|
||||
QueryType: "A",
|
||||
@@ -275,7 +276,7 @@ func TestIntegrationEvaluateHealthForDNS(t *testing.T) {
|
||||
func TestIntegrationEvaluateHealthForICMP(t *testing.T) {
|
||||
conditionSuccess := Condition("[CONNECTED] == true")
|
||||
service := Service{
|
||||
Name: "ICMP test",
|
||||
Name: "icmp-test",
|
||||
URL: "icmp://127.0.0.1",
|
||||
Conditions: []*Condition{&conditionSuccess},
|
||||
}
|
||||
@@ -294,7 +295,7 @@ func TestIntegrationEvaluateHealthForICMP(t *testing.T) {
|
||||
func TestService_getIP(t *testing.T) {
|
||||
conditionSuccess := Condition("[CONNECTED] == true")
|
||||
service := Service{
|
||||
Name: "Invalid URL test",
|
||||
Name: "invalid-url-test",
|
||||
URL: "",
|
||||
Conditions: []*Condition{&conditionSuccess},
|
||||
}
|
||||
@@ -304,3 +305,27 @@ func TestService_getIP(t *testing.T) {
|
||||
t.Error("service.getIP(result) should've thrown an error because the URL is invalid, thus cannot be parsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_NeedsToReadBody(t *testing.T) {
|
||||
statusCondition := Condition("[STATUS] == 200")
|
||||
bodyCondition := Condition("[BODY].status == UP")
|
||||
bodyConditionWithLength := Condition("len([BODY].tags) > 0")
|
||||
if (&Service{Conditions: []*Condition{&statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected false, got true")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyConditionWithLength}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&statusCondition, &bodyCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyCondition, &statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyConditionWithLength, &statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// RFC3339WithoutMinutesAndSeconds is the format defined by RFC3339 (see time.RFC3339) but with the minutes
|
||||
// and seconds hardcoded to 0.
|
||||
RFC3339WithoutMinutesAndSeconds = "2006-01-02T15:00:00Z07:00"
|
||||
|
||||
numberOfHoursInTenDays = 10 * 24
|
||||
sevenDays = 7 * 24 * time.Hour
|
||||
)
|
||||
@@ -25,42 +20,45 @@ type Uptime struct {
|
||||
// LastHour is the uptime percentage over the past hour
|
||||
LastHour float64 `json:"1h"`
|
||||
|
||||
successCountPerHour map[string]uint64
|
||||
totalCountPerHour map[string]uint64
|
||||
// SuccessfulExecutionsPerHour is a map containing the number of successes (value)
|
||||
// for every hourly unix timestamp (key)
|
||||
SuccessfulExecutionsPerHour map[int64]uint64 `json:"-"`
|
||||
|
||||
// TotalExecutionsPerHour is a map containing the total number of checks (value)
|
||||
// for every hourly unix timestamp (key)
|
||||
TotalExecutionsPerHour map[int64]uint64 `json:"-"`
|
||||
}
|
||||
|
||||
// NewUptime creates a new Uptime
|
||||
func NewUptime() *Uptime {
|
||||
return &Uptime{
|
||||
successCountPerHour: make(map[string]uint64),
|
||||
totalCountPerHour: make(map[string]uint64),
|
||||
SuccessfulExecutionsPerHour: make(map[int64]uint64),
|
||||
TotalExecutionsPerHour: make(map[int64]uint64),
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessResult processes the result by extracting the relevant data from the result and recalculating the uptime
|
||||
// if necessary
|
||||
func (uptime *Uptime) ProcessResult(result *Result) {
|
||||
timestampDateWithHour := result.Timestamp.Format(RFC3339WithoutMinutesAndSeconds)
|
||||
if result.Success {
|
||||
uptime.successCountPerHour[timestampDateWithHour]++
|
||||
if uptime.SuccessfulExecutionsPerHour == nil || uptime.TotalExecutionsPerHour == nil {
|
||||
uptime.SuccessfulExecutionsPerHour = make(map[int64]uint64)
|
||||
uptime.TotalExecutionsPerHour = make(map[int64]uint64)
|
||||
}
|
||||
uptime.totalCountPerHour[timestampDateWithHour]++
|
||||
unixTimestampFlooredAtHour := result.Timestamp.Unix() - (result.Timestamp.Unix() % 3600)
|
||||
if result.Success {
|
||||
uptime.SuccessfulExecutionsPerHour[unixTimestampFlooredAtHour]++
|
||||
}
|
||||
uptime.TotalExecutionsPerHour[unixTimestampFlooredAtHour]++
|
||||
// Clean up only when we're starting to have too many useless keys
|
||||
// Note that this is only triggered when there are more entries than there should be after
|
||||
// 10 days, despite the fact that we are deleting everything that's older than 7 days.
|
||||
// This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
|
||||
if len(uptime.totalCountPerHour) > numberOfHoursInTenDays {
|
||||
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour))
|
||||
for k := range uptime.totalCountPerHour {
|
||||
dateWithHour, err := time.Parse(time.RFC3339, k)
|
||||
if err != nil {
|
||||
// This shouldn't happen, but we'll log it in case it does happen
|
||||
log.Println("[uptime][ProcessResult] Failed to parse programmatically generated timestamp:", err.Error())
|
||||
continue
|
||||
}
|
||||
if sevenDaysAgo.Unix() > dateWithHour.Unix() {
|
||||
delete(uptime.totalCountPerHour, k)
|
||||
delete(uptime.successCountPerHour, k)
|
||||
if len(uptime.TotalExecutionsPerHour) > numberOfHoursInTenDays {
|
||||
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
|
||||
for hourlyUnixTimestamp := range uptime.TotalExecutionsPerHour {
|
||||
if sevenDaysAgo > hourlyUnixTimestamp {
|
||||
delete(uptime.TotalExecutionsPerHour, hourlyUnixTimestamp)
|
||||
delete(uptime.SuccessfulExecutionsPerHour, hourlyUnixTimestamp)
|
||||
}
|
||||
}
|
||||
}
|
||||
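The new maps are keyed by the Unix timestamp floored to the start of the hour. A small self-contained sketch of the bucketing expression used above:

```go
package main

import (
	"fmt"
	"time"
)

// flooredHour truncates a timestamp to the start of its hour, mirroring the
// `timestamp.Unix() - (timestamp.Unix() % 3600)` expression used for the map keys.
func flooredHour(ts time.Time) int64 {
	return ts.Unix() - (ts.Unix() % 3600)
}

func main() {
	ts := time.Date(2021, 1, 1, 15, 42, 7, 0, time.UTC)
	fmt.Println(flooredHour(ts) == time.Date(2021, 1, 1, 15, 0, 0, 0, time.UTC).Unix()) // true
}
```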
@@ -87,9 +85,9 @@ func (uptime *Uptime) recalculate() {
|
||||
// The oldest uptime bracket starts 7 days ago, so we'll start from there
|
||||
timestamp := now.Add(-sevenDays)
|
||||
for now.Sub(timestamp) >= 0 {
|
||||
timestampDateWithHour := timestamp.Format(RFC3339WithoutMinutesAndSeconds)
|
||||
successCountForTimestamp := uptime.successCountPerHour[timestampDateWithHour]
|
||||
totalCountForTimestamp := uptime.totalCountPerHour[timestampDateWithHour]
|
||||
hourlyUnixTimestamp := timestamp.Unix() - (timestamp.Unix() % 3600)
|
||||
successCountForTimestamp := uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp]
|
||||
totalCountForTimestamp := uptime.TotalExecutionsPerHour[hourlyUnixTimestamp]
|
||||
uptimeBrackets["7d_success"] += successCountForTimestamp
|
||||
uptimeBrackets["7d_total"] += totalCountForTimestamp
|
||||
if now.Sub(timestamp) <= 24*time.Hour {
|
||||
|
||||
@@ -51,10 +51,9 @@ func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
||||
timestamp := now.Add(-12 * 24 * time.Hour)
|
||||
for timestamp.Unix() <= now.Unix() {
|
||||
serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
|
||||
if len(serviceStatus.Uptime.successCountPerHour) > numberOfHoursInTenDays {
|
||||
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.successCountPerHour", numberOfHoursInTenDays)
|
||||
if len(serviceStatus.Uptime.SuccessfulExecutionsPerHour) > numberOfHoursInTenDays {
|
||||
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(serviceStatus.Uptime.SuccessfulExecutionsPerHour))
|
||||
}
|
||||
//fmt.Printf("timestamp=%s; uptimeDuringLastHour=%f; timeAgo=%s\n", timestamp.Format(time.RFC3339), serviceStatus.UptimeDuringLastHour, time.Since(timestamp))
|
||||
if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
|
||||
t.Error("most recent timestamp > 1h ago, expected serviceStatus.Uptime.LastHour to be 0, got", serviceStatus.Uptime.LastHour)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
alerting:
|
||||
mattermost:
|
||||
webhook-url: "http://mattermost:8065/hooks/tokengoeshere"
|
||||
insecure: true
|
||||
|
||||
services:
|
||||
- name: example
|
||||
url: http://example.org
|
||||
interval: 30s
|
||||
interval: 1m
|
||||
alerts:
|
||||
- type: mattermost
|
||||
enabled: true
|
||||
description: "healthcheck failed 3 times in a row"
|
||||
send-on-resolved: true
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
|
||||
@@ -1,13 +1,24 @@
|
||||
version: "3.8"
|
||||
services:
|
||||
gatus:
|
||||
image: twinproduction/gatus:latest
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- ./config.yaml:/config/config.yaml
|
||||
|
||||
mattermost-preview:
|
||||
image: mattermost/mattermost-preview:latest
|
||||
ports:
|
||||
- 8065:8065
|
||||
services:
|
||||
gatus:
|
||||
container_name: gatus
|
||||
image: twinproduction/gatus:latest
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- ./config.yaml:/config/config.yaml
|
||||
networks:
|
||||
- default
|
||||
|
||||
mattermost:
|
||||
container_name: mattermost
|
||||
image: mattermost/mattermost-preview:5.26.0
|
||||
ports:
|
||||
- 8065:8065
|
||||
networks:
|
||||
- default
|
||||
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
|
||||
@@ -81,11 +81,11 @@ spec:
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 50M
|
||||
cpu: 250m
|
||||
memory: 100M
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 20M
|
||||
memory: 30M
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: gatus-config
|
||||
|
||||
@@ -59,11 +59,11 @@ spec:
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 50M
|
||||
cpu: 250m
|
||||
memory: 100M
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 20M
|
||||
memory: 30M
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: gatus-config
|
||||
|
||||
go.mod (3 changed lines)
@@ -4,7 +4,8 @@ go 1.15
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.74.0 // indirect
|
||||
github.com/TwinProduction/gocache v1.1.0
|
||||
github.com/TwinProduction/gocache v1.2.1
|
||||
github.com/TwinProduction/health v1.0.0
|
||||
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0
|
||||
|
||||
go.sum (10 changed lines)
@@ -50,8 +50,10 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/TwinProduction/gocache v1.1.0 h1:mibBUyccd8kGHlm5dXhTMDOvWBK4mjNqGyOOkG8mib8=
|
||||
github.com/TwinProduction/gocache v1.1.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y=
|
||||
github.com/TwinProduction/gocache v1.2.1 h1:NAdMwO9SQEZFmX69YWx6fzhwb6fHakkLri0451c+V1w=
|
||||
github.com/TwinProduction/gocache v1.2.1/go.mod h1:6zkBoLjrFLkIISwkZTgLy67qliCGSon1xpORM4Ri5HM=
|
||||
github.com/TwinProduction/health v1.0.0 h1:TVyYTAORQQZ8LaptX8jCHZRCGCAO6e+oJx19BUIzQYY=
|
||||
github.com/TwinProduction/health v1.0.0/go.mod h1:ys4mYKUeEfYrWmkm60xLtPjTuLIEDQNBZaTZvenLG1c=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@@ -73,8 +75,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -432,6 +432,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
|
||||
@@ -20,6 +20,15 @@ func TestEval(t *testing.T) {
	}
}

func TestEvalWithInvalidPath(t *testing.T) {
	path := "errors"
	data := `{}`
	_, _, err := Eval(path, []byte(data))
	if err == nil {
		t.Error("Expected error, but got", err)
	}
}

func TestEvalWithLongSimpleWalk(t *testing.T) {
	path := "long.simple.walk"
	data := `{"long": {"simple": {"walk": "value"}}}`

@@ -16,13 +16,13 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// KubernetesClientApi is a minimal interface for interacting with Kubernetes
|
||||
// KubernetesClientAPI is a minimal interface for interacting with Kubernetes
|
||||
// Created mostly to make mocking the Kubernetes client easier
|
||||
type KubernetesClientApi interface {
|
||||
type KubernetesClientAPI interface {
|
||||
GetServices(namespace string) ([]v1.Service, error)
|
||||
}
|
||||
|
||||
// KubernetesClient is a working implementation of KubernetesClientApi
|
||||
// KubernetesClient is a working implementation of KubernetesClientAPI
|
||||
type KubernetesClient struct {
|
||||
client *kubernetes.Clientset
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func NewKubernetesClient(client *kubernetes.Clientset) *KubernetesClient {
|
||||
}
|
||||
|
||||
// NewClient creates a Kubernetes client for the given ClusterMode
|
||||
func NewClient(clusterMode ClusterMode) (KubernetesClientApi, error) {
|
||||
func NewClient(clusterMode ClusterMode) (KubernetesClientAPI, error) {
|
||||
var kubeConfig *rest.Config
|
||||
var err error
|
||||
switch clusterMode {
|
||||
|
||||
@@ -5,6 +5,6 @@ import (
|
||||
)
|
||||
|
||||
// GetKubernetesServices returns a list of Services from the given namespace
|
||||
func GetKubernetesServices(client KubernetesClientApi, namespace string) ([]v1.Service, error) {
|
||||
func GetKubernetesServices(client KubernetesClientAPI, namespace string) ([]v1.Service, error) {
|
||||
return client.GetServices(namespace)
|
||||
}
|
||||
|
||||
main.go (22 changed lines)
@@ -1,17 +1,37 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/TwinProduction/gatus/config"
|
||||
"github.com/TwinProduction/gatus/controller"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/TwinProduction/gatus/watchdog"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg := loadConfiguration()
|
||||
go watchdog.Monitor(cfg)
|
||||
controller.Handle()
|
||||
go controller.Handle()
|
||||
// Wait for termination signal
|
||||
sig := make(chan os.Signal, 1)
|
||||
done := make(chan bool, 1)
|
||||
signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-sig
|
||||
log.Println("Received termination signal, attempting to gracefully shut down")
|
||||
controller.Shutdown()
|
||||
err := storage.Get().Save()
|
||||
if err != nil {
|
||||
log.Println("Failed to save storage provider:", err.Error())
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
<-done
|
||||
log.Println("Shutting down")
|
||||
}
|
||||
|
||||
func loadConfiguration() *config.Config {
|
||||
|
||||
@@ -10,10 +10,11 @@ func Match(pattern, s string) bool {
	if pattern == "*" {
		return true
	}
	// Backslashes break filepath.Match, so we'll remove all of them.
	// This has a pretty significant impact on performance when there
	// are backslashes, but at least it doesn't break filepath.Match.
	s = strings.ReplaceAll(s, "\\", "")
	// Separators found in the string break filepath.Match, so we'll remove all of them.
	// This has a pretty significant impact on performance when there are separators in
	// the strings, but at least it doesn't break filepath.Match.
	s = strings.ReplaceAll(s, string(filepath.Separator), "")
	pattern = strings.ReplaceAll(pattern, string(filepath.Separator), "")
	matched, _ := filepath.Match(pattern, s)
	return matched
}

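For context on why the separators are stripped before matching: filepath.Match treats separator characters specially, so a wildcard will not cross them. A small standard-library-only illustration of the difference (values are made up for the example):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Without stripping, "*" cannot cross a path separator, so this is false on Unix-like systems.
	raw, _ := filepath.Match("*", "api/v1/health")
	fmt.Println(raw) // false

	// With stripping (the approach taken above): remove separators first, then match.
	stripped := strings.ReplaceAll("api/v1/health", string(filepath.Separator), "")
	ok, _ := filepath.Match("*", stripped)
	fmt.Println(ok) // true
}
```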
static/bootstrap.min.css (vendored, 6 changed lines): file diff suppressed because one or more lines are too long
@@ -1,414 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>Health Dashboard</title>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<link rel="stylesheet" href="./bootstrap.min.css" />
|
||||
<style>
|
||||
html, body {
|
||||
background-color: #f7f9fb;
|
||||
}
|
||||
html {
|
||||
height: 100%;
|
||||
}
|
||||
#results div.container:first-child {
|
||||
border-top-left-radius: 3px;
|
||||
border-top-right-radius: 3px;
|
||||
}
|
||||
#results div.container:last-child {
|
||||
border-bottom-left-radius: 3px;
|
||||
border-bottom-right-radius: 3px;
|
||||
border-bottom-width: 1px;
|
||||
border-color: #dee2e6;
|
||||
border-style: solid;
|
||||
}
|
||||
.status {
|
||||
cursor: pointer;
|
||||
transition: all 500ms ease-in-out;
|
||||
overflow-x: hidden;
|
||||
padding: .25em 0;
|
||||
color: white;
|
||||
}
|
||||
.title {
|
||||
font-size: 2.5rem;
|
||||
}
|
||||
.status:hover {
|
||||
opacity: 0.7;
|
||||
transition: opacity 100ms ease-in-out;
|
||||
color: black;
|
||||
}
|
||||
.status-over-time {
|
||||
overflow: auto;
|
||||
}
|
||||
.status-over-time>span:not(:last-child) {
|
||||
margin-left: 2px;
|
||||
}
|
||||
.status-time-ago {
|
||||
color: #6a737d;
|
||||
opacity: 0.5;
|
||||
margin-top: 5px;
|
||||
}
|
||||
.status-min-max-ms {
|
||||
overflow-x: hidden;
|
||||
}
|
||||
#tooltip {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
background-color: white;
|
||||
border: 1px solid lightgray;
|
||||
border-radius: 4px;
|
||||
padding: 6px;
|
||||
font-size: 13px;
|
||||
}
|
||||
#tooltip code {
|
||||
color: #212529;
|
||||
line-height: 1;
|
||||
}
|
||||
#tooltip .tooltip-title {
|
||||
font-weight: bold;
|
||||
margin-bottom: 0;
|
||||
display: block;
|
||||
}
|
||||
#tooltip .tooltip-title {
|
||||
margin-top: 8px;
|
||||
}
|
||||
#tooltip>.tooltip-title:first-child {
|
||||
margin-top: 0;
|
||||
}
|
||||
#social {
|
||||
position: fixed;
|
||||
right: 5px;
|
||||
bottom: 5px;
|
||||
padding: 5px;
|
||||
margin: 0;
|
||||
z-index: 100;
|
||||
}
|
||||
#social img {
|
||||
opacity: 0.3;
|
||||
}
|
||||
#social img:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
#settings {
|
||||
position: fixed;
|
||||
left: 5px;
|
||||
bottom: 5px;
|
||||
padding: 5px;
|
||||
}
|
||||
#settings select:focus {
|
||||
box-shadow: none;
|
||||
}
|
||||
.service-group {
|
||||
cursor: pointer;
|
||||
user-select: none;
|
||||
}
|
||||
.service-group h5:hover {
|
||||
color: #1b1e21 !important;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container my-3 rounded p-3 border shadow">
|
||||
<div class="mb-2">
|
||||
<div class="row">
|
||||
<div class="col-8 text-left my-auto">
|
||||
<div class="title display-4">Health Status</div>
|
||||
</div>
|
||||
<div class="col-4 text-right">
|
||||
<img src="logo.png" alt="Gatus" style="position: relative; min-width: 50px; max-width: 200px; width: 20%;"/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="results"></div>
|
||||
</div>
|
||||
|
||||
<div id="tooltip" style="display: none">
|
||||
<div class="tooltip-title">Timestamp:</div>
|
||||
<code id="tooltip-timestamp">...</code>
|
||||
<div class="tooltip-title">Response time:</div>
|
||||
<code id="tooltip-response-time">...</code>
|
||||
<div class="tooltip-title">Conditions:</div>
|
||||
<code id="tooltip-conditions">...</code>
|
||||
<div id="tooltip-errors-container">
|
||||
<div class="tooltip-title">Errors:</div>
|
||||
<code id="tooltip-errors">...</code>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script src="./jquery.min.js"></script>
|
||||
|
||||
<div id="social">
|
||||
<a href="https://github.com/TwinProduction/gatus" target="_blank" title="Gatus on GitHub">
|
||||
<img src="./github.png" alt="GitHub" width="32" height="auto" />
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<div id="settings">
|
||||
<div class="input-group input-group-sm">
|
||||
<div class="input-group-prepend">
|
||||
<div class="input-group-text">↻</div>
|
||||
</div>
|
||||
<select class="form-control form-control-sm" id="refresh-rate">
|
||||
<option value="10">10s</option>
|
||||
<option value="30" selected>30s</option>
|
||||
<option value="60">1m</option>
|
||||
<option value="120">2m</option>
|
||||
<option value="300">5m</option>
|
||||
<option value="600">10m</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let serviceStatuses = {};
|
||||
let timerHandler = 0;
|
||||
let refreshIntervalHandler = 0;
|
||||
let userClickedStatus = false;
|
||||
|
||||
// TODO: make this variable configurable and persist the choice in localStorage
|
||||
let showStatusOnHover = true;
|
||||
|
||||
function showTooltip(serviceName, index, element) {
|
||||
//userClickedStatus = false;
|
||||
clearTimeout(timerHandler);
|
||||
let serviceResult = serviceStatuses[serviceName].results[index];
|
||||
$("#tooltip-timestamp").text(prettifyTimestamp(serviceResult.timestamp));
|
||||
$("#tooltip-response-time").text(parseInt(serviceResult.duration/1000000) + "ms");
|
||||
// Populate the condition section
|
||||
let conditions = "";
|
||||
for (let i in serviceResult['condition-results']) {
|
||||
let conditionResult = serviceResult['condition-results'][i];
|
||||
conditions += (conditionResult.success ? "✓" : "X") + " ~ " + htmlEntities(conditionResult.condition) + "<br />";
|
||||
}
|
||||
$("#tooltip-conditions").html(conditions);
|
||||
// Populate the error section only if there are errors
|
||||
if (serviceResult.errors && serviceResult.errors.length > 0) {
|
||||
let errors = "";
|
||||
for (let i in serviceResult.errors) {
|
||||
errors += "- " + htmlEntities(serviceResult.errors[i]) + "<br />";
|
||||
}
|
||||
$("#tooltip-errors").html(errors);
|
||||
$("#tooltip-errors-container").show();
|
||||
} else {
|
||||
$("#tooltip-errors-container").hide();
|
||||
}
|
||||
// Position tooltip
|
||||
$("#tooltip").css({top: "0px", left: "0px"}).show();
|
||||
let targetTopPosition = element.getBoundingClientRect().y + 30;
|
||||
let targetLeftPosition = element.getBoundingClientRect().x;
|
||||
// Make adjustments if necessary
|
||||
let tooltipBoundingClientRect = document.querySelector('#tooltip').getBoundingClientRect();
|
||||
if (targetLeftPosition + window.scrollX + tooltipBoundingClientRect.width + 50 > document.body.getBoundingClientRect().width) {
|
||||
targetLeftPosition = element.getBoundingClientRect().x - tooltipBoundingClientRect.width + element.getBoundingClientRect().width;
|
||||
if (targetLeftPosition < 0) {
|
||||
targetLeftPosition += -targetLeftPosition;
|
||||
}
|
||||
}
|
||||
if (targetTopPosition + window.scrollY + tooltipBoundingClientRect.height + 50 > document.body.getBoundingClientRect().height && targetTopPosition >= 0) {
|
||||
targetTopPosition = element.getBoundingClientRect().y - (tooltipBoundingClientRect.height + 10);
|
||||
if (targetTopPosition < 0) {
|
||||
targetTopPosition = element.getBoundingClientRect().y + 30;
|
||||
}
|
||||
}
|
||||
$("#tooltip").css({top: targetTopPosition + "px", left: targetLeftPosition + "px"});
|
||||
}
|
||||
|
||||
function fadeTooltip() {
|
||||
if (!userClickedStatus) {
|
||||
timerHandler = setTimeout(function () {
|
||||
$("#tooltip").hide();
|
||||
}, 500);
|
||||
}
|
||||
}
|
||||
|
||||
function toggleTooltip(serviceName, index, element) {
|
||||
console.log("userClickedStatus="+userClickedStatus);
|
||||
if (!userClickedStatus) {
|
||||
showTooltip(serviceName, index, element);
|
||||
userClickedStatus = true;
|
||||
} else {
|
||||
$("#tooltip").hide();
|
||||
userClickedStatus = false;
|
||||
}
|
||||
}
|
||||
|
||||
function createStatusBadge(serviceStatusIndex, index, success) {
|
||||
if (success) {
|
||||
if (showStatusOnHover) {
|
||||
return "<span class='status badge badge-success' style='width: 5%' onmouseenter='showTooltip(\"" + serviceStatusIndex + "\", " + index + ", this)' onmouseleave='fadeTooltip()' onclick='userClickedStatus = !userClickedStatus;'>✓</span>";
|
||||
} else {
|
||||
return "<span class='status badge badge-success' style='width: 5%' onclick='toggleTooltip(\"" + serviceStatusIndex + "\", " + index + ", this)'>✓</span>";
|
||||
}
|
||||
}
|
||||
if (showStatusOnHover) {
|
||||
return "<span class='status badge badge-danger' style='width: 5%' onmouseenter='showTooltip(\"" + serviceStatusIndex + "\", " + index + ", this)' onmouseleave='fadeTooltip()' onclick='userClickedStatus = !userClickedStatus;'>X</span>";
|
||||
} else {
|
||||
return "<span class='status badge badge-danger' style='width: 5%' onclick='toggleTooltip(\"" + serviceStatusIndex + "\", " + index + ", this)'>X</span>";
|
||||
}
|
||||
}
|
||||
|
||||
function refreshStatuses() {
|
||||
$.getJSON("./api/v1/statuses", function (data) {
|
||||
// Update the table only if there's a change
|
||||
if (JSON.stringify(serviceStatuses) !== JSON.stringify(data)) {
|
||||
serviceStatuses = data;
|
||||
buildTable();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function buildTable() {
|
||||
let outputByGroup = {};
|
||||
for (let serviceStatusIndex in serviceStatuses) {
|
||||
let serviceStatusOverTime = "";
|
||||
let serviceStatus = serviceStatuses[serviceStatusIndex];
|
||||
let hostname = serviceStatus.results[serviceStatus.results.length-1].hostname;
|
||||
let minResponseTime = null;
|
||||
let maxResponseTime = null;
|
||||
let newestTimestamp = null;
|
||||
let oldestTimestamp = null;
|
||||
for (let resultIndex in serviceStatus.results) {
|
||||
let serviceResult = serviceStatus.results[resultIndex];
|
||||
serviceStatusOverTime = createStatusBadge(serviceStatusIndex, resultIndex, serviceResult.success) + serviceStatusOverTime;
|
||||
const responseTime = parseInt(serviceResult.duration/1000000);
|
||||
if (minResponseTime == null || minResponseTime > responseTime) {
|
||||
minResponseTime = responseTime;
|
||||
}
|
||||
if (maxResponseTime == null || maxResponseTime < responseTime) {
|
||||
maxResponseTime = responseTime;
|
||||
}
|
||||
const timestamp = new Date(serviceResult.timestamp);
|
||||
if (newestTimestamp == null || newestTimestamp < timestamp) {
|
||||
newestTimestamp = timestamp;
|
||||
}
|
||||
if (oldestTimestamp == null || oldestTimestamp > timestamp) {
|
||||
oldestTimestamp = timestamp;
|
||||
}
|
||||
}
|
||||
let output = ""
|
||||
+ "<div class='container py-3 border-left border-right border-top border-black rounded-0'>"
|
||||
+ " <div class='row mb-2'>"
|
||||
+ " <div class='col-md-10'>"
|
||||
+ " <span class='font-weight-bold'>" + serviceStatus.name + "</span> <span class='text-secondary font-weight-lighter'>- " + hostname + "</span>"
|
||||
+ " </div>"
|
||||
+ " <div class='col-md-2 text-right'>"
|
||||
+ " <span class='font-weight-lighter status-min-max-ms'>" + (minResponseTime === maxResponseTime ? minResponseTime : (minResponseTime + "-" + maxResponseTime)) + "ms</span>"
|
||||
+ " </div>"
|
||||
+ " </div>"
|
||||
+ " <div class='row'>"
|
||||
+ " <div class='col-12 d-flex flex-row-reverse status-over-time'>"
|
||||
+ " " + serviceStatusOverTime
|
||||
+ " </div>"
|
||||
+ " </div>"
|
||||
+ " <div class='row status-time-ago'>"
|
||||
+ " <div class='col-6'>"
|
||||
+ " " + generatePrettyTimeAgo(oldestTimestamp)
|
||||
+ " </div>"
|
||||
+ " <div class='col-6 text-right'>"
|
||||
+ " " + generatePrettyTimeAgo(newestTimestamp)
|
||||
+ " </div>"
|
||||
+ " </div>"
|
||||
+ "</div>";
|
||||
// create an empty entry if this group is new
|
||||
if (!outputByGroup[serviceStatus.group]) {
|
||||
outputByGroup[serviceStatus.group] = "";
|
||||
}
|
||||
outputByGroup[serviceStatus.group] += output;
|
||||
}
|
||||
let output = "";
|
||||
for (let group in outputByGroup) {
|
||||
// Services that don't have a group should be skipped and left for last
|
||||
if (group === 'undefined') {
|
||||
continue
|
||||
}
|
||||
let key = group.replace(/[^a-zA-Z0-9]/g, '');
|
||||
let existingGroupContentSelector = $("#service-group-" + key + "-content");
|
||||
let isCurrentlyHidden = existingGroupContentSelector.length && existingGroupContentSelector[0].style.display === 'none';
|
||||
let groupStatus = "<span class='text-success'>✓</span>";
|
||||
if (outputByGroup[group].includes("badge badge-danger")) {
|
||||
groupStatus = "<span class='text-warning'>~</span>";
|
||||
}
|
||||
output += ""
|
||||
+ "<div class='mt-" + (output.length ? '4' : '3') + "'>"
|
||||
+ " <div class='container pt-2 border-left border-right border-top border-black border-bottom service-group' id='service-group-" + key + "' data-group='" + key + "' onclick='toggleGroup(this)'>"
|
||||
+ " <h5 class='text-secondary text-monospace pb-0'>"
|
||||
+ " " + groupStatus + " " + group
|
||||
+ " <span class='float-right service-group-arrow' id='service-group-" + key + "-arrow'>" + (isCurrentlyHidden ? "▼" : "▲") + "</span>"
|
||||
+ " </h5>"
|
||||
+ " </div>"
|
||||
+ " <div class='service-group-content' id='service-group-" + key + "-content' style='" + (isCurrentlyHidden ? "display: none;" : "") + "'>"
|
||||
+ " " + outputByGroup[group]
|
||||
+ " </div>"
|
||||
+ "</div>";
|
||||
}
|
||||
// Add all services that don't have a group at the end
|
||||
if (outputByGroup['undefined']) {
|
||||
output += ""
|
||||
+ "<div class='mt-" + (output.length ? '4' : '3') + "'>"
|
||||
+ " " + outputByGroup['undefined']
|
||||
+ "</div>"
|
||||
}
|
||||
$("#results").html(output);
|
||||
}
|
||||
|
||||
function toggleGroup(element) {
|
||||
let selector = $("#service-group-" + element.dataset.group + "-content");
|
||||
selector.toggle("fast", function() {
|
||||
if (selector.length && selector[0].style.display === 'none') {
|
||||
$("#service-group-" + element.dataset.group + "-arrow").html("▼");
|
||||
} else {
|
||||
$("#service-group-" + element.dataset.group + "-arrow").html("▲");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function prettifyTimestamp(timestamp) {
|
||||
let date = new Date(timestamp);
|
||||
let YYYY = date.getFullYear();
|
||||
let MM = ((date.getMonth()+1)<10?"0":"")+""+(date.getMonth()+1);
|
||||
let DD = ((date.getDate())<10?"0":"")+""+(date.getDate());
|
||||
let hh = ((date.getHours())<10?"0":"")+""+(date.getHours());
|
||||
let mm = ((date.getMinutes())<10?"0":"")+""+(date.getMinutes());
|
||||
let ss = ((date.getSeconds())<10?"0":"")+""+(date.getSeconds());
|
||||
return YYYY + "-" + MM + "-" + DD + " " + hh + ":" + mm + ":" + ss;
|
||||
}
|
||||
|
||||
function generatePrettyTimeAgo(t) {
|
||||
let differenceInMs = new Date().getTime() - new Date(t).getTime();
|
||||
if (differenceInMs > 3600000) {
|
||||
let hours = (differenceInMs/3600000).toFixed(0);
|
||||
return hours + " hour" + (hours !== "1" ? "s" : "") + " ago";
|
||||
}
|
||||
if (differenceInMs > 60000) {
|
||||
let minutes = (differenceInMs/60000).toFixed(0);
|
||||
return minutes + " minute" + (minutes !== "1" ? "s" : "") + " ago";
|
||||
}
|
||||
return (differenceInMs/1000).toFixed(0) + " seconds ago";
|
||||
}
|
||||
|
||||
function htmlEntities(s) {
|
||||
return String(s)
|
||||
.replace(/&/g, '&')
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/"/g, '"')
|
||||
.replace(/'/g, ''');
|
||||
}
|
||||
|
||||
function setRefreshInterval(seconds) {
|
||||
refreshStatuses();
|
||||
refreshIntervalHandler = setInterval(function() {
|
||||
refreshStatuses();
|
||||
}, seconds * 1000);
|
||||
}
|
||||
|
||||
$("#refresh-rate").change(function() {
|
||||
clearInterval(refreshIntervalHandler);
|
||||
setRefreshInterval($(this).val());
|
||||
});
|
||||
setRefreshInterval(30);
|
||||
$("#refresh-rate").val(30);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
static/jquery.min.js (vendored, 2 changed lines): file diff suppressed because one or more lines are too long
Two binary image files (20 KiB and 83 KiB before the change) are not shown.
storage/config.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package storage

// Config is the configuration for the storage provider
type Config struct {
	// File is the path of the file to use for persistence
	// If blank, persistence is disabled.
	File string `yaml:"file"`
}
@@ -1,61 +0,0 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
// InMemoryStore implements an in-memory store
|
||||
type InMemoryStore struct {
|
||||
serviceStatuses map[string]*core.ServiceStatus
|
||||
serviceResultsMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// NewInMemoryStore returns an in-memory store. Note that the store acts as a singleton, so although new-ing
|
||||
// up in-memory stores will give you a unique reference to a struct each time, all structs returned
|
||||
// by this function will act on the same in-memory store.
|
||||
func NewInMemoryStore() *InMemoryStore {
|
||||
return &InMemoryStore{
|
||||
serviceStatuses: make(map[string]*core.ServiceStatus),
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
|
||||
func (ims *InMemoryStore) GetAllAsJSON() ([]byte, error) {
|
||||
ims.serviceResultsMutex.RLock()
|
||||
serviceStatuses, err := json.Marshal(ims.serviceStatuses)
|
||||
ims.serviceResultsMutex.RUnlock()
|
||||
return serviceStatuses, err
|
||||
}
|
||||
|
||||
// GetServiceStatus returns the service status for a given service name in the given group
|
||||
func (ims *InMemoryStore) GetServiceStatus(group, name string) *core.ServiceStatus {
|
||||
key := fmt.Sprintf("%s_%s", group, name)
|
||||
ims.serviceResultsMutex.RLock()
|
||||
serviceStatus := ims.serviceStatuses[key]
|
||||
ims.serviceResultsMutex.RUnlock()
|
||||
return serviceStatus
|
||||
}
|
||||
|
||||
// Insert inserts the observed result for the specified service into the in memory store
|
||||
func (ims *InMemoryStore) Insert(service *core.Service, result *core.Result) {
|
||||
key := fmt.Sprintf("%s_%s", service.Group, service.Name)
|
||||
ims.serviceResultsMutex.Lock()
|
||||
serviceStatus, exists := ims.serviceStatuses[key]
|
||||
if !exists {
|
||||
serviceStatus = core.NewServiceStatus(service)
|
||||
ims.serviceStatuses[key] = serviceStatus
|
||||
}
|
||||
serviceStatus.AddResult(result)
|
||||
ims.serviceResultsMutex.Unlock()
|
||||
}
|
||||
|
||||
// Clear will empty all the results from the in memory store
|
||||
func (ims *InMemoryStore) Clear() {
|
||||
ims.serviceResultsMutex.Lock()
|
||||
ims.serviceStatuses = make(map[string]*core.ServiceStatus)
|
||||
ims.serviceResultsMutex.Unlock()
|
||||
}
|
||||
storage/storage.go (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/storage/store"
|
||||
"github.com/TwinProduction/gatus/storage/store/memory"
|
||||
)
|
||||
|
||||
var (
|
||||
provider store.Store
|
||||
|
||||
// initialized keeps track of whether the storage provider was initialized
|
||||
// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
|
||||
// every single time Get is called, we'll just lazily keep track of its existence through this variable
|
||||
initialized bool
|
||||
)
|
||||
|
||||
// Get retrieves the storage provider
|
||||
func Get() store.Store {
|
||||
if !initialized {
|
||||
log.Println("[storage][Get] Provider requested before it was initialized, automatically initializing")
|
||||
err := Initialize(nil)
|
||||
if err != nil {
|
||||
panic("failed to automatically initialize store: " + err.Error())
|
||||
}
|
||||
}
|
||||
return provider
|
||||
}
|
||||
|
||||
// Initialize instantiates the storage provider based on the Config provider
|
||||
func Initialize(cfg *Config) error {
|
||||
initialized = true
|
||||
var err error
|
||||
if cfg == nil || len(cfg.File) == 0 {
|
||||
log.Println("[storage][Initialize] Creating storage provider")
|
||||
provider, err = memory.NewStore("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("[storage][Initialize] Creating storage provider with file=%s", cfg.File)
|
||||
provider, err = memory.NewStore(cfg.File)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go autoSave(7 * time.Minute)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// autoSave automatically calls the Save function of the provider at every interval
|
||||
func autoSave(interval time.Duration) {
|
||||
for {
|
||||
time.Sleep(interval)
|
||||
log.Printf("[storage][autoSave] Saving")
|
||||
err := provider.Save()
|
||||
if err != nil {
|
||||
log.Println("[storage][autoSave] Save failed:", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
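A minimal usage sketch of the provider defined above, assuming it is wired up once at startup (the file path and error handling are illustrative only):

```go
package main

import (
	"log"

	"github.com/TwinProduction/gatus/storage"
)

func main() {
	// A Config with a file enables persistence and the periodic autoSave goroutine;
	// passing nil (or an empty File) keeps everything in memory only.
	if err := storage.Initialize(&storage.Config{File: "/data/gatus.db"}); err != nil {
		log.Fatal("failed to initialize storage: ", err)
	}
	// Persist one last time before exiting, as main.go does on termination.
	defer func() {
		if err := storage.Get().Save(); err != nil {
			log.Println("failed to save storage provider:", err.Error())
		}
	}()
}
```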
storage/store/memory/memory.go (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
"github.com/TwinProduction/gocache"
|
||||
)
|
||||
|
||||
func init() {
|
||||
gob.Register(&core.ServiceStatus{})
|
||||
gob.Register(&core.Uptime{})
|
||||
gob.Register(&core.Result{})
|
||||
gob.Register(&core.Event{})
|
||||
}
|
||||
|
||||
// Store that leverages gocache
|
||||
type Store struct {
|
||||
file string
|
||||
cache *gocache.Cache
|
||||
}
|
||||
|
||||
// NewStore creates a new store
|
||||
func NewStore(file string) (*Store, error) {
|
||||
store := &Store{
|
||||
file: file,
|
||||
cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
|
||||
}
|
||||
if len(file) > 0 {
|
||||
_, err := store.cache.ReadFromFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return store, nil
|
||||
}
|
||||
|
||||
// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
|
||||
// with a subset of core.Result defined by the page and pageSize parameters
|
||||
func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus {
|
||||
serviceStatuses := s.cache.GetAll()
|
||||
pagedServiceStatuses := make(map[string]*core.ServiceStatus, len(serviceStatuses))
|
||||
for k, v := range serviceStatuses {
|
||||
pagedServiceStatuses[k] = v.(*core.ServiceStatus).WithResultPagination(page, pageSize)
|
||||
}
|
||||
return pagedServiceStatuses
|
||||
}
|
||||
|
||||
// GetServiceStatus returns the service status for a given service name in the given group
|
||||
func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
|
||||
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
|
||||
}
|
||||
|
||||
// GetServiceStatusByKey returns the service status for a given key
|
||||
func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
|
||||
serviceStatus := s.cache.GetValue(key)
|
||||
if serviceStatus == nil {
|
||||
return nil
|
||||
}
|
||||
return serviceStatus.(*core.ServiceStatus)
|
||||
}
|
||||
|
||||
// Insert adds the observed result for the specified service into the store
|
||||
func (s *Store) Insert(service *core.Service, result *core.Result) {
|
||||
key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
|
||||
serviceStatus, exists := s.cache.Get(key)
|
||||
if !exists {
|
||||
serviceStatus = core.NewServiceStatus(service)
|
||||
}
|
||||
serviceStatus.(*core.ServiceStatus).AddResult(result)
|
||||
s.cache.Set(key, serviceStatus)
|
||||
}
|
||||
|
||||
// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
|
||||
func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
|
||||
var keysToDelete []string
|
||||
for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
|
||||
shouldDelete := true
|
||||
for _, key := range keys {
|
||||
if existingKey == key {
|
||||
shouldDelete = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if shouldDelete {
|
||||
keysToDelete = append(keysToDelete, existingKey)
|
||||
}
|
||||
}
|
||||
return s.cache.DeleteAll(keysToDelete)
|
||||
}
|
||||
|
||||
// Clear deletes everything from the store
|
||||
func (s *Store) Clear() {
|
||||
s.cache.Clear()
|
||||
}
|
||||
|
||||
// Save persists the cache to the store file
|
||||
func (s *Store) Save() error {
|
||||
if len(s.file) > 0 {
|
||||
return s.cache.SaveToFile(s.file)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
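A short, self-contained sketch of using the new memory store directly (the service and result values are made up for the example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/memory"
)

func main() {
	// An empty file path disables persistence, so Save() becomes a no-op.
	store, err := memory.NewStore("")
	if err != nil {
		log.Fatal(err)
	}
	service := &core.Service{Name: "frontend", Group: "core"}
	store.Insert(service, &core.Result{Success: true})
	fmt.Println(store.GetServiceStatus("core", "frontend") != nil) // true
}
```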
@@ -1,4 +1,4 @@
|
||||
package storage
|
||||
package memory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -32,7 +33,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
@@ -58,7 +58,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
@@ -82,17 +81,17 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func TestInMemoryStore_Insert(t *testing.T) {
|
||||
store := NewInMemoryStore()
|
||||
func TestStore_Insert(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
if len(store.serviceStatuses) != 1 {
|
||||
t.Fatalf("expected 1 ServiceStatus, got %d", len(store.serviceStatuses))
|
||||
if store.cache.Count() != 1 {
|
||||
t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
|
||||
}
|
||||
key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
|
||||
serviceStatus, exists := store.serviceStatuses[key]
|
||||
if !exists {
|
||||
serviceStatus := store.GetServiceStatusByKey(key)
|
||||
if serviceStatus == nil {
|
||||
t.Fatalf("Store should've had key '%s', but didn't", key)
|
||||
}
|
||||
if len(serviceStatus.Results) != 2 {
|
||||
@@ -106,9 +105,6 @@ func TestInMemoryStore_Insert(t *testing.T) {
|
||||
if r.DNSRCode != expectedResult.DNSRCode {
|
||||
t.Errorf("Result at index %d should've had a DNSRCode of %s, but was actually %s", i, expectedResult.DNSRCode, r.DNSRCode)
|
||||
}
|
||||
if len(r.Body) != len(expectedResult.Body) {
|
||||
t.Errorf("Result at index %d should've had a body of length %d, but was actually %d", i, len(expectedResult.Body), len(r.Body))
|
||||
}
|
||||
if r.Hostname != expectedResult.Hostname {
|
||||
t.Errorf("Result at index %d should've had a Hostname of %s, but was actually %s", i, expectedResult.Hostname, r.Hostname)
|
||||
}
|
||||
@@ -139,8 +135,8 @@ func TestInMemoryStore_Insert(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryStore_GetServiceStatus(t *testing.T) {
|
||||
store := NewInMemoryStore()
|
||||
func TestStore_GetServiceStatus(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
@@ -160,11 +156,10 @@ func TestInMemoryStore_GetServiceStatus(t *testing.T) {
|
||||
if serviceStatus.Uptime.LastSevenDays != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
|
||||
}
|
||||
fmt.Println(serviceStatus.Results[0].Timestamp.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
|
||||
store := NewInMemoryStore()
|
||||
func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
|
||||
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
|
||||
@@ -181,8 +176,31 @@ func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
|
||||
store := NewInMemoryStore()
|
||||
func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
serviceStatus := store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(testService.Group, testService.Name))
|
||||
if serviceStatus == nil {
|
||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime == nil {
|
||||
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime.LastHour != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastSevenDays != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
firstResult := &testSuccessfulResult
|
||||
secondResult := &testUnsuccessfulResult
|
||||
store.Insert(&testService, firstResult)
|
||||
@@ -190,12 +208,65 @@ func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
|
||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||
firstResult.Timestamp = time.Time{}
|
||||
secondResult.Timestamp = time.Time{}
|
||||
output, err := store.GetAllAsJSON()
|
||||
if err != nil {
|
||||
t.Fatal("shouldn't have returned an error, got", err.Error())
|
||||
serviceStatuses := store.GetAllServiceStatusesWithResultPagination(1, 20)
|
||||
if len(serviceStatuses) != 1 {
|
||||
t.Fatal("expected 1 service status")
|
||||
}
|
||||
expectedOutput := `{"group_name":{"name":"name","group":"group","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"condition-results":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"condition-results":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"uptime":{"7d":0.5,"24h":0.5,"1h":0.5}}}`
|
||||
if string(output) != expectedOutput {
|
||||
t.Errorf("expected:\n %s\n\ngot:\n %s", expectedOutput, string(output))
|
||||
actual, exists := serviceStatuses[util.ConvertGroupAndServiceToKey(testService.Group, testService.Name)]
|
||||
if !exists {
|
||||
t.Fatal("expected service status to exist")
|
||||
}
|
||||
if len(actual.Results) != 2 {
|
||||
t.Error("expected 2 results, got", len(actual.Results))
|
||||
}
|
||||
if len(actual.Events) != 2 {
|
||||
t.Error("expected 2 events, got", len(actual.Events))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
firstService := core.Service{Name: "service-1", Group: "group"}
|
||||
secondService := core.Service{Name: "service-2", Group: "group"}
|
||||
result := &testSuccessfulResult
|
||||
store.Insert(&firstService, result)
|
||||
store.Insert(&secondService, result)
|
||||
if store.cache.Count() != 2 {
|
||||
t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
|
||||
t.Fatal("firstService should exist")
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) == nil {
|
||||
t.Fatal("secondService should exist")
|
||||
}
|
||||
store.DeleteAllServiceStatusesNotInKeys([]string{util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)})
|
||||
if store.cache.Count() != 1 {
|
||||
t.Fatalf("expected cache to have 1 keys, got %d", store.cache.Count())
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
|
||||
t.Error("secondService should've been deleted")
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) != nil {
|
||||
t.Error("firstService should still exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_Save(t *testing.T) {
|
||||
files := []string{
|
||||
"",
|
||||
t.TempDir() + "/test.db",
|
||||
}
|
||||
for _, file := range files {
|
||||
t.Run(file, func(t *testing.T) {
|
||||
store, err := NewStore(file)
|
||||
if err != nil {
|
||||
t.Fatal("expected no error, got", err.Error())
|
||||
}
|
||||
err = store.Save()
|
||||
if err != nil {
|
||||
t.Fatal("expected no error, got", err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
storage/store/store.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package store

import (
	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/memory"
)

// Store is the interface that each store should implement
type Store interface {
	// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
	// with a subset of core.Result defined by the page and pageSize parameters
	GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus

	// GetServiceStatus returns the service status for a given service name in the given group
	GetServiceStatus(groupName, serviceName string) *core.ServiceStatus

	// GetServiceStatusByKey returns the service status for a given key
	GetServiceStatusByKey(key string) *core.ServiceStatus

	// Insert adds the observed result for the specified service into the store
	Insert(service *core.Service, result *core.Result)

	// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
	//
	// Used to delete services that have been persisted but are no longer part of the configured services
	DeleteAllServiceStatusesNotInKeys(keys []string) int

	// Clear deletes everything from the store
	Clear()

	// Save persists the data if and where it needs to be persisted
	Save() error
}

var (
	// Validate interface implementation on compile
	_ Store = (*memory.Store)(nil)
)
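The var block above is the usual compile-time assertion pattern; any additional backend would opt in the same way. A hedged sketch with a purely hypothetical sqlite package:

```go
// Hypothetical: if a sqlite-backed store were added later, the same guard would fail
// compilation as soon as its methods drift from the Store interface.
var _ Store = (*sqlite.Store)(nil)
```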
storage/store/store_bench_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package store

import (
	"testing"
	"time"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/memory"
)

var (
	firstCondition  = core.Condition("[STATUS] == 200")
	secondCondition = core.Condition("[RESPONSE_TIME] < 500")
	thirdCondition  = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")

	timestamp = time.Now()

	testService = core.Service{
		Name:                    "name",
		Group:                   "group",
		URL:                     "https://example.org/what/ever",
		Method:                  "GET",
		Body:                    "body",
		Interval:                30 * time.Second,
		Conditions:              []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
		Alerts:                  nil,
		Insecure:                false,
		NumberOfFailuresInARow:  0,
		NumberOfSuccessesInARow: 0,
	}
	testSuccessfulResult = core.Result{
		Hostname:              "example.org",
		IP:                    "127.0.0.1",
		HTTPStatus:            200,
		Errors:                nil,
		Connected:             true,
		Success:               true,
		Timestamp:             timestamp,
		Duration:              150 * time.Millisecond,
		CertificateExpiration: 10 * time.Hour,
		ConditionResults: []*core.ConditionResult{
			{
				Condition: "[STATUS] == 200",
				Success:   true,
			},
			{
				Condition: "[RESPONSE_TIME] < 500",
				Success:   true,
			},
			{
				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
				Success:   true,
			},
		},
	}
	testUnsuccessfulResult = core.Result{
		Hostname:              "example.org",
		IP:                    "127.0.0.1",
		HTTPStatus:            200,
		Errors:                []string{"error-1", "error-2"},
		Connected:             true,
		Success:               false,
		Timestamp:             timestamp,
		Duration:              750 * time.Millisecond,
		CertificateExpiration: 10 * time.Hour,
		ConditionResults: []*core.ConditionResult{
			{
				Condition: "[STATUS] == 200",
				Success:   true,
			},
			{
				Condition: "[RESPONSE_TIME] < 500",
				Success:   false,
			},
			{
				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
				Success:   false,
			},
		},
	}
)

func BenchmarkStore_GetAllAsJSON(b *testing.B) {
	memoryStore, err := memory.NewStore("")
	if err != nil {
		b.Fatal("failed to create store:", err.Error())
	}
	type Scenario struct {
		Name  string
		Store Store
	}
	scenarios := []Scenario{
		{
			Name:  "memory",
			Store: memoryStore,
		},
	}
	for _, scenario := range scenarios {
		scenario.Store.Insert(&testService, &testSuccessfulResult)
		scenario.Store.Insert(&testService, &testUnsuccessfulResult)
		b.Run(scenario.Name, func(b *testing.B) {
			for n := 0; n < b.N; n++ {
				scenario.Store.GetAllServiceStatusesWithResultPagination(1, 20)
			}
			b.ReportAllocs()
		})
	}
}

func BenchmarkStore_Insert(b *testing.B) {
	memoryStore, err := memory.NewStore("")
	if err != nil {
		b.Fatal("failed to create store:", err.Error())
	}
	type Scenario struct {
		Name  string
		Store Store
	}
	scenarios := []Scenario{
		{
			Name:  "memory",
			Store: memoryStore,
		},
	}
	for _, scenario := range scenarios {
		b.Run(scenario.Name, func(b *testing.B) {
			for n := 0; n < b.N; n++ {
				if n%100 == 0 {
					scenario.Store.Insert(&testService, &testSuccessfulResult)
				} else {
					scenario.Store.Insert(&testService, &testUnsuccessfulResult)
				}
			}
			b.ReportAllocs()
		})
	}
}
18
util/key.go
Normal file
@@ -0,0 +1,18 @@
package util

import "strings"

// ConvertGroupAndServiceToKey converts a group and a service to a key
func ConvertGroupAndServiceToKey(group, service string) string {
	return sanitize(group) + "_" + sanitize(service)
}

func sanitize(s string) string {
	s = strings.TrimSpace(strings.ToLower(s))
	s = strings.ReplaceAll(s, "/", "-")
	s = strings.ReplaceAll(s, "_", "-")
	s = strings.ReplaceAll(s, ".", "-")
	s = strings.ReplaceAll(s, ",", "-")
	s = strings.ReplaceAll(s, " ", "-")
	return s
}
36
util/key_test.go
Normal file
@@ -0,0 +1,36 @@
package util

import "testing"

func TestConvertGroupAndServiceToKey(t *testing.T) {
	type Scenario struct {
		GroupName      string
		ServiceName    string
		ExpectedOutput string
	}
	scenarios := []Scenario{
		{
			GroupName:      "Core",
			ServiceName:    "Front End",
			ExpectedOutput: "core_front-end",
		},
		{
			GroupName:      "Load balancers",
			ServiceName:    "us-west-2",
			ExpectedOutput: "load-balancers_us-west-2",
		},
		{
			GroupName:      "a/b test",
			ServiceName:    "a",
			ExpectedOutput: "a-b-test_a",
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.ExpectedOutput, func(t *testing.T) {
			output := ConvertGroupAndServiceToKey(scenario.GroupName, scenario.ServiceName)
			if output != scenario.ExpectedOutput {
				t.Errorf("Expected '%s', got '%s'", scenario.ExpectedOutput, output)
			}
		})
	}
}
106
vendor/github.com/TwinProduction/gocache/README.md
generated
vendored
@@ -306,9 +306,9 @@ If you do not start the janitor, there will be no passive deletion of expired ke
|
||||
For the sake of convenience, a ready-to-go cache server is available
|
||||
through the `gocacheserver` package.
|
||||
|
||||
The reason why the server is in a different package is because `gocache` does not use
|
||||
any external dependencies, but rather than re-inventing the wheel, the server
|
||||
implementation uses redcon, which is a Redis server framework for Go.
|
||||
The reason why the server is in a different package is that `gocache` limits its external dependencies to the strict
minimum (e.g. boltdb for persistence); rather than re-inventing the wheel, however, the server implementation uses
redcon, which is a very good Redis server framework for Go.
|
||||
|
||||
That way, those who desire to use gocache without the server will not add any extra dependencies
|
||||
as long as they don't import the `gocacheserver` package.
|
||||
@@ -323,7 +323,7 @@ import (
|
||||
|
||||
func main() {
|
||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
|
||||
server := gocacheserver.NewServer(cache)
|
||||
server := gocacheserver.NewServer(cache).WithPort(6379)
|
||||
server.Start()
|
||||
}
|
||||
```
|
||||
@@ -382,43 +382,67 @@ but if you're looking into using a library like gocache, odds are, you want more
|
||||
| mem | 32G DDR4 |
|
||||
|
||||
```
|
||||
BenchmarkMap_Get-8 95936680 26.3 ns/op
|
||||
BenchmarkMap_SetSmallValue-8 7738132 424 ns/op
|
||||
BenchmarkMap_SetMediumValue-8 7766346 424 ns/op
|
||||
BenchmarkMap_SetLargeValue-8 7947063 435 ns/op
|
||||
BenchmarkCache_Get-8 54549049 45.7 ns/op
|
||||
BenchmarkCache_SetSmallValue-8 35225013 69.2 ns/op
|
||||
BenchmarkCache_SetMediumValue-8 5952064 412 ns/op
|
||||
BenchmarkCache_SetLargeValue-8 5969121 411 ns/op
|
||||
BenchmarkCache_GetUsingLRU-8 54545949 45.6 ns/op
|
||||
BenchmarkCache_SetSmallValueUsingLRU-8 5909504 419 ns/op
|
||||
BenchmarkCache_SetMediumValueUsingLRU-8 5910885 418 ns/op
|
||||
BenchmarkCache_SetLargeValueUsingLRU-8 5867544 419 ns/op
|
||||
BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 5477178 462 ns/op
|
||||
BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 5417595 475 ns/op
|
||||
BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 5215263 479 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize10-8 10115574 236 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize10-8 10242792 241 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize10-8 10201894 241 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize1000-8 9637113 253 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize1000-8 9635175 253 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize1000-8 9598982 260 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize100000-8 7642584 337 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize100000-8 7407571 344 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize100000-8 7071360 345 ns/op
|
||||
BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 7544194 332 ns/op
|
||||
BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 7667004 344 ns/op
|
||||
BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 7357642 338 ns/op
|
||||
BenchmarkCache_GetAndSetMultipleConcurrently-8 1442306 1684 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 5117271 477 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 5228412 475 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 5139195 529 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 5251639 511 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 7384626 334 ns/op
|
||||
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 7361985 332 ns/op
|
||||
BenchmarkCache_GetConcurrentlyWithLRU-8 3370784 726 ns/op
|
||||
BenchmarkCache_GetConcurrentlyWithFIFO-8 3749994 681 ns/op
|
||||
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 17647344 143 ns/op
|
||||
// Normal map
|
||||
BenchmarkMap_Get
|
||||
BenchmarkMap_Get-8 46087372 26.7 ns/op
|
||||
BenchmarkMap_Set
|
||||
BenchmarkMap_Set/small_value-8 3841911 389 ns/op
|
||||
BenchmarkMap_Set/medium_value-8 3887074 391 ns/op
|
||||
BenchmarkMap_Set/large_value-8 3921956 393 ns/op
|
||||
// Gocache
|
||||
BenchmarkCache_Get
|
||||
BenchmarkCache_Get/FirstInFirstOut-8 27273036 46.4 ns/op
|
||||
BenchmarkCache_Get/LeastRecentlyUsed-8 26648248 46.3 ns/op
|
||||
BenchmarkCache_Set
|
||||
BenchmarkCache_Set/FirstInFirstOut_small_value-8 2919584 405 ns/op
|
||||
BenchmarkCache_Set/FirstInFirstOut_medium_value-8 2990841 391 ns/op
|
||||
BenchmarkCache_Set/FirstInFirstOut_large_value-8 2970513 391 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 2962939 402 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 2962963 390 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 2962928 394 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2683356 447 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2637578 441 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2672434 443 ns/op
|
||||
BenchmarkCache_SetWithMaxSize
|
||||
BenchmarkCache_SetWithMaxSize/100_small_value-8 4782966 252 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_small_value-8 4067967 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_small_value-8 3762055 328 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100_medium_value-8 4760479 252 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4081050 295 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3785050 330 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100_large_value-8 4732909 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_large_value-8 4079533 297 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_large_value-8 3712820 331 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4761732 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4084474 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3761402 329 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4783075 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103980 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3646023 331 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4779025 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 4096192 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3726823 331 ns/op
|
||||
BenchmarkCache_GetSetMultipleConcurrent
|
||||
BenchmarkCache_GetSetMultipleConcurrent-8 707142 1698 ns/op
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3616256 334 ns/op
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3636367 331 ns/op
|
||||
BenchmarkCache_GetConcurrentWithLRU
|
||||
BenchmarkCache_GetConcurrentWithLRU/FirstInFirstOut-8 4405557 268 ns/op
|
||||
BenchmarkCache_GetConcurrentWithLRU/LeastRecentlyUsed-8 4445475 269 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6184591 191 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true-8 6090482 191 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6184629 187 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false-8 6281781 186 ns/op
|
||||
(Trimmed "BenchmarkCache_" for readability)
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4379564 268 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4379558 265 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4444456 261 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 4493896 262 ns/op
|
||||
```
|
||||
|
||||
|
||||
|
||||
2
vendor/github.com/TwinProduction/gocache/go.mod
generated
vendored
@@ -3,9 +3,9 @@ module github.com/TwinProduction/gocache
|
||||
go 1.15
|
||||
|
||||
require (
|
||||
github.com/boltdb/bolt v1.3.1
|
||||
github.com/go-redis/redis v6.15.9+incompatible
|
||||
github.com/onsi/ginkgo v1.14.1 // indirect
|
||||
github.com/onsi/gomega v1.10.2 // indirect
|
||||
github.com/tidwall/redcon v1.3.2
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
)
|
||||
|
||||
5
vendor/github.com/TwinProduction/gocache/go.sum
generated
vendored
@@ -1,5 +1,3 @@
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
@@ -30,6 +28,8 @@ github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
|
||||
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/tidwall/redcon v1.3.2 h1:8INx/Nm3VSUbDUT16TH1rMgYQsbXNqy9xcX70edHXbo=
|
||||
github.com/tidwall/redcon v1.3.2/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
|
||||
@@ -40,6 +40,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
||||
12
vendor/github.com/TwinProduction/gocache/gocache.go
generated
vendored
@@ -170,7 +170,7 @@ func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
|
||||
// value, _ := cache.Get("key")
|
||||
// // the following returns true, because the interface{} was forcefully set to nil
|
||||
// if value == nil {}
|
||||
// // the following will panic, because the value has been casted to its type
|
||||
// // the following will panic, because the value has been cast to its type (which is nil)
|
||||
// if value.(*Struct) == nil {}
|
||||
//
|
||||
// If set to false:
|
||||
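To make the pitfall that `WithForceNilInterfaceOnNilPointer` guards against concrete, here is a small standalone sketch of Go's nil-interface semantics. It is independent of gocache and uses no gocache API; the `Struct` type is purely illustrative.

```go
package main

import "fmt"

type Struct struct{}

func main() {
	var p *Struct         // nil pointer
	var v interface{} = p // the interface now holds (type=*Struct, value=nil)

	fmt.Println(p == nil)           // true
	fmt.Println(v == nil)           // false: the interface carries a non-nil type
	fmt.Println(v.(*Struct) == nil) // true once asserted back to its concrete type
}
```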
@@ -218,7 +218,8 @@ func (cache *Cache) Set(key string, value interface{}) {
|
||||
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
|
||||
// provided, the entry will not be created if the key doesn't exist
|
||||
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
|
||||
// An interface is only nil if both its value and its type are nil, however, passing a pointer
|
||||
// An interface is only nil if both its value and its type are nil; passing a nil pointer as an interface{}, however,
// means that the interface itself is not nil, because the interface's value is nil but its type is not.
|
||||
if cache.forceNilInterfaceOnNilPointer {
|
||||
if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
|
||||
value = nil
|
||||
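A brief usage sketch of the `SetWithTTL` signature above. The keys and durations are illustrative; `NoExpiration` is the sentinel mentioned in the doc comment, and per the README, expired keys are only passively deleted if the janitor has been started.

```go
package main

import (
	"time"

	"github.com/TwinProduction/gocache"
)

func main() {
	cache := gocache.NewCache()
	// Entry becomes eligible for expiration roughly 10 minutes after being set.
	cache.SetWithTTL("session:abc123", "some-value", 10*time.Minute)
	// Entry never expires.
	cache.SetWithTTL("static-config", "some-value", gocache.NoExpiration)
}
```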
@@ -334,6 +335,13 @@ func (cache *Cache) Get(key string) (interface{}, bool) {
|
||||
return entry.Value, true
|
||||
}
|
||||
|
||||
// GetValue retrieves an entry using the key passed as parameter
|
||||
// Unlike Get, this function only returns the value
|
||||
func (cache *Cache) GetValue(key string) interface{} {
|
||||
value, _ := cache.Get(key)
|
||||
return value
|
||||
}
|
||||
|
||||
// GetByKeys retrieves multiple entries using the keys passed as parameter
|
||||
// All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
|
||||
// cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
|
||||
|
||||
2
vendor/github.com/TwinProduction/gocache/persistence.go
generated
vendored
@@ -8,7 +8,7 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// SaveToFile stores the content of the cache to a file so that it can be read using
|
||||
|
||||
26
vendor/github.com/TwinProduction/gocache/policy.go
generated
vendored
@@ -3,6 +3,30 @@ package gocache
|
||||
type EvictionPolicy string
|
||||
|
||||
var (
|
||||
// LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the
|
||||
// head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to
|
||||
// gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction
|
||||
// is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close
|
||||
// to the head.
|
||||
//
|
||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
||||
// put 3 at the head and 1 at the tail:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If the cache entry 1 was then accessed, 1 would become the head and 2 the tail:
|
||||
// 1 (head) -> 3 -> 2 (tail)
|
||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted:
|
||||
// 4 (head) -> 1 -> 3 (tail)
|
||||
LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"
|
||||
FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
|
||||
|
||||
// FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are
|
||||
// created.
|
||||
//
|
||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
||||
// put 3 at the head and 1 at the tail:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted:
|
||||
// 4 (head) -> 3 -> 2 (tail)
|
||||
FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
|
||||
)
|
||||
|
||||
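As a quick illustration of the two policies documented above, this is how a cache is typically configured with one of them, reusing the `WithEvictionPolicy` and `WithMaxSize` builders shown earlier in the gocache README; the max size value is arbitrary.

```go
package main

import "github.com/TwinProduction/gocache"

func main() {
	// With LeastRecentlyUsed, reads move entries back toward the head,
	// so rarely accessed entries drift toward the tail and are evicted first.
	lruCache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(1000)

	// With FirstInFirstOut, reads do not affect ordering: the oldest entry
	// is always the next one evicted once MaxSize is exceeded.
	fifoCache := gocache.NewCache().WithEvictionPolicy(gocache.FirstInFirstOut).WithMaxSize(1000)

	_ = lruCache
	_ = fifoCache
}
```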
1
vendor/github.com/TwinProduction/health/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* text=lf
|
||||
2
vendor/github.com/TwinProduction/health/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
.idea
|
||||
*.iml
|
||||
72
vendor/github.com/TwinProduction/health/README.md
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
# health
|
||||
|
||||

|
||||
[](https://goreportcard.com/report/github.com/TwinProduction/health)
|
||||
[](https://codecov.io/gh/TwinProduction/health)
|
||||
[](https://github.com/TwinProduction/health)
|
||||
[](https://pkg.go.dev/github.com/TwinProduction/health)
|
||||
|
||||
Health is a library used for creating a very simple health endpoint.
|
||||
|
||||
While implementing a health endpoint is very simple, I've grown tired of implementing
|
||||
it over and over again.
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
go get -u github.com/TwinProduction/health
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
To retrieve the handler, you must use `health.Handler()` and are expected to pass it to the router like so:
|
||||
```go
|
||||
router := http.NewServeMux()
|
||||
router.Handle("/health", health.Handler())
|
||||
server := &http.Server{
|
||||
Addr: ":8080",
|
||||
Handler: router,
|
||||
}
|
||||
```
|
||||
|
||||
By default, the handler will return `UP` when the status is up, and `DOWN` when the status is down.
|
||||
If you prefer using JSON, however, you may initialize the health handler like so:
|
||||
```go
|
||||
router.Handle("/health", health.Handler().WithJSON(true))
|
||||
```
|
||||
The above will cause the response body to become `{"status":"UP"}` and `{"status":"DOWN"}` for the two statuses respectively.
|
||||
|
||||
To change the health of the application, you can use `health.SetStatus(<status>)` where `<status>` is one of `health.Up`
|
||||
or `health.Down`:
|
||||
|
||||
```go
|
||||
health.SetStatus(health.Up)
|
||||
health.SetStatus(health.Down)
|
||||
```
|
||||
|
||||
### Complete example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/health"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := http.NewServeMux()
|
||||
router.Handle("/health", health.Handler())
|
||||
server := &http.Server{
|
||||
Addr: "0.0.0.0:8080",
|
||||
Handler: router,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
IdleTimeout: 15 * time.Second,
|
||||
}
|
||||
server.ListenAndServe()
|
||||
}
|
||||
```
|
||||
3
vendor/github.com/TwinProduction/health/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
module github.com/TwinProduction/health
|
||||
|
||||
go 1.15
|
||||
51
vendor/github.com/TwinProduction/health/health.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
package health
|
||||
|
||||
import "net/http"
|
||||
|
||||
var (
|
||||
handler = &healthHandler{
|
||||
useJSON: false,
|
||||
status: Up,
|
||||
}
|
||||
)
|
||||
|
||||
type healthHandler struct {
|
||||
useJSON bool
|
||||
status Status
|
||||
}
|
||||
|
||||
// WithJSON configures whether the handler should output a response in JSON or in raw text
|
||||
//
|
||||
// Defaults to false
|
||||
func (h *healthHandler) WithJSON(v bool) *healthHandler {
|
||||
h.useJSON = v
|
||||
return h
|
||||
}
|
||||
|
||||
func (h healthHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
|
||||
var status int
|
||||
var body []byte
|
||||
if h.status == Up {
|
||||
status = http.StatusOK
|
||||
} else {
|
||||
status = http.StatusInternalServerError
|
||||
}
|
||||
if h.useJSON {
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
body = []byte(`{"status":"` + h.status + `"}`)
|
||||
} else {
|
||||
body = []byte(h.status)
|
||||
}
|
||||
writer.WriteHeader(status)
|
||||
_, _ = writer.Write(body)
|
||||
}
|
||||
|
||||
// Handler retrieves the health handler
|
||||
func Handler() *healthHandler {
|
||||
return handler
|
||||
}
|
||||
|
||||
// SetStatus sets the status to be reflected by the health handler
|
||||
func SetStatus(status Status) {
|
||||
handler.status = status
|
||||
}
|
||||
8
vendor/github.com/TwinProduction/health/status.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
package health
|
||||
|
||||
type Status string
|
||||
|
||||
var (
|
||||
Down Status = "DOWN"
|
||||
Up Status = "UP"
|
||||
)
|
||||
18
vendor/github.com/boltdb/bolt/Makefile
generated
vendored
@@ -1,18 +0,0 @@
|
||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
race:
|
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
|
||||
|
||||
test:
|
||||
@go test -v -cover .
|
||||
@go test -v ./cmd/bolt
|
||||
|
||||
.PHONY: fmt test
|
||||
18
vendor/github.com/boltdb/bolt/appveyor.yml
generated
vendored
@@ -1,18 +0,0 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\boltdb\bolt
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
||||
28
vendor/github.com/boltdb/bolt/bolt_arm.go
generated
vendored
@@ -1,28 +0,0 @@
|
||||
package bolt
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned bool
|
||||
|
||||
func init() {
|
||||
// Simple check to see whether this arch handles unaligned load/stores
|
||||
// correctly.
|
||||
|
||||
// ARM9 and older devices require load/stores to be from/to aligned
|
||||
// addresses. If not, the lower 2 bits are cleared and that address is
|
||||
// read in a jumbled up order.
|
||||
|
||||
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
|
||||
|
||||
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
|
||||
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
|
||||
|
||||
brokenUnaligned = val != 0x11222211
|
||||
}
|
||||
252
vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
@@ -1,252 +0,0 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
n := f.count()
|
||||
if n >= 0xFFFF {
|
||||
// The first element will be used to store the count. See freelist.write.
|
||||
n++
|
||||
}
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
func (f *freelist) count() int {
|
||||
return f.free_count() + f.pending_count()
|
||||
}
|
||||
|
||||
// free_count returns count of free pages
|
||||
func (f *freelist) free_count() int {
|
||||
return len(f.ids)
|
||||
}
|
||||
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||
// f.count returns the minimum length required for dst.
|
||||
func (f *freelist) copyall(dst []pgid) {
|
||||
m := make(pgids, 0, f.pending_count())
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
}
|
||||
sort.Sort(m)
|
||||
mergepgids(dst, f.ids, m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// free releases a page and its overflow for a given transaction id.
|
||||
// If the page is already free then a panic will occur.
|
||||
func (f *freelist) free(txid txid, p *page) {
|
||||
if p.id <= 1 {
|
||||
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
var ids = f.pending[txid]
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
|
||||
// Add to the freelist and cache.
|
||||
ids = append(ids, id)
|
||||
f.cache[id] = true
|
||||
}
|
||||
f.pending[txid] = ids
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, ids := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
for _, id := range f.pending[txid] {
|
||||
delete(f.cache, id)
|
||||
}
|
||||
|
||||
// Remove pages from pending list.
|
||||
delete(f.pending, txid)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
func (f *freelist) freed(pgid pgid) bool {
|
||||
return f.cache[pgid]
|
||||
}
|
||||
|
||||
// read initializes the freelist from a freelist page.
|
||||
func (f *freelist) read(p *page) {
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
idx, count := 0, int(p.count)
|
||||
if count == 0xFFFF {
|
||||
idx = 1
|
||||
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
|
||||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(f.ids))
|
||||
}
|
||||
|
||||
// Rebuild the page cache.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||
// saved to disk since in the event of a program crash, all pending ids will
|
||||
// become free.
|
||||
func (f *freelist) write(p *page) error {
|
||||
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||
|
||||
// Update the header flag.
|
||||
p.flags |= freelistPageFlag
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
lenids := f.count()
|
||||
if lenids == 0 {
|
||||
p.count = uint16(lenids)
|
||||
} else if lenids < 0xFFFF {
|
||||
p.count = uint16(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reload reads the freelist from a page and filters out pending items.
|
||||
func (f *freelist) reload(p *page) {
|
||||
f.read(p)
|
||||
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each page in the freelist and build a new available freelist
|
||||
// with any pages not in the pending lists.
|
||||
var a []pgid
|
||||
for _, id := range f.ids {
|
||||
if !pcache[id] {
|
||||
a = append(a, id)
|
||||
}
|
||||
}
|
||||
f.ids = a
|
||||
|
||||
// Once the available list is rebuilt then rebuild the free cache so that
|
||||
// it includes the available and pending free pages.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
f.cache = make(map[pgid]bool, len(f.ids))
|
||||
for _, id := range f.ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
f.cache[pendingID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
1
vendor/github.com/boltdb/bolt/.gitignore → vendor/go.etcd.io/bbolt/.gitignore
generated
vendored
@@ -2,3 +2,4 @@
|
||||
*.test
|
||||
*.swp
|
||||
/bin/
|
||||
cover.out
|
||||
17
vendor/go.etcd.io/bbolt/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
language: go
|
||||
go_import_path: go.etcd.io/bbolt
|
||||
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.12
|
||||
|
||||
before_install:
|
||||
- go get -v honnef.co/go/tools/...
|
||||
- go get -v github.com/kisielk/errcheck
|
||||
|
||||
script:
|
||||
- make fmt
|
||||
- make test
|
||||
- make race
|
||||
# - make errcheck
|
||||
0
vendor/github.com/boltdb/bolt/LICENSE → vendor/go.etcd.io/bbolt/LICENSE
generated
vendored
38
vendor/go.etcd.io/bbolt/Makefile
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
race:
|
||||
@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
@echo "array freelist test"
|
||||
@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
fmt:
|
||||
!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
|
||||
|
||||
# go get honnef.co/go/tools/simple
|
||||
gosimple:
|
||||
gosimple ./...
|
||||
|
||||
# go get honnef.co/go/tools/unused
|
||||
unused:
|
||||
unused ./...
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
|
||||
|
||||
test:
|
||||
TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
|
||||
# Note: gets "program not an importable package" in out of path builds
|
||||
TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
|
||||
|
||||
@echo "array freelist test"
|
||||
|
||||
@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
|
||||
# Note: gets "program not an importable package" in out of path builds
|
||||
@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
|
||||
|
||||
.PHONY: race fmt errcheck test gosimple unused
|
||||
205
vendor/github.com/boltdb/bolt/README.md → vendor/go.etcd.io/bbolt/README.md
generated
vendored
@@ -1,5 +1,18 @@
|
||||
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
|
||||
====
|
||||
bbolt
|
||||
=====
|
||||
|
||||
[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
|
||||
[](https://codecov.io/gh/etcd-io/bbolt)
|
||||
[](https://travis-ci.com/etcd-io/bbolt)
|
||||
[](https://godoc.org/github.com/etcd-io/bbolt)
|
||||
[](https://github.com/etcd-io/bbolt/releases)
|
||||
[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
|
||||
|
||||
bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
|
||||
store. The purpose of this fork is to provide the Go community with an active
|
||||
maintenance and development target for Bolt; the goal is improved reliability
|
||||
and stability. bbolt includes bug fixes, performance enhancements, and features
|
||||
not found in Bolt while preserving backwards compatibility with the Bolt API.
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
||||
@@ -10,6 +23,8 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
|
||||
simplicity is key. The API will be small and only focus on getting values
|
||||
and setting values. That's it.
|
||||
|
||||
[gh_ben]: https://github.com/benbjohnson
|
||||
[bolt]: https://github.com/boltdb/bolt
|
||||
[hyc_symas]: https://twitter.com/hyc_symas
|
||||
[lmdb]: http://symas.com/mdb/
|
||||
|
||||
@@ -21,36 +36,42 @@ consistency and thread safety. Bolt is currently used in high-load production
|
||||
environments serving databases as large as 1TB. Many companies such as
|
||||
Shopify and Heroku use Bolt-backed services every day.
|
||||
|
||||
## Project versioning
|
||||
|
||||
bbolt uses [semantic versioning](http://semver.org).
|
||||
API should not change between patch and minor releases.
|
||||
New minor versions may add additional features to the API.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Getting Started](#getting-started)
|
||||
- [Installing](#installing)
|
||||
- [Opening a database](#opening-a-database)
|
||||
- [Transactions](#transactions)
|
||||
- [Read-write transactions](#read-write-transactions)
|
||||
- [Read-only transactions](#read-only-transactions)
|
||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
||||
- [Managing transactions manually](#managing-transactions-manually)
|
||||
- [Using buckets](#using-buckets)
|
||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
||||
- [Iterating over keys](#iterating-over-keys)
|
||||
- [Prefix scans](#prefix-scans)
|
||||
- [Range scans](#range-scans)
|
||||
- [ForEach()](#foreach)
|
||||
- [Nested buckets](#nested-buckets)
|
||||
- [Database backups](#database-backups)
|
||||
- [Statistics](#statistics)
|
||||
- [Read-Only Mode](#read-only-mode)
|
||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
||||
- [Resources](#resources)
|
||||
- [Comparison with other databases](#comparison-with-other-databases)
|
||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
||||
- [LMDB](#lmdb)
|
||||
- [Caveats & Limitations](#caveats--limitations)
|
||||
- [Reading the Source](#reading-the-source)
|
||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Installing](#installing)
|
||||
- [Opening a database](#opening-a-database)
|
||||
- [Transactions](#transactions)
|
||||
- [Read-write transactions](#read-write-transactions)
|
||||
- [Read-only transactions](#read-only-transactions)
|
||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
||||
- [Managing transactions manually](#managing-transactions-manually)
|
||||
- [Using buckets](#using-buckets)
|
||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
||||
- [Iterating over keys](#iterating-over-keys)
|
||||
- [Prefix scans](#prefix-scans)
|
||||
- [Range scans](#range-scans)
|
||||
- [ForEach()](#foreach)
|
||||
- [Nested buckets](#nested-buckets)
|
||||
- [Database backups](#database-backups)
|
||||
- [Statistics](#statistics)
|
||||
- [Read-Only Mode](#read-only-mode)
|
||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
||||
- [Resources](#resources)
|
||||
- [Comparison with other databases](#comparison-with-other-databases)
|
||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
||||
- [LMDB](#lmdb)
|
||||
- [Caveats & Limitations](#caveats--limitations)
|
||||
- [Reading the Source](#reading-the-source)
|
||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
||||
|
||||
## Getting Started
|
||||
|
||||
@@ -59,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
|
||||
To start using Bolt, install Go and run `go get`:
|
||||
|
||||
```sh
|
||||
$ go get github.com/boltdb/bolt/...
|
||||
$ go get go.etcd.io/bbolt/...
|
||||
```
|
||||
|
||||
This will retrieve the library and install the `bolt` command line utility into
|
||||
your `$GOBIN` path.
|
||||
|
||||
|
||||
### Importing bbolt
|
||||
|
||||
To use bbolt as an embedded key-value store, import as:
|
||||
|
||||
```go
|
||||
import bolt "go.etcd.io/bbolt"
|
||||
|
||||
db, err := bolt.Open(path, 0666, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
```
|
||||
|
||||
|
||||
### Opening a database
|
||||
|
||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
||||
@@ -79,7 +115,7 @@ package main
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -116,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
|
||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
||||
a transaction at a time. Creating transaction from the `DB` is thread safe.
|
||||
|
||||
Read-only transactions and read-write transactions should not depend on one
|
||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
||||
This can cause a deadlock as the read-write transaction needs to periodically
|
||||
re-map the data file but it cannot do so while a read-only transaction is open.
|
||||
|
||||
Transactions should not depend on one another and generally shouldn't be opened
|
||||
simultaneously in the same goroutine. This can cause a deadlock as the read-write
|
||||
transaction needs to periodically re-map the data file but it cannot do so while
|
||||
any read-only transaction is open. Even a nested read-only transaction can cause
|
||||
a deadlock, as the child transaction can block the parent transaction from releasing
|
||||
its resources.
|
||||
|
||||
#### Read-write transactions
|
||||
|
||||
@@ -239,7 +276,7 @@ should be writable.
|
||||
### Using buckets
|
||||
|
||||
Buckets are collections of key/value pairs within the database. All keys in a
|
||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
||||
bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
|
||||
function:
|
||||
|
||||
```go
|
||||
@@ -522,7 +559,7 @@ this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes.
|
||||
|
||||
By default, it will use a regular file handle which will utilize the operating
|
||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
||||
system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
|
||||
documentation for information about optimizing for larger-than-RAM datasets.
|
||||
|
||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
||||
@@ -811,7 +848,7 @@ Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
## Reading the Source
|
||||
|
||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
||||
Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
|
||||
transactional key/value database so it can be a good starting point for people
|
||||
interested in how databases work.
|
||||
|
||||
@@ -863,54 +900,58 @@ them via pull request.
|
||||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
||||
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
||||
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
||||
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
|
||||
* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
|
||||
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
|
||||
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
|
||||
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
|
||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
||||
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
|
||||
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
|
||||
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
|
||||
backed by boltdb.
|
||||
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
|
||||
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
|
||||
simple tx and key scans.
|
||||
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
|
||||
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
|
||||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
||||
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
|
||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
||||
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
|
||||
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
|
||||
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
|
||||
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
|
||||
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
|
||||
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
|
||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
|
||||
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
|
||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
||||
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
|
||||
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
|
||||
* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
|
||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
||||
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
|
||||
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
|
||||
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
|
||||
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
|
||||
* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
|
||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
||||
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
|
||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
||||
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
|
||||
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
||||
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
|
||||
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
|
||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
|
||||
backed by boltdb.
|
||||
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
|
||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
|
||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
||||
|
||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
||||
5
vendor/github.com/boltdb/bolt/bolt_386.go → vendor/go.etcd.io/bbolt/bolt_386.go
generated
vendored
@@ -1,10 +1,7 @@
package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
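The rename above captures the two mechanical parts of this vendoring change: the import path moves from github.com/boltdb/bolt to go.etcd.io/bbolt, and the package identifier changes from bolt to bbolt. Below is a minimal sketch of how calling code typically absorbs that switch, assuming only the long-standing Open/Update/View API that both projects share; the database file, bucket, and key names are illustrative, not taken from this repository.

```go
package main

import (
	"fmt"
	"log"

	// Previously: bolt "github.com/boltdb/bolt"
	// Aliasing the fork keeps existing identifiers such as bolt.DB and bolt.Tx compiling unchanged.
	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the single-file database; 0600 limits access to the current user.
	db, err := bolt.Open("app.db", 0600, nil) // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store one key.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("settings")) // illustrative bucket name
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: read the key back.
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("greeting = %s\n", tx.Bucket([]byte("settings")).Get([]byte("greeting")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```

Because bbolt preserved Bolt's API surface, the import alias is usually the only change a consumer needs; the file renames in this diff are the same swap applied to the vendored copy.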
7
vendor/go.etcd.io/bbolt/bolt_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
7
vendor/go.etcd.io/bbolt/bolt_arm.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
5
vendor/github.com/boltdb/bolt/bolt_arm64.go → vendor/go.etcd.io/bbolt/bolt_arm64.go
generated
vendored
@@ -1,12 +1,9 @@
// +build arm64

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
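The per-architecture files above differ only in their constants: 386 and arm cap maxMapSize at 2GB, while amd64 and arm64 allow 256TB. Go picks exactly one of these files at build time, either from the _GOARCH filename suffix (bolt_386.go, bolt_amd64.go) or from an explicit build tag (// +build arm64). Here is a sketch of that pattern with hypothetical file, package, and constant names, not taken from bbolt; only the first section is live code, and each commented section would live in its own file.

```go
// File: maxmap_386.go — compiled only when GOARCH=386, because the _386 suffix
// is itself the build constraint (the same mechanism bolt_386.go relies on).
package storelimits

// maxMapSize bounds how much of the data file this hypothetical package mmaps at once.
const maxMapSize = 0x7FFFFFFF // 2GB: a 32-bit process cannot map more

// File: maxmap_amd64.go — compiled only when GOARCH=amd64.
//
//	package storelimits
//
//	const maxMapSize = 0xFFFFFFFFFFFF // 256TB: effectively unbounded on 64-bit
//
// File: maxmap_arm64.go — the same constraint expressed as a tag instead of a suffix,
// as bolt_arm64.go does.
//
//	// +build arm64
//
//	package storelimits
//
//	const maxMapSize = 0xFFFFFFFFFFFF // 256TB
```

Whichever file the toolchain selects, the rest of the package simply refers to maxMapSize; callers never need to branch on runtime.GOARCH.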
Some files were not shown because too many files have changed in this diff.