Compare commits
160 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | be4e9aba1e |  |
|  | ac0d00fdb5 |  |
|  | 3293222cd6 |  |
|  | 892f3ada6f |  |
|  | f22a79eb7d |  |
|  | 911deb91d1 |  |
|  | bcd4105af3 |  |
|  | 423ada68b3 |  |
|  | 70fa17349f |  |
|  | e640ede709 |  |
|  | fb3447eaf3 |  |
|  | 46cf616a57 |  |
|  | cf48072167 |  |
|  | 97dd868ae8 |  |
|  | c18b2728c9 |  |
|  | b3fd290e4d |  |
|  | 89e23a986c |  |
|  | c454c868f6 |  |
|  | 6d82a54518 |  |
|  | bd3c01a4f4 |  |
|  | 43150ae484 |  |
|  | acb6757dc8 |  |
|  | 2037d9aca6 |  |
|  | c700154f5e |  |
|  | aac72e3741 |  |
|  | 1a597f92ba |  |
|  | 56fedcedd1 |  |
|  | 6bdce4fe29 |  |
|  | 381488a1b2 |  |
|  | 42a909c1ad |  |
|  | 5a4fa6f2b0 |  |
|  | bbbfe7f466 |  |
|  | 7cf1750f86 |  |
|  | b88ae5fcf6 |  |
|  | 8516c41b43 |  |
|  | b90a64e2a6 |  |
|  | 627173e64f |  |
|  | 8b5e5f54cc |  |
|  | 2c95cce7b3 |  |
|  | 2ef9329fa6 |  |
|  | 9384373f43 |  |
|  | d3a81a2d57 |  |
|  | fed32d3909 |  |
|  | c1d9006aaf |  |
|  | 7126d36d85 |  |
|  | 677c7faffe |  |
|  | 8dedcf7c74 |  |
|  | a4c69d6fc3 |  |
|  | 943d0a19d1 |  |
|  | fd08c8b1e5 |  |
|  | 393147c300 |  |
|  | f73e8a56ef |  |
|  | 4203355edc |  |
|  | 5cc1c11b1a |  |
|  | 796228466d |  |
|  | 23ba9795a6 |  |
|  | 1291e86a6f |  |
|  | 14316cfd31 |  |
|  | 670272f411 |  |
|  | ffc3e644c5 |  |
|  | bc42d15625 |  |
|  | 20594b902c |  |
|  | 0a3267e499 |  |
|  | 9c8bf2b69e |  |
|  | bd1eb7c61b |  |
|  | e6335da94f |  |
|  | 1498b6d8a2 |  |
|  | 7aed826d65 |  |
|  | 9b68582622 |  |
|  | a1afeea56b |  |
|  | 38de0ec9cd |  |
|  | 9d8a3f1574 |  |
|  | b904afb8b5 |  |
|  | 5bf560221f |  |
|  | 574dd50b98 |  |
|  | 35c33620a5 |  |
|  | fc0c3499f4 |  |
|  | d03271d128 |  |
|  | 0560b98de4 |  |
|  | ca87547430 |  |
|  | e214d56af1 |  |
|  | 8997eeef05 |  |
|  | 5e00752c5a |  |
|  | f9d132c369 |  |
|  | ca977fefa8 |  |
|  | d07d3434a6 |  |
|  | 2131fa4412 |  |
|  | 81aeb7a48e |  |
|  | eaf395738d |  |
|  | f6f1ecf623 |  |
|  | 177081cf54 |  |
|  | 651bfcba22 |  |
|  | 3cd1953c6c |  |
|  | 9dd4e7047d |  |
|  | 067ab78666 |  |
|  | 28acaeb067 |  |
|  | 749aeb9e42 |  |
|  | 8e02572880 |  |
|  | 1f6f0ce426 |  |
|  | 7bc381b356 |  |
|  | 18420c2d60 |  |
|  | 0b4dc34c57 |  |
|  | 030212c156 |  |
|  | 63b0ac8b35 |  |
|  | 263b2f0f94 |  |
|  | db23bd9073 |  |
|  | 40dc1cc270 |  |
|  | 67c3bf6e5e |  |
|  | 57ef931d38 |  |
|  | e3038f0e80 |  |
|  | 8106832d69 |  |
|  | 758428b312 |  |
|  | 77de4c4742 |  |
|  | a85c5d5486 |  |
|  | c7d554efa5 |  |
|  | f3afdf2977 |  |
|  | 19a0ba7271 |  |
|  | 4a4c88ae17 |  |
|  | 253e6f8338 |  |
|  | 1d412678ff |  |
|  | 48c7514fa5 |  |
|  | d7b437595c |  |
|  | 50f530a05c |  |
|  | 2a632e8f87 |  |
|  | 857ad584e7 |  |
|  | 8b3b2f70bf |  |
|  | 4308f2c1ef |  |
|  | 425c93ed8f |  |
|  | 752e82d80b |  |
|  | e91462ce41 |  |
|  | 347297a8ea |  |
|  | 56dbe2fea0 |  |
|  | ebcca4317d |  |
|  | e6355dfee8 |  |
|  | 7309888db5 |  |
|  | f60eee86ee |  |
|  | e46acb885c |  |
|  | 24da853820 |  |
|  | 4e5a86031f |  |
|  | 7f6f127f4f |  |
|  | 12c352254f |  |
|  | 2b9d986932 |  |
|  | cdbf5f6c6f |  |
|  | 33562e97f4 |  |
|  | c9acc83141 |  |
|  | 8c4c360472 |  |
|  | 2c8714f1fa |  |
|  | 8ec256edbf |  |
|  | a48ec41bca |  |
|  | 541e0264ab |  |
|  | f945e4b8a2 |  |
|  | 076b92a2b4 |  |
|  | 02e9f74a04 |  |
|  | b37dd5e819 |  |
|  | 1775f80ffe |  |
|  | 3187db1e9a |  |
|  | 932eab00a0 |  |
|  | c842ac2343 |  |
|  | 6320237326 |  |
|  | 8fe9d013b5 |  |
1 .github/FUNDING.yml (vendored, new file)
@@ -0,0 +1 @@
github: [TwinProduction]

BIN .github/assets/dark-mode.png (vendored, new file)
Binary file not shown. Size after: 38 KiB.

BIN .github/assets/telegram-alerts.png (vendored, new file)
Binary file not shown. Size after: 36 KiB.
6 .github/workflows/build.yml (vendored)
@@ -14,10 +14,10 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Set up Go 1.15
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15
          go-version: 1.16
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
      - name: Build binary to make sure it works
@@ -28,6 +28,6 @@ jobs:
        # was configured by the "Set up Go 1.15" step (otherwise, it'd use sudo's "go" executable)
        run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
      - name: Codecov
        uses: codecov/codecov-action@v1.0.14
        uses: codecov/codecov-action@v1.5.2
        with:
          file: ./coverage.txt
1 .github/workflows/publish.yml (vendored)
@@ -6,6 +6,7 @@ jobs:
  build:
    name: Publish
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Check out code
        uses: actions/checkout@v2
5 .gitignore (vendored)
@@ -1,4 +1,7 @@
.idea
.vscode
gatus
db.db
db.db
config/config.yml
db.db-shm
db.db-wal
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2020 TwinProduction
Copyright (c) 2021 TwinProduction

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
454 README.md
@@ -10,31 +10,44 @@
Gatus is a health dashboard that gives you the ability to monitor your services using HTTP, ICMP, TCP, and even DNS
queries as well as evaluate the result of said queries by using a list of conditions on values like the status code,
the response time, the certificate expiration, the body and many others. The icing on top is that each of these health
checks can be paired with alerting via Slack, PagerDuty and even Twilio.
checks can be paired with alerting via Slack, PagerDuty, Discord and even Twilio.

I personally deploy it in my Kubernetes cluster and let it monitor the status of my
core applications: https://status.twinnation.org/

<details>
<summary><b>Quick start</b></summary>

```
docker run -p 8080:8080 --name gatus twinproduction/gatus
```
For more details, see [Usage](#usage)
</details>


## Table of Contents

- [Why Gatus?](#why-gatus)
- [Features](#features)
- [Usage](#usage)
- [Configuration](#configuration)
- [Configuration](#configuration)
- [Conditions](#conditions)
- [Placeholders](#placeholders)
- [Functions](#functions)
- [Alerting](#alerting)
- [Configuring Slack alerts](#configuring-slack-alerts)
- [Configuring Discord alerts](#configuring-discord-alerts)
- [Configuring PagerDuty alerts](#configuring-pagerduty-alerts)
- [Configuring Twilio alerts](#configuring-twilio-alerts)
- [Configuring Mattermost alerts](#configuring-mattermost-alerts)
- [Configuring Messagebird alerts](#configuring-messagebird-alerts)
- [Configuring Telegram alerts](#configuring-telegram-alerts)
- [Configuring custom alerts](#configuring-custom-alerts)
- [Kubernetes (ALPHA)](#kubernetes-alpha)
- [Auto Discovery](#auto-discovery)
- [Deploying](#deploying)
- [Docker](#docker)
- [Deployment](#deployment)
- [Docker](#docker)
- [Helm Chart](#helm-chart)
- [Terraform](#terraform)
- [Running the tests](#running-the-tests)
- [Using in Production](#using-in-production)
- [FAQ](#faq)
@@ -44,14 +57,36 @@ core applications: https://status.twinnation.org/
- [Monitoring a TCP service](#monitoring-a-tcp-service)
- [Monitoring a service using ICMP](#monitoring-a-service-using-icmp)
- [Monitoring a service using DNS queries](#monitoring-a-service-using-dns-queries)
- [Monitoring a service using STARTTLS](#monitoring-a-service-using-starttls)
- [Basic authentication](#basic-authentication)
- [disable-monitoring-lock](#disable-monitoring-lock)
- [Reloading configuration on the fly](#reloading-configuration-on-the-fly)
- [Service groups](#service-groups)
- [Exposing Gatus on a custom port](#exposing-gatus-on-a-custom-port)
- [Uptime Badges (ALPHA)](#uptime-badges)
- [API](#API)
- [Sponsors](#sponsors)

## Why Gatus?
Before getting into the specifics, I want to address the most common question:
> Why would I use Gatus when I can just use Prometheus' Alertmanager, Cloudwatch or even Splunk?

None of these can tell you that there's a problem if there are no clients actively calling the endpoint.
In other words, it's because monitoring metrics mostly rely on existing traffic, which effectively means that unless
your clients are already experiencing a problem, you won't be notified.

Gatus, on the other hand, allows you to configure health checks for each of your features, which in turn allows it to
monitor these features and potentially alert you before any clients are impacted.

A sign you may want to look into Gatus is by simply asking yourself whether you'd receive an alert if your load balancer
was to go down right now. Will any of your existing alerts be triggered? Your metrics won't report an increase in errors
if there's no traffic that makes it to your applications. This puts you in a situation where your clients are the ones
that will notify you about the degradation of your services rather than you reassuring them that you're working on
fixing the issue before they even know about it.


## Features


The main features of Gatus are:
- **Highly flexible health check conditions**: While checking the response status may be enough for some use cases, Gatus goes much further and allows you to add conditions on the response time, the response body and even the IP address.
@@ -61,11 +96,9 @@ The main features of Gatus are:
- **Metrics**
- **Low resource consumption**: As with most Go applications, the resource footprint that this application requires is negligibly small.
- **GitHub uptime badges**:   
- **Service auto discovery in Kubernetes** (ALPHA)


## Usage

By default, the configuration file is expected to be at `config/config.yaml`.

You can specify a custom path by setting the `GATUS_CONFIG_FILE` environment variable.
@@ -84,7 +117,7 @@ services:
      - "[RESPONSE_TIME] < 300" # Response time must be under 300ms
  - name: example
    url: "https://example.org/"
    interval: 30s
    interval: 5m
    conditions:
      - "[STATUS] == 200"
```
@@ -93,74 +126,52 @@ This example would look like this:



Note that you can also add environment variables in the configuration file (i.e. `$DOMAIN`, `${DOMAIN}`)
Note that you can also use environment variables in the configuration file (e.g. `$DOMAIN`, `${DOMAIN}`)

If you want to test it locally, see [Docker](#docker).
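As a quick illustration of the environment-variable substitution mentioned above, here is a minimal sketch; the service name and the `DOMAIN` value are made up for the example:

```yaml
services:
  - name: website
    # resolved from the DOMAIN environment variable
    url: "https://$DOMAIN/health"
    conditions:
      - "[STATUS] == 200"
```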
### Configuration

## Configuration
| Parameter | Description | Default |
|
||||
|:---------------------------------------- |:----------------------------------------------------------------------------- |:-------------- |
|
||||
| `debug` | Whether to enable debug logs | `false` |
|
||||
| `metrics` | Whether to expose metrics at /metrics | `false` |
|
||||
| `storage` | Storage configuration | `{}` |
|
||||
| `storage.file` | File to persist the data in. If not set, storage is in-memory only. | `""` |
|
||||
| `services` | List of services to monitor | Required `[]` |
|
||||
| `debug` | Whether to enable debug logs. | `false` |
|
||||
| `metrics` | Whether to expose metrics at /metrics. | `false` |
|
||||
| `storage` | Storage configuration. See [Storage](#storage). | `{}` |
|
||||
| `services` | List of services to monitor. | Required `[]` |
|
||||
| `services[].name` | Name of the service. Can be anything. | Required `""` |
|
||||
| `services[].group` | Group name. Used to group multiple services together on the dashboard. See [Service groups](#service-groups). | `""` |
|
||||
| `services[].url` | URL to send the request to | Required `""` |
|
||||
| `services[].method` | Request method | `GET` |
|
||||
| `services[].insecure` | Whether to skip verifying the server's certificate chain and host name | `false` |
|
||||
| `services[].conditions` | Conditions used to determine the health of the service | `[]` |
|
||||
| `services[].interval` | Duration to wait between every status check | `60s` |
|
||||
| `services[].graphql` | Whether to wrap the body in a query param (`{"query":"$body"}`) | `false` |
|
||||
| `services[].body` | Request body | `""` |
|
||||
| `services[].headers` | Request headers | `{}` |
|
||||
| `services[].dns` | Configuration for a service of type DNS. See [Monitoring using DNS queries](#monitoring-using-dns-queries) | `""` |
|
||||
| `services[].dns.query-type` | Query type for DNS service | `""` |
|
||||
| `services[].dns.query-name` | Query name for DNS service | `""` |
|
||||
| `services[].alerts[].type` | Type of alert. Valid types: `slack`, `pagerduty`, `twilio`, `mattermost`, `messagebird`, `custom` | Required `""` |
|
||||
| `services[].alerts[].enabled` | Whether to enable the alert | `false` |
|
||||
| `services[].alerts[].failure-threshold` | Number of failures in a row needed before triggering the alert | `3` |
|
||||
| `services[].alerts[].success-threshold` | Number of successes in a row before an ongoing incident is marked as resolved | `2` |
|
||||
| `services[].alerts[].send-on-resolved` | Whether to send a notification once a triggered alert is marked as resolved | `false` |
|
||||
| `services[].alerts[].description` | Description of the alert. Will be included in the alert sent | `""` |
|
||||
| `alerting` | Configuration for alerting | `{}` |
|
||||
| `alerting.slack` | Configuration for alerts of type `slack` | `{}` |
|
||||
| `alerting.slack.webhook-url` | Slack Webhook URL | Required `""` |
|
||||
| `alerting.pagerduty` | Configuration for alerts of type `pagerduty` | `{}` |
|
||||
| `alerting.pagerduty.integration-key` | PagerDuty Events API v2 integration key. | Required `""` |
|
||||
| `alerting.twilio` | Settings for alerts of type `twilio` | `{}` |
|
||||
| `alerting.twilio.sid` | Twilio account SID | Required `""` |
|
||||
| `alerting.twilio.token` | Twilio auth token | Required `""` |
|
||||
| `alerting.twilio.from` | Number to send Twilio alerts from | Required `""` |
|
||||
| `alerting.twilio.to` | Number to send twilio alerts to | Required `""` |
|
||||
| `alerting.mattermost` | Configuration for alerts of type `mattermost` | `{}` |
|
||||
| `alerting.mattermost.webhook-url` | Mattermost Webhook URL | Required `""` |
|
||||
| `alerting.mattermost.insecure` | Whether to skip verifying the server's certificate chain and host name | `false` |
|
||||
| `alerting.messagebird` | Settings for alerts of type `messagebird` | `{}` |
|
||||
| `alerting.messagebird.access-key` | Messagebird access key | Required `""` |
|
||||
| `alerting.messagebird.originator` | The sender of the message | Required `""` |
|
||||
| `alerting.messagebird.recipients` | The recipients of the message | Required `""` |
|
||||
| `alerting.custom` | Configuration for custom actions on failure or alerts | `{}` |
|
||||
| `alerting.custom.url` | Custom alerting request url | Required `""` |
|
||||
| `alerting.custom.method` | Request method | `GET` |
|
||||
| `alerting.custom.insecure` | Whether to skip verifying the server's certificate chain and host name | `false` |
|
||||
| `alerting.custom.body` | Custom alerting request body. | `""` |
|
||||
| `alerting.custom.headers` | Custom alerting request headers | `{}` |
|
||||
| `security` | Security configuration | `{}` |
|
||||
| `security.basic` | Basic authentication security configuration | `{}` |
|
||||
| `security.basic.username` | Username for Basic authentication | Required `""` |
|
||||
| `security.basic.password-sha512` | Password's SHA512 hash for Basic authentication | Required `""` |
|
||||
| `disable-monitoring-lock` | Whether to [disable the monitoring lock](#disable-monitoring-lock) | `false` |
|
||||
| `web` | Web configuration | `{}` |
|
||||
| `web.address` | Address to listen on | `0.0.0.0` |
|
||||
| `web.port` | Port to listen on | `8080` |
|
||||
| `services[].url` | URL to send the request to. | Required `""` |
|
||||
| `services[].method` | Request method. | `GET` |
|
||||
| `services[].insecure` | Whether to skip verifying the server's certificate chain and host name. | `false` |
|
||||
| `services[].conditions` | Conditions used to determine the health of the service. See [Conditions](#conditions). | `[]` |
|
||||
| `services[].interval` | Duration to wait between every status check. | `60s` |
|
||||
| `services[].graphql` | Whether to wrap the body in a query param (`{"query":"$body"}`). | `false` |
|
||||
| `services[].body` | Request body. | `""` |
|
||||
| `services[].headers` | Request headers. | `{}` |
|
||||
| `services[].dns` | Configuration for a service of type DNS. See [Monitoring a service using DNS queries](#monitoring-a-service-using-dns-queries). | `""` |
|
||||
| `services[].dns.query-type` | Query type for DNS service. | `""` |
|
||||
| `services[].dns.query-name` | Query name for DNS service. | `""` |
|
||||
| `services[].alerts[].type` | Type of alert. Valid types: `slack`, `discord`, `pagerduty`, `twilio`, `mattermost`, `messagebird`, `custom`. | Required `""` |
|
||||
| `services[].alerts[].enabled` | Whether to enable the alert. | `false` |
|
||||
| `services[].alerts[].failure-threshold` | Number of failures in a row needed before triggering the alert. | `3` |
|
||||
| `services[].alerts[].success-threshold` | Number of successes in a row before an ongoing incident is marked as resolved. | `2` |
|
||||
| `services[].alerts[].send-on-resolved` | Whether to send a notification once a triggered alert is marked as resolved. | `false` |
|
||||
| `services[].alerts[].description` | Description of the alert. Will be included in the alert sent. | `""` |
|
||||
| `alerting` | Configuration for alerting. See [Alerting](#alerting). | `{}` |
|
||||
| `security` | Security configuration. | `{}` |
|
||||
| `security.basic` | Basic authentication security configuration. | `{}` |
|
||||
| `security.basic.username` | Username for Basic authentication. | Required `""` |
|
||||
| `security.basic.password-sha512` | Password's SHA512 hash for Basic authentication. | Required `""` |
|
||||
| `disable-monitoring-lock` | Whether to [disable the monitoring lock](#disable-monitoring-lock). | `false` |
|
||||
| `skip-invalid-config-update` | Whether to ignore invalid configuration update. See [Reloading configuration on the fly](#reloading-configuration-on-the-fly). | `false` |
|
||||
| `web` | Web configuration. | `{}` |
|
||||
| `web.address` | Address to listen on. | `0.0.0.0` |
|
||||
| `web.port` | Port to listen on. | `8080` |
|
||||
For Kubernetes configuration, see [Kubernetes](#kubernetes-alpha)
For Kubernetes configuration, see [Kubernetes](#kubernetes-alpha).


### Conditions

Here are some examples of conditions you can use:

| Condition | Description | Passing values | Failing values |
@@ -179,13 +190,14 @@ Here are some examples of conditions you can use:
| `[BODY].age == [BODY].id` | JSONPath value of `$.age` is equal to JSONPath `$.id` | `{"age":1,"id":1}` | |
| `len([BODY].data) < 5` | Array at JSONPath `$.data` has less than 5 elements | `{"data":[{"id":1}]}` | |
| `len([BODY].name) == 8` | String at JSONPath `$.name` has a length of 8 | `{"name":"john.doe"}` | `{"name":"bob"}` |
| `has([BODY].errors) == false` | JSONPath `$.errors` does not exist | `{"name":"john.doe"}` | `{"errors":[]}` |
| `has([BODY].users) == true` | JSONPath `$.users` exists | `{"users":[]}` | `{}` |
| `[BODY].name == pat(john*)` | String at JSONPath `$.name` matches pattern `john*` | `{"name":"john.doe"}` | `{"name":"bob"}` |
| `[BODY].id == any(1, 2)` | Value at JSONPath `$.id` is equal to `1` or `2` | 1, 2 | 3, 4, 5 |
| `[CERTIFICATE_EXPIRATION] > 48h` | Certificate expiration is more than 48h away | 49h, 50h, 123h | 1h, 24h, ... |


#### Placeholders

| Placeholder | Description | Example of resolved value |
|:-------------------------- |:--------------------------------------------------------------- |:------------------------- |
| `[STATUS]` | Resolves into the HTTP status of the request | 404 |
@@ -198,31 +210,83 @@ Here are some examples of conditions you can use:


#### Functions

| Function | Description | Example |
|:-----------|:---------------------------------------------------------------------------------------------------------------- |:-------------------------- |
| `len` | Returns the length of the object/slice. Works only with the `[BODY]` placeholder. | `len([BODY].username) > 8` |
| `has` | Returns `true` or `false` based on whether a given path is valid. Works only with the `[BODY]` placeholder. | `has([BODY].errors) == false` |
| `pat` | Specifies that the string passed as parameter should be evaluated as a pattern. Works only with `==` and `!=`. | `[IP] == pat(192.168.*)` |
| `any` | Specifies that any one of the values passed as parameters is a valid value. Works only with `==` and `!=`. | `[BODY].ip == any(127.0.0.1, ::1)` |

**NOTE**: Use `pat` only when you need to. `[STATUS] == pat(2*)` is a lot more expensive than `[STATUS] < 300`.
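To tie the conditions, placeholders and functions together, here is a hedged sketch of a single service that uses several of them; the endpoint and the expected body are made up for the example:

```yaml
services:
  - name: api
    url: "https://example.org/api/v1/users/1"
    interval: 60s
    conditions:
      - "[STATUS] == any(200, 429)"      # any(): either 200 or 429 is accepted
      - "[RESPONSE_TIME] < 300"
      - "[CERTIFICATE_EXPIRATION] > 48h"
      - "has([BODY].errors) == false"    # has(): the $.errors path must not exist
      - "len([BODY].name) == 8"          # len(): length of the value at $.name
      - "[BODY].name == pat(john*)"      # pat(): pattern match, more expensive than plain comparisons
```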
### Alerting
### Storage
| Parameter | Description | Default |
|:------------------ |:-------------------------------------------------------------------------------------- |:-------------- |
| `storage` | Storage configuration | `{}` |
| `storage.file` | File to persist the data in. If the type is `inmemory`, data is persisted on interval. | `""` |
| `storage.type` | Type of storage. Valid types: `inmemory`, `sqlite`. | `"inmemory"` |

- If `storage.type` is `inmemory` (default), `storage.file` may be set to a non-blank value to persist the data to a file.
  Furthermore, the data is periodically persisted, but everything remains in memory.
- If `storage.type` is `sqlite`, `storage.file` must not be blank.
```yaml
storage:
  type: sqlite
  file: data.db
```
See [examples/docker-compose-sqlite-storage](examples/docker-compose-sqlite-storage) for an example.


### Alerting
Gatus supports multiple alerting providers, such as Slack and PagerDuty, and supports different alerts for each
individual service with configurable descriptions and thresholds.

Note that if an alerting provider is not configured properly, all alerts configured with the provider's type will be
Note that if an alerting provider is not properly configured, all alerts configured with the provider's type will be
ignored.
| Parameter | Description | Default |
|
||||
|:---------------------------------------- |:----------------------------------------------------------------------------- |:-------------- |
|
||||
| `alerting.slack` | Configuration for alerts of type `slack` | `{}` |
|
||||
| `alerting.slack.webhook-url` | Slack Webhook URL | Required `""` |
|
||||
| `alerting.discord` | Configuration for alerts of type `discord` | `{}` |
|
||||
| `alerting.discord.webhook-url` | Discord Webhook URL | Required `""` |
|
||||
| `alerting.pagerduty` | Configuration for alerts of type `pagerduty` | `{}` |
|
||||
| `alerting.pagerduty.integration-key` | PagerDuty Events API v2 integration key. | Required `""` |
|
||||
| `alerting.twilio` | Settings for alerts of type `twilio` | `{}` |
|
||||
| `alerting.twilio.sid` | Twilio account SID | Required `""` |
|
||||
| `alerting.twilio.token` | Twilio auth token | Required `""` |
|
||||
| `alerting.twilio.from` | Number to send Twilio alerts from | Required `""` |
|
||||
| `alerting.twilio.to` | Number to send twilio alerts to | Required `""` |
|
||||
| `alerting.mattermost` | Configuration for alerts of type `mattermost` | `{}` |
|
||||
| `alerting.mattermost.webhook-url` | Mattermost Webhook URL | Required `""` |
|
||||
| `alerting.mattermost.insecure` | Whether to skip verifying the server's certificate chain and host name | `false` |
|
||||
| `alerting.messagebird` | Settings for alerts of type `messagebird` | `{}` |
|
||||
| `alerting.messagebird.access-key` | Messagebird access key | Required `""` |
|
||||
| `alerting.messagebird.originator` | The sender of the message | Required `""` |
|
||||
| `alerting.messagebird.recipients` | The recipients of the message | Required `""` |
|
||||
| `alerting.telegram` | Configuration for alerts of type `telegram` | `{}` |
|
||||
| `alerting.telegram.token` | Telegram Bot Token | Required `""` |
|
||||
| `alerting.telegram.id` | Telegram User ID | Required `""` |
|
||||
| `alerting.custom` | Configuration for custom actions on failure or alerts | `{}` |
|
||||
| `alerting.custom.url` | Custom alerting request url | Required `""` |
|
||||
| `alerting.custom.method` | Request method | `GET` |
|
||||
| `alerting.custom.insecure` | Whether to skip verifying the server's certificate chain and host name | `false` |
|
||||
| `alerting.custom.body` | Custom alerting request body. | `""` |
|
||||
| `alerting.custom.headers` | Custom alerting request headers | `{}` |
|
||||
| `alerting.*.default-alert.enabled` | Whether to enable the alert | N/A |
|
||||
| `alerting.*.default-alert.failure-threshold` | Number of failures in a row needed before triggering the alert | N/A |
|
||||
| `alerting.*.default-alert.success-threshold` | Number of successes in a row before an ongoing incident is marked as resolved | N/A |
|
||||
| `alerting.*.default-alert.send-on-resolved` | Whether to send a notification once a triggered alert is marked as resolved | N/A |
|
||||
| `alerting.*.default-alert.description` | Description of the alert. Will be included in the alert sent | N/A |
|
||||
|
||||
|
||||
#### Configuring Slack alerts

```yaml
alerting:
  slack:
    webhook-url: "https://hooks.slack.com/services/**********/**********/**********"

services:
  - name: twinnation
    url: "https://twinnation.org/health"
@@ -248,8 +312,29 @@ Here's an example of what the notifications look like:



#### Configuring PagerDuty alerts
#### Configuring Discord alerts
```yaml
alerting:
  discord:
    webhook-url: "https://discord.com/api/webhooks/**********/**********"

services:
  - name: twinnation
    url: "https://twinnation.org/health"
    interval: 30s
    alerts:
      - type: discord
        enabled: true
        description: "healthcheck failed"
        send-on-resolved: true
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
      - "[RESPONSE_TIME] < 300"
```

#### Configuring PagerDuty alerts
It is highly recommended to set `services[].alerts[].send-on-resolved` to `true` for alerts
of type `pagerduty`, because unlike other alerts, the operation resulting from setting said
parameter to `true` will not create another incident, but mark the incident as resolved on
@@ -259,6 +344,7 @@ PagerDuty instead.
alerting:
  pagerduty:
    integration-key: "********************************"

services:
  - name: twinnation
    url: "https://twinnation.org/health"
@@ -269,7 +355,7 @@ services:
        failure-threshold: 3
        success-threshold: 5
        send-on-resolved: true
        description: "healthcheck failed 3 times in a row"
        description: "healthcheck failed"
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
@@ -278,7 +364,6 @@ services:

#### Configuring Twilio alerts

```yaml
alerting:
  twilio:
@@ -286,6 +371,7 @@ alerting:
    token: "..."
    from: "+1-234-567-8901"
    to: "+1-234-567-8901"

services:
  - name: twinnation
    interval: 30s
@@ -295,7 +381,7 @@ services:
        enabled: true
        failure-threshold: 5
        send-on-resolved: true
        description: "healthcheck failed 5 times in a row"
        description: "healthcheck failed"
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
@@ -304,12 +390,12 @@ services:

#### Configuring Mattermost alerts

```yaml
alerting:
  mattermost:
    webhook-url: "http://**********/hooks/**********"
    insecure: true

services:
  - name: twinnation
    url: "https://twinnation.org/health"
@@ -317,7 +403,7 @@ services:
    alerts:
      - type: mattermost
        enabled: true
        description: "healthcheck failed 3 times in a row"
        description: "healthcheck failed"
        send-on-resolved: true
    conditions:
      - "[STATUS] == 200"
@@ -349,7 +435,7 @@ services:
        enabled: true
        failure-threshold: 3
        send-on-resolved: true
        description: "healthcheck failed 3 times in a row"
        description: "healthcheck failed"
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
@@ -357,8 +443,32 @@ services:
```

#### Configuring custom alerts
#### Configuring Telegram alerts
```yaml
alerting:
  telegram:
    token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11"
    id: "0123456789"

services:
  - name: twinnation
    url: "https://twinnation.org/health"
    interval: 30s
    alerts:
      - type: telegram
        enabled: true
        send-on-resolved: true
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
```

Here's an example of what the notifications look like:



#### Configuring custom alerts
While they're called alerts, you can use this feature to call anything.

For instance, you could automate rollbacks by having an application that keeps track of new deployments, and by
@@ -395,7 +505,7 @@ services:
        failure-threshold: 10
        success-threshold: 3
        send-on-resolved: true
        description: "healthcheck failed 10 times in a row"
        description: "healthcheck failed"
    conditions:
      - "[STATUS] == 200"
      - "[BODY].status == UP"
@@ -414,10 +524,64 @@ alerting:
As a result, the `[ALERT_TRIGGERED_OR_RESOLVED]` in the body of the first example of this section would be replaced by
`partial_outage` when an alert is triggered and `operational` when an alert is resolved.
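As a hedged sketch of the substitution described above: the status-page endpoint is made up, and the `TRIGGERED`/`RESOLVED` keys under `placeholders` are an assumption about the mapping's shape rather than values taken verbatim from this diff.

```yaml
alerting:
  custom:
    url: "https://status.example.org/api/v1/components/1"
    method: "PUT"
    body: '{"status": "[ALERT_TRIGGERED_OR_RESOLVED]"}'
    placeholders:
      ALERT_TRIGGERED_OR_RESOLVED:
        TRIGGERED: "partial_outage"
        RESOLVED: "operational"
```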
### Kubernetes (ALPHA)

#### Setting a default provider alert
While you can specify the alert configuration directly in the service definition, it's tedious and may lead to a very
long configuration file.

To avoid such a problem, you can use the `default-alert` parameter present in each provider configuration:
```yaml
alerting:
  slack:
    webhook-url: "https://hooks.slack.com/services/**********/**********/**********"
    default-alert:
      enabled: true
      description: "healthcheck failed"
      send-on-resolved: true
      failure-threshold: 5
      success-threshold: 5
```

As a result, your service configuration looks a lot tidier:
```yaml
services:
  - name: example
    url: "https://example.org"
    alerts:
      - type: slack
    conditions:
      - "[STATUS] == 200"

  - name: other-example
    url: "https://example.com"
    alerts:
      - type: slack
    conditions:
      - "[STATUS] == 200"
```

It also allows you to do things like this:
```yaml
services:
  - name: twinnation
    url: "https://twinnation.org/health"
    alerts:
      - type: slack
        failure-threshold: 5
      - type: slack
        failure-threshold: 10
      - type: slack
        failure-threshold: 15
    conditions:
      - "[STATUS] == 200"
```


### Kubernetes (ALPHA)
> **WARNING**: This feature is in ALPHA. This means that it is very likely to change in the near future, which means that
> while you can use this feature as you see fit, there may be breaking changes in future releases.
>
> **NOTICE**: This feature may be removed. To give your opinion on the subject, see https://github.com/TwinProduction/gatus/discussions/135.

| Parameter | Description | Default |
|:------------------------------------------- |:----------------------------------------------------------------------------- |:-------------- |
@@ -434,7 +598,6 @@ As a result, the `[ALERT_TRIGGERED_OR_RESOLVED]` in the body of first example of


#### Auto Discovery

Auto discovery works by reading all `Service` resources from the configured `namespaces` and appending the `hostname-suffix` as
well as the configured `target-path` to the service name and making an HTTP call.

@@ -473,15 +636,21 @@ Note that `hostname-suffix` could also be something like `.yourdomain.com`, in w
monitored would be `potato.example.com/health`, assuming you have a service named `potato` and a matching ingress
to map `potato.example.com` to the `potato` service.

#### Deploying

See [example/kubernetes-with-auto-discovery](example/kubernetes-with-auto-discovery)
For a full example, see [examples/kubernetes-with-auto-discovery](examples/kubernetes-with-auto-discovery)

## Docker
## Deployment
Many examples can be found in the [examples](examples) folder, but this section will focus on the most popular ways of deploying Gatus.


### Docker
To run Gatus locally with Docker:
```
docker run -p 8080:8080 --name gatus twinproduction/gatus
```

Other than using one of the examples provided in the `examples` folder, you can also try it out locally by
creating a configuration file - we'll call it `config.yaml` for this example - and running the following
creating a configuration file, we'll call it `config.yaml` for this example, and running the following
command:
```
docker run -p 8080:8080 --mount type=bind,source="$(pwd)"/config.yaml,target=/config/config.yaml --name gatus twinproduction/gatus
@@ -492,23 +661,43 @@ If you're on Windows, replace `"$(pwd)"` by the absolute path to your current di
docker run -p 8080:8080 --mount type=bind,source=C:/Users/Chris/Desktop/config.yaml,target=/config/config.yaml --name gatus twinproduction/gatus
```

To build the image locally:
```
docker build . -t twinproduction/gatus
```


### Helm Chart
[Helm](https://helm.sh) must be installed to use the chart.
Please refer to Helm's [documentation](https://helm.sh/docs/) to get started.

Once Helm is set up properly, add the repository as follows:

```console
helm repo add gatus https://avakarev.github.io/gatus-chart
```

To get more details, please check the chart's [configuration](https://github.com/avakarev/gatus-chart#configuration)
and [helmfile example](https://github.com/avakarev/gatus-chart#helmfileyaml-example)


### Terraform
Gatus can be deployed on Terraform by using the following module: [terraform-kubernetes-gatus](https://github.com/TwinProduction/terraform-kubernetes-gatus).


## Running the tests

```
go test ./... -mod vendor
```


## Using in Production

See the [example](example) folder.
See the [Deployment](#deployment) section.

## FAQ

### Sending a GraphQL request

By setting `services[].graphql` to true, the body will automatically be wrapped by the standard GraphQL `query` parameter.

For instance, the following configuration:
@@ -527,8 +716,6 @@ services:
          avatar
        }
      }
    headers:
      Content-Type: application/json # XXX: as of v1.9.2, this header is automatically added when graphql is set to true
    conditions:
      - "[STATUS] == 200"
      - "[BODY].data.users[0].gender == female"
@@ -541,9 +728,8 @@ will send a `POST` request to `http://localhost:8080/playground` with the follow
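To make the wrapping explicit, here is a condensed sketch of what happens when `graphql: true` is set; the exact JSON escaping of the sent body is an assumption based on the `{"query":"$body"}` wrapper mentioned earlier:

```yaml
services:
  - name: graphql-example
    url: "http://localhost:8080/playground"
    graphql: true
    body: |
      {
        users(gender: "female") {
          id
          name
        }
      }
    conditions:
      - "[STATUS] == 200"
# With graphql set to true, the body above is sent roughly as:
#   {"query": "<the body above, JSON-escaped>"}
```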
### Recommended interval

**NOTE**: This does not _really_ apply if `disable-monitoring-lock` is set to `true`, as the monitoring lock is what
tells Gatus to only evaluate one service at a time.
> **NOTE**: This does not _really_ apply if `disable-monitoring-lock` is set to `true`, as the monitoring lock is what
> tells Gatus to only evaluate one service at a time.

To ensure that Gatus provides reliable and accurate results (i.e. response time), Gatus only evaluates one service at a time
In other words, even if you have multiple services with the exact same interval, they will not execute at the same time.
@@ -573,7 +759,6 @@ simple health checks used for alerting (PagerDuty/Twilio) to `30s`.

### Default timeouts

| Protocol | Timeout |
|:-------- |:------- |
| HTTP | 10s |
@@ -602,7 +787,6 @@ established.


### Monitoring a service using ICMP

By prefixing `services[].url` with `icmp://`, you can monitor services at a very basic level using ICMP, more
commonly known as "ping" or "echo":

@@ -619,7 +803,6 @@ You can specify a domain prefixed by `icmp://`, or an IP address prefixed by `ic


### Monitoring a service using DNS queries

Defining a `dns` configuration in a service will automatically mark that service as a service of type DNS:
```yaml
services:
@@ -640,10 +823,22 @@ There are two placeholders that can be used in the conditions for services of ty
`NOERROR`, `FORMERR`, `SERVFAIL`, `NXDOMAIN`, etc.
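Below is a hedged sketch of a DNS-type service built from the `services[].dns` parameters described earlier; treating `url` as the resolver address and the `[DNS_RCODE]` placeholder name are assumptions, since the full example is elided from this diff:

```yaml
services:
  - name: dns-example
    url: "8.8.8.8"                 # assumed: the DNS server to query
    interval: 5m
    dns:
      query-name: "example.com"
      query-type: "A"
    conditions:
      - "[DNS_RCODE] == NOERROR"   # assumed placeholder for the response code (NOERROR, NXDOMAIN, ...)
```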
### Monitoring a service using STARTTLS
If you have an email server and you want to ensure there are no problems with it, monitoring it through STARTTLS
will serve as a good initial indicator:
```yaml
services:
  - name: starttls-smtp-example
    url: "starttls://smtp.gmail.com:587"
    interval: 30m
    conditions:
      - "[CONNECTED] == true"
      - "[CERTIFICATE_EXPIRATION] > 48h"
```


### Basic authentication

You can require Basic authentication by leveraging the `security.basic` configuration:

```yaml
security:
  basic:
@@ -655,7 +850,6 @@ The example above will require that you authenticate with the username `john.doe

### disable-monitoring-lock

Setting `disable-monitoring-lock` to `true` means that multiple services could be monitored at the same time.

While this behavior wouldn't generally be harmful, conditions using the `[RESPONSE_TIME]` placeholder could be impacted
@@ -668,8 +862,30 @@ technically, if you create 100 services with a 1 seconds interval, Gatus will se
- You want to test multiple services at a very short interval (< 5s)


### Service groups
### Reloading configuration on the fly
For the sake of convenience, Gatus automatically reloads the configuration on the fly if the loaded configuration file
is updated while Gatus is running.

By default, the application will exit if the updated configuration is invalid, but you can configure
Gatus to continue running if the configuration file is updated with an invalid configuration by
setting `skip-invalid-config-update` to `true`.

Keep in mind that it is in your best interest to ensure the validity of the configuration file after each update you
apply to the configuration file while Gatus is running by looking at the log and making sure that you do not see the
following message:
```
The configuration file was updated, but it is not valid. The old configuration will continue being used.
```
Failure to do so may result in Gatus being unable to start if the application is restarted for whatever reason.

I recommend not setting `skip-invalid-config-update` to `true` to avoid a situation like this, but the choice is yours
to make.

Note that if you are not using a file storage, updating the configuration while Gatus is running is effectively
the same as restarting the application.
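A minimal sketch of the parameter in context; per the configuration table, `skip-invalid-config-update` sits at the top level of the file:

```yaml
skip-invalid-config-update: true

services:
  - name: twinnation
    url: "https://twinnation.org/health"
    conditions:
      - "[STATUS] == 200"
```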
### Service groups
Service groups are used for grouping multiple services together on the dashboard.

```yaml
@@ -715,7 +931,6 @@ The configuration above will result in a dashboard that looks like this:


### Exposing Gatus on a custom port

By default, Gatus is exposed on port `8080`, but you may specify a different port by setting the `web.port` parameter:
```yaml
web:
@@ -763,3 +978,32 @@ Example: .

[<img src="https://github.com/math280h.png" width="35" />](https://github.com/math280h)
[<img src="https://github.com/mateothegreat.png" width="35" />](https://github.com/mateothegreat)

@@ -1,21 +1,30 @@
package core
package alert

// Alert is the service's alert configuration
type Alert struct {
	// Type of alert
	Type AlertType `yaml:"type"`
	// Type of alert (required)
	Type Type `yaml:"type"`

	// Enabled defines whether or not the alert is enabled
	Enabled bool `yaml:"enabled"`
	//
	// This is a pointer, because it is populated by YAML and we need to know whether it was explicitly set to a value
	// or not for provider.ParseWithDefaultAlert to work.
	Enabled *bool `yaml:"enabled"`

	// FailureThreshold is the number of failures in a row needed before triggering the alert
	FailureThreshold int `yaml:"failure-threshold"`

	// Description of the alert. Will be included in the alert sent.
	Description string `yaml:"description"`
	//
	// This is a pointer, because it is populated by YAML and we need to know whether it was explicitly set to a value
	// or not for provider.ParseWithDefaultAlert to work.
	Description *string `yaml:"description"`

	// SendOnResolved defines whether to send a second notification when the issue has been resolved
	SendOnResolved bool `yaml:"send-on-resolved"`
	//
	// This is a pointer, because it is populated by YAML and we need to know whether it was explicitly set to a value
	// or not for provider.ParseWithDefaultAlert to work. Use Alert.IsSendingOnResolved() for a non-pointer
	SendOnResolved *bool `yaml:"send-on-resolved"`

	// SuccessThreshold defines how many successful executions must happen in a row before an ongoing incident is marked as resolved
	SuccessThreshold int `yaml:"success-threshold"`
@@ -35,26 +44,26 @@ type Alert struct {
	Triggered bool
}

// AlertType is the type of the alert.
// The value will generally be the name of the alert provider
type AlertType string
// GetDescription retrieves the description of the alert
func (alert Alert) GetDescription() string {
	if alert.Description == nil {
		return ""
	}
	return *alert.Description
}

const (
	// SlackAlert is the AlertType for the slack alerting provider
	SlackAlert AlertType = "slack"
// IsEnabled returns whether an alert is enabled or not
func (alert Alert) IsEnabled() bool {
	if alert.Enabled == nil {
		return false
	}
	return *alert.Enabled
}

	// MattermostAlert is the AlertType for the mattermost alerting provider
	MattermostAlert AlertType = "mattermost"

	// MessagebirdAlert is the AlertType for the messagebird alerting provider
	MessagebirdAlert AlertType = "messagebird"

	// PagerDutyAlert is the AlertType for the pagerduty alerting provider
	PagerDutyAlert AlertType = "pagerduty"

	// TwilioAlert is the AlertType for the twilio alerting provider
	TwilioAlert AlertType = "twilio"

	// CustomAlert is the AlertType for the custom alerting provider
	CustomAlert AlertType = "custom"
)
// IsSendingOnResolved returns whether an alert is sending on resolve or not
func (alert Alert) IsSendingOnResolved() bool {
	if alert.SendOnResolved == nil {
		return false
	}
	return *alert.SendOnResolved
}
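A short sketch of why these fields became pointers; it assumes the project's YAML parser is gopkg.in/yaml.v2, which leaves an omitted field as nil instead of false, letting provider.ParseWithDefaultAlert tell an unset value apart from an explicitly disabled one:

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/alerting/alert"
	"gopkg.in/yaml.v2" // assumption: the YAML library used by the project
)

func main() {
	// "enabled" is omitted on purpose: the pointer stays nil rather than becoming false,
	// so a provider's default-alert can later decide what the effective value should be.
	var unset alert.Alert
	_ = yaml.Unmarshal([]byte("type: slack\nfailure-threshold: 5"), &unset)
	fmt.Println(unset.Enabled == nil, unset.IsEnabled()) // true false

	// Here the field is explicitly set, so the pointer is non-nil and IsEnabled() reflects it.
	var set alert.Alert
	_ = yaml.Unmarshal([]byte("type: slack\nenabled: true"), &set)
	fmt.Println(set.Enabled == nil, set.IsEnabled()) // false true
}
```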
36 alerting/alert/alert_test.go (new file)
@@ -0,0 +1,36 @@
package alert

import "testing"

func TestAlert_IsEnabled(t *testing.T) {
	if (Alert{Enabled: nil}).IsEnabled() {
		t.Error("alert.IsEnabled() should've returned false, because Enabled was set to nil")
	}
	if value := false; (Alert{Enabled: &value}).IsEnabled() {
		t.Error("alert.IsEnabled() should've returned false, because Enabled was set to false")
	}
	if value := true; !(Alert{Enabled: &value}).IsEnabled() {
		t.Error("alert.IsEnabled() should've returned true, because Enabled was set to true")
	}
}

func TestAlert_GetDescription(t *testing.T) {
	if (Alert{Description: nil}).GetDescription() != "" {
		t.Error("alert.GetDescription() should've returned an empty string, because Description was set to nil")
	}
	if value := "description"; (Alert{Description: &value}).GetDescription() != value {
		t.Error("alert.GetDescription() should've returned 'description', because Description was set to 'description'")
	}
}

func TestAlert_IsSendingOnResolved(t *testing.T) {
	if (Alert{SendOnResolved: nil}).IsSendingOnResolved() {
		t.Error("alert.IsSendingOnResolved() should've returned false, because SendOnResolved was set to nil")
	}
	if value := false; (Alert{SendOnResolved: &value}).IsSendingOnResolved() {
		t.Error("alert.IsSendingOnResolved() should've returned false, because SendOnResolved was set to false")
	}
	if value := true; !(Alert{SendOnResolved: &value}).IsSendingOnResolved() {
		t.Error("alert.IsSendingOnResolved() should've returned true, because SendOnResolved was set to true")
	}
}
31 alerting/alert/type.go (new file)
@@ -0,0 +1,31 @@
package alert

// Type is the type of the alert.
// The value will generally be the name of the alert provider
type Type string

const (
	// TypeCustom is the Type for the custom alerting provider
	TypeCustom Type = "custom"

	// TypeDiscord is the Type for the discord alerting provider
	TypeDiscord Type = "discord"

	// TypeMattermost is the Type for the mattermost alerting provider
	TypeMattermost Type = "mattermost"

	// TypeMessagebird is the Type for the messagebird alerting provider
	TypeMessagebird Type = "messagebird"

	// TypePagerDuty is the Type for the pagerduty alerting provider
	TypePagerDuty Type = "pagerduty"

	// TypeSlack is the Type for the slack alerting provider
	TypeSlack Type = "slack"

	// TypeTelegram is the Type for the telegram alerting provider
	TypeTelegram Type = "telegram"

	// TypeTwilio is the Type for the twilio alerting provider
	TypeTwilio Type = "twilio"
)
@@ -1,18 +1,25 @@
package alerting

import (
	"github.com/TwinProduction/gatus/alerting/alert"
	"github.com/TwinProduction/gatus/alerting/provider"
	"github.com/TwinProduction/gatus/alerting/provider/custom"
	"github.com/TwinProduction/gatus/alerting/provider/discord"
	"github.com/TwinProduction/gatus/alerting/provider/mattermost"
	"github.com/TwinProduction/gatus/alerting/provider/messagebird"
	"github.com/TwinProduction/gatus/alerting/provider/pagerduty"
	"github.com/TwinProduction/gatus/alerting/provider/slack"
	"github.com/TwinProduction/gatus/alerting/provider/telegram"
	"github.com/TwinProduction/gatus/alerting/provider/twilio"
)

// Config is the configuration for alerting providers
type Config struct {
	// Slack is the configuration for the slack alerting provider
	Slack *slack.AlertProvider `yaml:"slack"`
	// Custom is the configuration for the custom alerting provider
	Custom *custom.AlertProvider `yaml:"custom"`

	// Discord is the configuration for the discord alerting provider
	Discord *discord.AlertProvider `yaml:"discord"`

	// Mattermost is the configuration for the mattermost alerting provider
	Mattermost *mattermost.AlertProvider `yaml:"mattermost"`
@@ -20,12 +27,70 @@ type Config struct {
	// Messagebird is the configuration for the messagebird alerting provider
	Messagebird *messagebird.AlertProvider `yaml:"messagebird"`

	// Pagerduty is the configuration for the pagerduty alerting provider
	// PagerDuty is the configuration for the pagerduty alerting provider
	PagerDuty *pagerduty.AlertProvider `yaml:"pagerduty"`

	// Slack is the configuration for the slack alerting provider
	Slack *slack.AlertProvider `yaml:"slack"`

	// Telegram is the configuration for the telegram alerting provider
	Telegram *telegram.AlertProvider `yaml:"telegram"`

	// Twilio is the configuration for the twilio alerting provider
	Twilio *twilio.AlertProvider `yaml:"twilio"`

	// Custom is the configuration for the custom alerting provider
	Custom *custom.AlertProvider `yaml:"custom"`
}

// GetAlertingProviderByAlertType returns a provider.AlertProvider by its corresponding alert.Type
func (config Config) GetAlertingProviderByAlertType(alertType alert.Type) provider.AlertProvider {
	switch alertType {
	case alert.TypeCustom:
		if config.Custom == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Custom
	case alert.TypeDiscord:
		if config.Discord == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Discord
	case alert.TypeMattermost:
		if config.Mattermost == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Mattermost
	case alert.TypeMessagebird:
		if config.Messagebird == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Messagebird
	case alert.TypePagerDuty:
		if config.PagerDuty == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.PagerDuty
	case alert.TypeSlack:
		if config.Slack == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Slack
	case alert.TypeTelegram:
		if config.Telegram == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Telegram
	case alert.TypeTwilio:
		if config.Twilio == nil {
			// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
			return nil
		}
		return config.Twilio
	}
	return nil
}
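A hedged usage sketch of the lookup above: a nil return means the provider for that alert type is not configured, which is why such alerts end up ignored. The WebhookURL field name on the Slack provider is an assumption that mirrors the Discord provider shown later in this diff.

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/alerting"
	"github.com/TwinProduction/gatus/alerting/alert"
	"github.com/TwinProduction/gatus/alerting/provider/slack"
)

func main() {
	cfg := alerting.Config{
		// Only Slack is configured in this sketch.
		Slack: &slack.AlertProvider{WebhookURL: "https://hooks.slack.com/services/..."},
	}
	if p := cfg.GetAlertingProviderByAlertType(alert.TypeSlack); p != nil {
		fmt.Printf("slack alerts handled by %T\n", p)
	}
	if p := cfg.GetAlertingProviderByAlertType(alert.TypeTelegram); p == nil {
		fmt.Println("telegram is not configured, so telegram alerts would be ignored")
	}
}
```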
@@ -9,6 +9,7 @@ import (
	"os"
	"strings"

	"github.com/TwinProduction/gatus/alerting/alert"
	"github.com/TwinProduction/gatus/client"
	"github.com/TwinProduction/gatus/core"
)
@@ -22,6 +23,9 @@ type AlertProvider struct {
	Body         string                       `yaml:"body,omitempty"`
	Headers      map[string]string            `yaml:"headers,omitempty"`
	Placeholders map[string]map[string]string `yaml:"placeholders,omitempty"`

	// DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
	DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid
@@ -30,7 +34,7 @@ func (provider *AlertProvider) IsValid() bool {
}

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, result *core.Result, resolved bool) *AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *AlertProvider {
	return provider
}

@@ -112,3 +116,8 @@ func (provider *AlertProvider) Send(serviceName, alertDescription string, resolv
	}
	return ioutil.ReadAll(response.Body)
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
	return provider.DefaultAlert
}

@@ -4,6 +4,7 @@ import (
	"io/ioutil"
	"testing"

	"github.com/TwinProduction/gatus/alerting/alert"
	"github.com/TwinProduction/gatus/core"
)

@@ -60,7 +61,7 @@ func TestAlertProvider_buildHTTPRequestWhenTriggered(t *testing.T) {

func TestAlertProvider_ToCustomAlertProvider(t *testing.T) {
	provider := AlertProvider{URL: "http://example.com"}
	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{}, true)
	if customAlertProvider == nil {
		t.Fatal("customAlertProvider shouldn't have been nil")
	}
72 alerting/provider/discord/discord.go (new file)
@@ -0,0 +1,72 @@
|
||||
package discord
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/TwinProduction/gatus/alerting/alert"
|
||||
"github.com/TwinProduction/gatus/alerting/provider/custom"
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
// AlertProvider is the configuration necessary for sending an alert using Discord
|
||||
type AlertProvider struct {
|
||||
WebhookURL string `yaml:"webhook-url"`
|
||||
|
||||
// DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
|
||||
DefaultAlert *alert.Alert `yaml:"default-alert"`
|
||||
}
|
||||
|
||||
// IsValid returns whether the provider's configuration is valid
|
||||
func (provider *AlertProvider) IsValid() bool {
|
||||
return len(provider.WebhookURL) > 0
|
||||
}
|
||||
|
||||
// ToCustomAlertProvider converts the provider into a custom.AlertProvider
|
||||
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
|
||||
var message, results string
|
||||
var colorCode int
|
||||
if resolved {
|
||||
message = fmt.Sprintf("An alert for **%s** has been resolved after passing successfully %d time(s) in a row", service.Name, alert.SuccessThreshold)
|
||||
colorCode = 3066993
|
||||
} else {
|
||||
message = fmt.Sprintf("An alert for **%s** has been triggered due to having failed %d time(s) in a row", service.Name, alert.FailureThreshold)
|
||||
colorCode = 15158332
|
||||
}
|
||||
for _, conditionResult := range result.ConditionResults {
|
||||
var prefix string
|
||||
if conditionResult.Success {
|
||||
prefix = ":white_check_mark:"
|
||||
} else {
|
||||
prefix = ":x:"
|
||||
}
|
||||
results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
|
||||
}
|
||||
return &custom.AlertProvider{
|
||||
URL: provider.WebhookURL,
|
||||
Method: http.MethodPost,
|
||||
Body: fmt.Sprintf(`{
|
||||
"content": "",
|
||||
"embeds": [
|
||||
{
|
||||
"title": ":helmet_with_white_cross: Gatus",
|
||||
"description": "%s:\n> %s",
|
||||
"color": %d,
|
||||
"fields": [
|
||||
{
|
||||
"name": "Condition results",
|
||||
"value": "%s",
|
||||
"inline": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}`, message, alert.GetDescription(), colorCode, results),
|
||||
Headers: map[string]string{"Content-Type": "application/json"},
|
||||
}
|
||||
}
|
||||
|
||||
// GetDefaultAlert returns the provider's default alert configuration
|
||||
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
|
||||
return provider.DefaultAlert
|
||||
}
|
||||
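A rough usage sketch for the new Discord provider, not part of the diff. It assumes the custom provider's Send method returns the response body and an error, as suggested by the ioutil.ReadAll return in the hunk further up; the webhook URL and service values are placeholders.

package main

import (
    "log"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/discord"
    "github.com/TwinProduction/gatus/core"
)

func main() {
    provider := &discord.AlertProvider{WebhookURL: "https://discord.com/api/webhooks/xxx/yyy"} // placeholder
    if !provider.IsValid() {
        log.Fatal("discord provider is not configured")
    }
    serviceAlert := &alert.Alert{Type: alert.TypeDiscord}
    result := &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "[STATUS] == 200", Success: false}}}
    // Convert the Discord-specific configuration into the generic custom provider...
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "back-end"}, serviceAlert, result, false)
    // ...which performs the actual HTTP call to the webhook (assumed signature, see the Send hunk above).
    if _, err := customAlertProvider.Send("back-end", serviceAlert.GetDescription(), false); err != nil {
        log.Printf("failed to send Discord alert: %v", err)
    }
}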
66  alerting/provider/discord/discord_test.go  Normal file
@@ -0,0 +1,66 @@
package discord

import (
    "encoding/json"
    "net/http"
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

func TestAlertProvider_IsValid(t *testing.T) {
    invalidProvider := AlertProvider{WebhookURL: ""}
    if invalidProvider.IsValid() {
        t.Error("provider shouldn't have been valid")
    }
    validProvider := AlertProvider{WebhookURL: "http://example.com"}
    if !validProvider.IsValid() {
        t.Error("provider should've been valid")
    }
}

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.com"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
    if !strings.Contains(customAlertProvider.Body, "resolved") {
        t.Error("customAlertProvider.Body should've contained the substring resolved")
    }
    if customAlertProvider.URL != "http://example.com" {
        t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
    }
    if customAlertProvider.Method != http.MethodPost {
        t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
    }
    body := make(map[string]interface{})
    err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
    if err != nil {
        t.Error("expected body to be valid JSON, got error:", err.Error())
    }
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.com"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
    if !strings.Contains(customAlertProvider.Body, "triggered") {
        t.Error("customAlertProvider.Body should've contained the substring triggered")
    }
    if customAlertProvider.URL != "http://example.com" {
        t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
    }
    if customAlertProvider.Method != http.MethodPost {
        t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
    }
    body := make(map[string]interface{})
    err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
    if err != nil {
        t.Error("expected body to be valid JSON, got error:", err.Error())
    }
}
@@ -4,6 +4,7 @@ import (
    "fmt"
    "net/http"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

@@ -12,6 +13,9 @@ import (
type AlertProvider struct {
    WebhookURL string `yaml:"webhook-url"`
    Insecure   bool   `yaml:"insecure,omitempty"`

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid

@@ -20,7 +24,7 @@ func (provider *AlertProvider) IsValid() bool {
}

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
    var message string
    var color string
    if resolved {

@@ -69,7 +73,12 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
      ]
    }
  ]
}`, message, message, alert.Description, color, service.URL, results),
}`, message, message, alert.GetDescription(), color, service.URL, results),
        Headers: map[string]string{"Content-Type": "application/json"},
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}

@@ -6,6 +6,7 @@ import (
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

@@ -22,7 +23,7 @@ func TestAlertProvider_IsValid(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.org"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }

@@ -44,7 +45,7 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.org"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
@@ -4,6 +4,7 @@ import (
    "fmt"
    "net/http"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

@@ -17,6 +18,9 @@ type AlertProvider struct {
    AccessKey  string `yaml:"access-key"`
    Originator string `yaml:"originator"`
    Recipients string `yaml:"recipients"`

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid

@@ -26,12 +30,12 @@ func (provider *AlertProvider) IsValid() bool {

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
// Reference doc for messagebird https://developers.messagebird.com/api/sms-messaging/#send-outbound-sms
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
    var message string
    if resolved {
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.GetDescription())
    } else {
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.GetDescription())
    }

    return &custom.AlertProvider{

@@ -48,3 +52,8 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
        },
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}

@@ -6,6 +6,7 @@ import (
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

@@ -30,7 +31,7 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
        Originator: "1",
        Recipients: "1",
    }
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }

@@ -56,7 +57,7 @@ func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
        Originator: "1",
        Recipients: "1",
    }
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
@@ -4,6 +4,7 @@ import (
    "fmt"
    "net/http"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

@@ -11,6 +12,9 @@ import (
// AlertProvider is the configuration necessary for sending an alert using PagerDuty
type AlertProvider struct {
    IntegrationKey string `yaml:"integration-key"`

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid

@@ -21,14 +25,14 @@ func (provider *AlertProvider) IsValid() bool {
// ToCustomAlertProvider converts the provider into a custom.AlertProvider
//
// relevant: https://developer.pagerduty.com/docs/events-api-v2/trigger-events/
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
    var message, eventAction, resolveKey string
    if resolved {
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.GetDescription())
        eventAction = "resolve"
        resolveKey = alert.ResolveKey
    } else {
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.GetDescription())
        eventAction = "trigger"
        resolveKey = ""
    }

@@ -50,3 +54,8 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
        },
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}

@@ -6,6 +6,7 @@ import (
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

@@ -22,7 +23,7 @@ func TestAlertProvider_IsValid(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
    provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }

@@ -44,7 +45,7 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
    provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
@@ -1,11 +1,14 @@
package provider

import (
    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/alerting/provider/discord"
    "github.com/TwinProduction/gatus/alerting/provider/mattermost"
    "github.com/TwinProduction/gatus/alerting/provider/messagebird"
    "github.com/TwinProduction/gatus/alerting/provider/pagerduty"
    "github.com/TwinProduction/gatus/alerting/provider/slack"
    "github.com/TwinProduction/gatus/alerting/provider/telegram"
    "github.com/TwinProduction/gatus/alerting/provider/twilio"
    "github.com/TwinProduction/gatus/core"
)

@@ -16,15 +19,42 @@ type AlertProvider interface {
    IsValid() bool

    // ToCustomAlertProvider converts the provider into a custom.AlertProvider
    ToCustomAlertProvider(service *core.Service, alert *core.Alert, result *core.Result, resolved bool) *custom.AlertProvider
    ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *custom.AlertProvider

    // GetDefaultAlert returns the provider's default alert configuration
    GetDefaultAlert() *alert.Alert
}

// ParseWithDefaultAlert parses a service alert by using the provider's default alert as a baseline
func ParseWithDefaultAlert(providerDefaultAlert, serviceAlert *alert.Alert) {
    if providerDefaultAlert == nil || serviceAlert == nil {
        return
    }
    if serviceAlert.Enabled == nil {
        serviceAlert.Enabled = providerDefaultAlert.Enabled
    }
    if serviceAlert.SendOnResolved == nil {
        serviceAlert.SendOnResolved = providerDefaultAlert.SendOnResolved
    }
    if serviceAlert.Description == nil {
        serviceAlert.Description = providerDefaultAlert.Description
    }
    if serviceAlert.FailureThreshold == 0 {
        serviceAlert.FailureThreshold = providerDefaultAlert.FailureThreshold
    }
    if serviceAlert.SuccessThreshold == 0 {
        serviceAlert.SuccessThreshold = providerDefaultAlert.SuccessThreshold
    }
}

var (
    // Validate interface implementation on compile
    _ AlertProvider = (*custom.AlertProvider)(nil)
    _ AlertProvider = (*twilio.AlertProvider)(nil)
    _ AlertProvider = (*slack.AlertProvider)(nil)
    _ AlertProvider = (*discord.AlertProvider)(nil)
    _ AlertProvider = (*mattermost.AlertProvider)(nil)
    _ AlertProvider = (*messagebird.AlertProvider)(nil)
    _ AlertProvider = (*pagerduty.AlertProvider)(nil)
    _ AlertProvider = (*slack.AlertProvider)(nil)
    _ AlertProvider = (*telegram.AlertProvider)(nil)
    _ AlertProvider = (*twilio.AlertProvider)(nil)
)
153  alerting/provider/provider_test.go  Normal file
@@ -0,0 +1,153 @@
package provider

import (
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
)

func TestParseWithDefaultAlert(t *testing.T) {
    type Scenario struct {
        Name                                            string
        DefaultAlert, ServiceAlert, ExpectedOutputAlert *alert.Alert
    }
    enabled := true
    disabled := false
    firstDescription := "description-1"
    secondDescription := "description-2"
    scenarios := []Scenario{
        {
            Name: "service-alert-type-only",
            DefaultAlert: &alert.Alert{
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
            ServiceAlert: &alert.Alert{
                Type: alert.TypeDiscord,
            },
            ExpectedOutputAlert: &alert.Alert{
                Type:             alert.TypeDiscord,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
        },
        {
            Name: "service-alert-overwrites-default-alert",
            DefaultAlert: &alert.Alert{
                Enabled:          &disabled,
                SendOnResolved:   &disabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
            ServiceAlert: &alert.Alert{
                Type:             alert.TypeTelegram,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &secondDescription,
                FailureThreshold: 6,
                SuccessThreshold: 11,
            },
            ExpectedOutputAlert: &alert.Alert{
                Type:             alert.TypeTelegram,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &secondDescription,
                FailureThreshold: 6,
                SuccessThreshold: 11,
            },
        },
        {
            Name: "service-alert-partially-overwrites-default-alert",
            DefaultAlert: &alert.Alert{
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
            ServiceAlert: &alert.Alert{
                Type:             alert.TypeDiscord,
                Enabled:          nil,
                SendOnResolved:   nil,
                FailureThreshold: 6,
                SuccessThreshold: 11,
            },
            ExpectedOutputAlert: &alert.Alert{
                Type:             alert.TypeDiscord,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 6,
                SuccessThreshold: 11,
            },
        },
        {
            Name: "default-alert-type-should-be-ignored",
            DefaultAlert: &alert.Alert{
                Type:             alert.TypeTelegram,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
            ServiceAlert: &alert.Alert{
                Type: alert.TypeDiscord,
            },
            ExpectedOutputAlert: &alert.Alert{
                Type:             alert.TypeDiscord,
                Enabled:          &enabled,
                SendOnResolved:   &enabled,
                Description:      &firstDescription,
                FailureThreshold: 5,
                SuccessThreshold: 10,
            },
        },
        {
            Name: "no-default-alert",
            DefaultAlert: &alert.Alert{
                Type:             alert.TypeDiscord,
                Enabled:          nil,
                SendOnResolved:   nil,
                Description:      &firstDescription,
                FailureThreshold: 2,
                SuccessThreshold: 5,
            },
            ServiceAlert:        nil,
            ExpectedOutputAlert: nil,
        },
    }
    for _, scenario := range scenarios {
        t.Run(scenario.Name, func(t *testing.T) {
            ParseWithDefaultAlert(scenario.DefaultAlert, scenario.ServiceAlert)
            if scenario.ExpectedOutputAlert == nil {
                if scenario.ServiceAlert != nil {
                    t.Fail()
                }
                return
            }
            if scenario.ServiceAlert.IsEnabled() != scenario.ExpectedOutputAlert.IsEnabled() {
                t.Errorf("expected ServiceAlert.IsEnabled() to be %v, got %v", scenario.ExpectedOutputAlert.IsEnabled(), scenario.ServiceAlert.IsEnabled())
            }
            if scenario.ServiceAlert.IsSendingOnResolved() != scenario.ExpectedOutputAlert.IsSendingOnResolved() {
                t.Errorf("expected ServiceAlert.IsSendingOnResolved() to be %v, got %v", scenario.ExpectedOutputAlert.IsSendingOnResolved(), scenario.ServiceAlert.IsSendingOnResolved())
            }
            if scenario.ServiceAlert.GetDescription() != scenario.ExpectedOutputAlert.GetDescription() {
                t.Errorf("expected ServiceAlert.GetDescription() to be %v, got %v", scenario.ExpectedOutputAlert.GetDescription(), scenario.ServiceAlert.GetDescription())
            }
            if scenario.ServiceAlert.FailureThreshold != scenario.ExpectedOutputAlert.FailureThreshold {
                t.Errorf("expected ServiceAlert.FailureThreshold to be %v, got %v", scenario.ExpectedOutputAlert.FailureThreshold, scenario.ServiceAlert.FailureThreshold)
            }
            if scenario.ServiceAlert.SuccessThreshold != scenario.ExpectedOutputAlert.SuccessThreshold {
                t.Errorf("expected ServiceAlert.SuccessThreshold to be %v, got %v", scenario.ExpectedOutputAlert.SuccessThreshold, scenario.ServiceAlert.SuccessThreshold)
            }
        })
    }
}
@@ -4,13 +4,17 @@ import (
    "fmt"
    "net/http"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

// AlertProvider is the configuration necessary for sending an alert using Slack
type AlertProvider struct {
    WebhookURL string `yaml:"webhook-url"`
    WebhookURL string `yaml:"webhook-url"` // Slack webhook URL

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid

@@ -19,7 +23,7 @@ func (provider *AlertProvider) IsValid() bool {
}

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
    var message, color, results string
    if resolved {
        message = fmt.Sprintf("An alert for *%s* has been resolved after passing successfully %d time(s) in a row", service.Name, alert.SuccessThreshold)

@@ -57,7 +61,12 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
      ]
    }
  ]
}`, message, alert.Description, color, results),
}`, message, alert.GetDescription(), color, results),
        Headers: map[string]string{"Content-Type": "application/json"},
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}

@@ -6,6 +6,7 @@ import (
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

@@ -22,7 +23,7 @@ func TestAlertProvider_IsValid(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.com"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }

@@ -44,7 +45,7 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
    provider := AlertProvider{WebhookURL: "http://example.com"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
60  alerting/provider/telegram/telegram.go  Normal file
@@ -0,0 +1,60 @@
package telegram

import (
    "fmt"
    "net/http"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

// AlertProvider is the configuration necessary for sending an alert using Telegram
type AlertProvider struct {
    Token string `yaml:"token"`
    ID    string `yaml:"id"`

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid
func (provider *AlertProvider) IsValid() bool {
    return len(provider.Token) > 0 && len(provider.ID) > 0
}

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, result *core.Result, resolved bool) *custom.AlertProvider {
    var message, results string
    if resolved {
        message = fmt.Sprintf("An alert for *%s* has been resolved:\\n—\\n _healthcheck passing successfully %d time(s) in a row_\\n— ", service.Name, alert.FailureThreshold)
    } else {
        message = fmt.Sprintf("An alert for *%s* has been triggered:\\n—\\n _healthcheck failed %d time(s) in a row_\\n— ", service.Name, alert.FailureThreshold)
    }
    for _, conditionResult := range result.ConditionResults {
        var prefix string
        if conditionResult.Success {
            prefix = "✅"
        } else {
            prefix = "❌"
        }
        results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
    }
    var text string
    if len(alert.GetDescription()) > 0 {
        text = fmt.Sprintf("⛑ *Gatus* \\n%s \\n*Description* \\n_%s_ \\n\\n*Condition results*\\n%s", message, alert.GetDescription(), results)
    } else {
        text = fmt.Sprintf("⛑ *Gatus* \\n%s \\n*Condition results*\\n%s", message, results)
    }
    return &custom.AlertProvider{
        URL:     fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token),
        Method:  http.MethodPost,
        Body:    fmt.Sprintf(`{"chat_id": "%s", "text": "%s", "parse_mode": "MARKDOWN"}`, provider.ID, text),
        Headers: map[string]string{"Content-Type": "application/json"},
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}
91  alerting/provider/telegram/telegram_test.go  Normal file
@@ -0,0 +1,91 @@
package telegram

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

func TestAlertProvider_IsValid(t *testing.T) {
    invalidProvider := AlertProvider{Token: "", ID: ""}
    if invalidProvider.IsValid() {
        t.Error("provider shouldn't have been valid")
    }
    validProvider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678"}
    if !validProvider.IsValid() {
        t.Error("provider should've been valid")
    }
}

func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
    provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
    if !strings.Contains(customAlertProvider.Body, "resolved") {
        t.Error("customAlertProvider.Body should've contained the substring resolved")
    }
    if customAlertProvider.URL != fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token) {
        t.Errorf("expected URL to be %s, got %s", fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token), customAlertProvider.URL)
    }
    if customAlertProvider.Method != http.MethodPost {
        t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
    }
    body := make(map[string]interface{})
    err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
    //_, err := json.Marshal(customAlertProvider.Body)
    if err != nil {
        t.Error("expected body to be valid JSON, got error:", err.Error())
    }
}

func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
    provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "0123456789"}
    description := "Healthcheck Successful"
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{Description: &description}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
    if !strings.Contains(customAlertProvider.Body, "triggered") {
        t.Error("customAlertProvider.Body should've contained the substring triggered")
    }
    if customAlertProvider.URL != fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token) {
        t.Errorf("expected URL to be %s, got %s", fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token), customAlertProvider.URL)
    }
    if customAlertProvider.Method != http.MethodPost {
        t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
    }
    body := make(map[string]interface{})
    err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
    if err != nil {
        t.Error("expected body to be valid JSON, got error:", err.Error())
    }
}

func TestAlertProvider_ToCustomAlertProviderWithDescription(t *testing.T) {
    provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "0123456789"}
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &alert.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
    if !strings.Contains(customAlertProvider.Body, "triggered") {
        t.Error("customAlertProvider.Body should've contained the substring triggered")
    }
    if customAlertProvider.URL != fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token) {
        t.Errorf("expected URL to be %s, got %s", fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", provider.Token), customAlertProvider.URL)
    }
    if customAlertProvider.Method != http.MethodPost {
        t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
    }
    body := make(map[string]interface{})
    err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
    if err != nil {
        t.Error("expected body to be valid JSON, got error:", err.Error())
    }
}
@@ -6,6 +6,7 @@ import (
    "net/http"
    "net/url"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider/custom"
    "github.com/TwinProduction/gatus/core"
)

@@ -16,6 +17,9 @@ type AlertProvider struct {
    Token string `yaml:"token"`
    From  string `yaml:"from"`
    To    string `yaml:"to"`

    // DefaultAlert is the default alert configuration to use for services with an alert of the appropriate type
    DefaultAlert *alert.Alert `yaml:"default-alert"`
}

// IsValid returns whether the provider's configuration is valid

@@ -24,12 +28,12 @@ func (provider *AlertProvider) IsValid() bool {
}

// ToCustomAlertProvider converts the provider into a custom.AlertProvider
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *core.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, alert *alert.Alert, _ *core.Result, resolved bool) *custom.AlertProvider {
    var message string
    if resolved {
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.GetDescription())
    } else {
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.Description)
        message = fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.GetDescription())
    }
    return &custom.AlertProvider{
        URL: fmt.Sprintf("https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json", provider.SID),

@@ -45,3 +49,8 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
        },
    }
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
    return provider.DefaultAlert
}

@@ -5,6 +5,7 @@ import (
    "strings"
    "testing"

    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/core"
)

@@ -31,7 +32,8 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
        From: "3",
        To:   "4",
    }
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, true)
    description := "alert-description"
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &alert.Alert{Description: &description}, &core.Result{}, true)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }

@@ -56,7 +58,8 @@ func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
        From: "2",
        To:   "1",
    }
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, false)
    description := "alert-description"
    customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &alert.Alert{Description: &description}, &core.Result{}, false)
    if customAlertProvider == nil {
        t.Fatal("customAlertProvider shouldn't have been nil")
    }
@@ -2,8 +2,13 @@ package client

import (
    "crypto/tls"
    "crypto/x509"
    "errors"
    "net"
    "net/http"
    "net/smtp"
    "runtime"
    "strings"
    "time"

    "github.com/go-ping/ping"

@@ -16,6 +21,9 @@ var (
    // pingTimeout is the timeout for the Ping function
    // This is mainly exposed for testing purposes
    pingTimeout = 5 * time.Second

    // httpTimeout is the timeout for secureHTTPClient and insecureHTTPClient
    httpTimeout = 10 * time.Second
)

// GetHTTPClient returns the shared HTTP client

@@ -23,7 +31,7 @@ func GetHTTPClient(insecure bool) *http.Client {
    if insecure {
        if insecureHTTPClient == nil {
            insecureHTTPClient = &http.Client{
                Timeout: 10 * time.Second,
                Timeout: httpTimeout,
                Transport: &http.Transport{
                    MaxIdleConns:        100,
                    MaxIdleConnsPerHost: 20,

@@ -32,18 +40,24 @@ func GetHTTPClient(insecure bool) *http.Client {
                        InsecureSkipVerify: true,
                    },
                },
                CheckRedirect: func(req *http.Request, via []*http.Request) error {
                    return http.ErrUseLastResponse // Don't follow redirects
                },
            }
        }
        return insecureHTTPClient
    }
    if secureHTTPClient == nil {
        secureHTTPClient = &http.Client{
            Timeout: 10 * time.Second,
            Timeout: httpTimeout,
            Transport: &http.Transport{
                MaxIdleConns:        100,
                MaxIdleConnsPerHost: 20,
                Proxy:               http.ProxyFromEnvironment,
            },
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                return http.ErrUseLastResponse // Don't follow redirects
            },
        }
    }
    return secureHTTPClient

@@ -59,6 +73,31 @@ func CanCreateTCPConnection(address string) bool {
    return true
}

// CanPerformStartTLS checks whether a connection can be established to an address using the STARTTLS protocol
func CanPerformStartTLS(address string, insecure bool) (connected bool, certificate *x509.Certificate, err error) {
    hostAndPort := strings.Split(address, ":")
    if len(hostAndPort) != 2 {
        return false, nil, errors.New("invalid address for starttls, format must be host:port")
    }
    smtpClient, err := smtp.Dial(address)
    if err != nil {
        return
    }
    err = smtpClient.StartTLS(&tls.Config{
        InsecureSkipVerify: insecure,
        ServerName:         hostAndPort[0],
    })
    if err != nil {
        return
    }
    if state, ok := smtpClient.TLSConnectionState(); ok {
        certificate = state.PeerCertificates[0]
    } else {
        return false, nil, errors.New("could not get TLS connection state")
    }
    return true, certificate, nil
}

// Ping checks if an address can be pinged and returns the round-trip time if the address can be pinged
//
// Note that this function takes at least 100ms, even if the address is 127.0.0.1

@@ -69,8 +108,9 @@ func Ping(address string) (bool, time.Duration) {
    }
    pinger.Count = 1
    pinger.Timeout = pingTimeout
    pinger.SetNetwork("ip4")
    pinger.SetPrivileged(true)
    // Set the pinger's privileged mode to true for every operating system except darwin
    // https://github.com/TwinProduction/gatus/issues/132
    pinger.SetPrivileged(runtime.GOOS != "darwin")
    err = pinger.Run()
    if err != nil {
        return false, 0
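The certificate returned by CanPerformStartTLS is what makes a condition like [CERTIFICATE_EXPIRATION] meaningful for STARTTLS endpoints. A rough, standalone sketch of how a caller could compute the remaining validity is shown below; the 48h threshold mirrors the condition in config.yaml and is illustrative only, not part of the diff.

package main

import (
    "fmt"
    "time"

    "github.com/TwinProduction/gatus/client"
)

func main() {
    connected, certificate, err := client.CanPerformStartTLS("smtp.gmail.com:587", false)
    if err != nil || !connected {
        fmt.Println("STARTTLS failed:", err)
        return
    }
    // Remaining validity of the first peer certificate returned by the handshake
    remaining := time.Until(certificate.NotAfter)
    fmt.Printf("certificate expires in %s\n", remaining)
    if remaining < 48*time.Hour { // illustrative threshold, mirroring "[CERTIFICATE_EXPIRATION] > 48h"
        fmt.Println("certificate expires soon")
    }
}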
@@ -49,3 +49,59 @@ func TestPing(t *testing.T) {
        }
    }
}

func TestCanPerformStartTLS(t *testing.T) {
    type args struct {
        address  string
        insecure bool
    }
    tests := []struct {
        name          string
        args          args
        wantConnected bool
        wantErr       bool
    }{
        {
            name: "invalid address",
            args: args{
                address: "test",
            },
            wantConnected: false,
            wantErr:       true,
        },
        {
            name: "error dial",
            args: args{
                address: "test:1234",
            },
            wantConnected: false,
            wantErr:       true,
        },
        {
            name: "valid starttls",
            args: args{
                address: "smtp.gmail.com:587",
            },
            wantConnected: true,
            wantErr:       false,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            connected, _, err := CanPerformStartTLS(tt.args.address, tt.args.insecure)
            if (err != nil) != tt.wantErr {
                t.Errorf("CanPerformStartTLS() err=%v, wantErr=%v", err, tt.wantErr)
                return
            }
            if connected != tt.wantConnected {
                t.Errorf("CanPerformStartTLS() connected=%v, wantConnected=%v", connected, tt.wantConnected)
            }
        })
    }
}

func TestCanCreateTCPConnection(t *testing.T) {
    if CanCreateTCPConnection("127.0.0.1") {
        t.Error("should've failed, because there's no port in the address")
    }
}
23  config.yaml
@@ -10,14 +10,15 @@ services:

  - name: back-end
    group: core
    url: "http://example.org/"
    url: "https://example.org/"
    interval: 5m
    conditions:
      - "[STATUS] == 200"
      - "[CERTIFICATE_EXPIRATION] > 48h"

  - name: monitoring
    group: internal
    url: "http://example.com/"
    url: "https://example.org/"
    interval: 5m
    conditions:
      - "[STATUS] == 200"

@@ -29,10 +30,18 @@ services:
    conditions:
      - "[STATUS] == 200"

  - name: cat-fact
    url: "https://cat-fact.herokuapp.com/facts/random"
  - name: example-dns-query
    url: "8.8.8.8" # Address of the DNS server to use
    interval: 5m
    dns:
      query-name: "example.com"
      query-type: "A"
    conditions:
      - "[STATUS] == 200"
      - "[BODY].deleted == false"
      - "len([BODY].text) > 0"
      - "[BODY] == 93.184.216.34"
      - "[DNS_RCODE] == NOERROR"

  - name: icmp-ping
    url: "icmp://example.org"
    interval: 1m
    conditions:
      - "[CONNECTED] == true"
202  config/config.go
@@ -5,14 +5,15 @@ import (
    "io/ioutil"
    "log"
    "os"
    "time"

    "github.com/TwinProduction/gatus/alerting"
    "github.com/TwinProduction/gatus/alerting/alert"
    "github.com/TwinProduction/gatus/alerting/provider"
    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/k8s"
    "github.com/TwinProduction/gatus/security"
    "github.com/TwinProduction/gatus/storage"
    "github.com/TwinProduction/gatus/util"
    "gopkg.in/yaml.v2"
)

@@ -39,13 +40,8 @@ var (
    // ErrConfigFileNotFound is an error returned when the configuration file could not be found
    ErrConfigFileNotFound = errors.New("configuration file not found")

    // ErrConfigNotLoaded is an error returned when an attempt to Get() the configuration before loading it is made
    ErrConfigNotLoaded = errors.New("configuration is nil")

    // ErrInvalidSecurityConfig is an error returned when the security configuration is invalid
    ErrInvalidSecurityConfig = errors.New("invalid security configuration")

    config *Config
)

// Config is the main configuration structure

@@ -56,6 +52,10 @@ type Config struct {
    // Metrics Whether to expose metrics at /metrics
    Metrics bool `yaml:"metrics"`

    // SkipInvalidConfigUpdate Whether to make the application ignore invalid configuration
    // if the configuration file is updated while the application is running
    SkipInvalidConfigUpdate bool `yaml:"skip-invalid-config-update"`

    // DisableMonitoringLock Whether to disable the monitoring lock
    // The monitoring lock is what prevents multiple services from being processed at the same time.
    // Disabling this may lead to inaccurate response times
@@ -78,47 +78,57 @@ type Config struct {

    // Web is the configuration for the web listener
    Web *WebConfig `yaml:"web"`

    filePath        string    // path to the file from which config was loaded from
    lastFileModTime time.Time // last modification time
}

// Get returns the configuration, or panics if the configuration hasn't loaded yet
func Get() *Config {
    if config == nil {
        panic(ErrConfigNotLoaded)
// HasLoadedConfigurationFileBeenModified returns whether the file that the
// configuration has been loaded from has been modified since it was last read
func (config Config) HasLoadedConfigurationFileBeenModified() bool {
    if fileInfo, err := os.Stat(config.filePath); err == nil {
        if !fileInfo.ModTime().IsZero() {
            return config.lastFileModTime.Unix() != fileInfo.ModTime().Unix()
        }
    }
    return config
    return false
}

// Set sets the configuration
// Used only for testing
func Set(cfg *Config) {
    config = cfg
// UpdateLastFileModTime refreshes Config.lastFileModTime
func (config *Config) UpdateLastFileModTime() {
    if fileInfo, err := os.Stat(config.filePath); err == nil {
        if !fileInfo.ModTime().IsZero() {
            config.lastFileModTime = fileInfo.ModTime()
        }
    }
}

// Load loads a custom configuration file
// Note that the misconfiguration of some fields may lead to panics. This is on purpose.
func Load(configFile string) error {
func Load(configFile string) (*Config, error) {
    log.Printf("[config][Load] Reading configuration from configFile=%s", configFile)
    cfg, err := readConfigurationFile(configFile)
    if err != nil {
        if os.IsNotExist(err) {
            return ErrConfigFileNotFound
            return nil, ErrConfigFileNotFound
        }
        return err
        return nil, err
    }
    config = cfg
    return nil
    cfg.filePath = configFile
    cfg.UpdateLastFileModTime()
    return cfg, nil
}

// LoadDefaultConfiguration loads the default configuration file
func LoadDefaultConfiguration() error {
    err := Load(DefaultConfigurationFilePath)
func LoadDefaultConfiguration() (*Config, error) {
    cfg, err := Load(DefaultConfigurationFilePath)
    if err != nil {
        if err == ErrConfigFileNotFound {
            return Load(DefaultFallbackConfigurationFilePath)
        }
        return err
        return nil, err
    }
    return nil
    return cfg, nil
}

func readConfigurationFile(fileName string) (config *Config, err error) {
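A rough sketch of how a caller might combine the new Load signature with HasLoadedConfigurationFileBeenModified to reload the configuration at runtime; the polling interval and the keep-the-old-config-on-error behaviour are illustrative assumptions, not taken from this diff.

package main

import (
    "log"
    "time"

    "github.com/TwinProduction/gatus/config"
)

func main() {
    cfg, err := config.Load("config.yaml")
    if err != nil {
        log.Fatal(err)
    }
    for {
        time.Sleep(30 * time.Second) // illustrative polling interval
        if cfg.HasLoadedConfigurationFileBeenModified() {
            log.Println("configuration file changed, reloading")
            newCfg, err := config.Load("config.yaml")
            if err != nil {
                // In the spirit of skip-invalid-config-update: keep the previous configuration on error
                log.Printf("failed to reload configuration: %v", err)
                continue
            }
            cfg = newCfg
        }
    }
}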
@@ -144,71 +154,92 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
    } else {
        // Note that the functions below may panic, and this is on purpose to prevent Gatus from starting with
        // invalid configurations
        validateAlertingConfig(config)
        validateSecurityConfig(config)
        validateServicesConfig(config)
        validateKubernetesConfig(config)
        validateWebConfig(config)
        validateStorageConfig(config)
        validateAlertingConfig(config.Alerting, config.Services, config.Debug)
        if err := validateSecurityConfig(config); err != nil {
            return nil, err
        }
        if err := validateServicesConfig(config); err != nil {
            return nil, err
        }
        if err := validateKubernetesConfig(config); err != nil {
            return nil, err
        }
        if err := validateWebConfig(config); err != nil {
            return nil, err
        }
        if err := validateStorageConfig(config); err != nil {
            return nil, err
        }
    }
    return
}

func validateStorageConfig(config *Config) {
func validateStorageConfig(config *Config) error {
    if config.Storage == nil {
        config.Storage = &storage.Config{}
        config.Storage = &storage.Config{
            Type: storage.TypeInMemory,
        }
    }
    err := storage.Initialize(config.Storage)
    if err != nil {
        panic(err)
        return err
    }
    // Remove all ServiceStatus that represent services which no longer exist in the configuration
    var keys []string
    for _, service := range config.Services {
        keys = append(keys, util.ConvertGroupAndServiceToKey(service.Group, service.Name))
        keys = append(keys, service.Key())
    }
    numberOfServiceStatusesDeleted := storage.Get().DeleteAllServiceStatusesNotInKeys(keys)
    if numberOfServiceStatusesDeleted > 0 {
        log.Printf("[config][validateStorageConfig] Deleted %d service statuses because their matching services no longer existed", numberOfServiceStatusesDeleted)
    }
    return nil
}

func validateWebConfig(config *Config) {
func validateWebConfig(config *Config) error {
    if config.Web == nil {
        config.Web = &WebConfig{Address: DefaultAddress, Port: DefaultPort}
    } else {
        config.Web.validateAndSetDefaults()
        return config.Web.validateAndSetDefaults()
    }
    return nil
}

func validateKubernetesConfig(config *Config) {
// deprecated
// I don't like the current implementation.
func validateKubernetesConfig(config *Config) error {
    if config.Kubernetes != nil && config.Kubernetes.AutoDiscover {
        log.Println("WARNING - The Kubernetes integration is planned to be removed in v3.0.0. If you're seeing this message, it's because you're currently using it, and you may want to give your opinion at https://github.com/TwinProduction/gatus/discussions/135")
        if config.Kubernetes.ServiceTemplate == nil {
            panic("kubernetes.service-template cannot be nil")
            return errors.New("kubernetes.service-template cannot be nil")
        }
        if config.Debug {
            log.Println("[config][validateKubernetesConfig] Automatically discovering Kubernetes services...")
        }
        discoveredServices, err := k8s.DiscoverServices(config.Kubernetes)
        if err != nil {
            panic(err)
            return err
        }
        config.Services = append(config.Services, discoveredServices...)
        log.Printf("[config][validateKubernetesConfig] Discovered %d Kubernetes services", len(discoveredServices))
    }
    return nil
}

func validateServicesConfig(config *Config) {
func validateServicesConfig(config *Config) error {
    for _, service := range config.Services {
        if config.Debug {
            log.Printf("[config][validateServicesConfig] Validating service '%s'", service.Name)
        }
        service.ValidateAndSetDefaults()
        if err := service.ValidateAndSetDefaults(); err != nil {
            return err
        }
    }
    log.Printf("[config][validateServicesConfig] Validated %d services", len(config.Services))
    return nil
}

func validateSecurityConfig(config *Config) {
func validateSecurityConfig(config *Config) error {
    if config.Security != nil {
        if config.Security.IsValid() {
            if config.Debug {
@@ -217,29 +248,49 @@ func validateSecurityConfig(config *Config) {
        } else {
            // If there was an attempt to configure security, then it must mean that some confidential or private
            // data are exposed. As a result, we'll force a panic because it's better to be safe than sorry.
            panic(ErrInvalidSecurityConfig)
            return ErrInvalidSecurityConfig
        }
    }
    return nil
}

func validateAlertingConfig(config *Config) {
    if config.Alerting == nil {
// validateAlertingConfig validates the alerting configuration
// Note that the alerting configuration has to be validated before the service configuration, because the default alert
// returned by provider.AlertProvider.GetDefaultAlert() must be parsed before core.Service.ValidateAndSetDefaults()
// sets the default alert values when none are set.
func validateAlertingConfig(alertingConfig *alerting.Config, services []*core.Service, debug bool) {
    if alertingConfig == nil {
        log.Printf("[config][validateAlertingConfig] Alerting is not configured")
        return
    }
    alertTypes := []core.AlertType{
        core.SlackAlert,
        core.MattermostAlert,
        core.MessagebirdAlert,
        core.TwilioAlert,
        core.PagerDutyAlert,
        core.CustomAlert,
    alertTypes := []alert.Type{
        alert.TypeCustom,
        alert.TypeDiscord,
        alert.TypeMattermost,
        alert.TypeMessagebird,
        alert.TypePagerDuty,
        alert.TypeSlack,
        alert.TypeTelegram,
        alert.TypeTwilio,
    }
    var validProviders, invalidProviders []core.AlertType
    var validProviders, invalidProviders []alert.Type
    for _, alertType := range alertTypes {
        alertProvider := GetAlertingProviderByAlertType(config, alertType)
        alertProvider := alertingConfig.GetAlertingProviderByAlertType(alertType)
        if alertProvider != nil {
            if alertProvider.IsValid() {
                // Parse alerts with the provider's default alert
                if alertProvider.GetDefaultAlert() != nil {
                    for _, service := range services {
                        for alertIndex, serviceAlert := range service.Alerts {
                            if alertType == serviceAlert.Type {
                                if debug {
                                    log.Printf("[config][validateAlertingConfig] Parsing alert %d with provider's default alert for provider=%s in service=%s", alertIndex, alertType, service.Name)
                                }
                                provider.ParseWithDefaultAlert(alertProvider.GetDefaultAlert(), serviceAlert)
                            }
                        }
                    }
                }
                validProviders = append(validProviders, alertType)
            } else {
                log.Printf("[config][validateAlertingConfig] Ignoring provider=%s because configuration is invalid", alertType)
@@ -251,46 +302,3 @@ func validateAlertingConfig(config *Config) {
|
||||
}
|
||||
log.Printf("[config][validateAlertingConfig] configuredProviders=%s; ignoredProviders=%s", validProviders, invalidProviders)
|
||||
}
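
For reference, the parsing step above boils down to filling in whatever the service-level alert leaves unset from the provider's default-alert block. A minimal sketch of that behaviour, assuming alert.Alert exposes pointer fields such as Enabled and Description — the real field names live in alerting/provider and may differ:

// Hypothetical sketch of provider.ParseWithDefaultAlert — not the actual implementation.
// Values explicitly set on the service alert win; unset values fall back to the provider's default alert.
func parseWithDefaultAlertSketch(defaultAlert, serviceAlert *alert.Alert) {
	if serviceAlert.Enabled == nil {
		serviceAlert.Enabled = defaultAlert.Enabled
	}
	if serviceAlert.Description == nil {
		serviceAlert.Description = defaultAlert.Description
	}
	if serviceAlert.FailureThreshold == 0 {
		serviceAlert.FailureThreshold = defaultAlert.FailureThreshold
	}
	if serviceAlert.SuccessThreshold == 0 {
		serviceAlert.SuccessThreshold = defaultAlert.SuccessThreshold
	}
}

This matches what the tests further down expect: a service alert that only overrides success-threshold still inherits the enabled flag and failure-threshold from the provider's default-alert.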

// GetAlertingProviderByAlertType returns a provider.AlertProvider by its corresponding core.AlertType
func GetAlertingProviderByAlertType(config *Config, alertType core.AlertType) provider.AlertProvider {
switch alertType {
case core.SlackAlert:
if config.Alerting.Slack == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.Slack
case core.MattermostAlert:
if config.Alerting.Mattermost == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.Mattermost
case core.MessagebirdAlert:
if config.Alerting.Messagebird == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.Messagebird
case core.TwilioAlert:
if config.Alerting.Twilio == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.Twilio
case core.PagerDutyAlert:
if config.Alerting.PagerDuty == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.PagerDuty
case core.CustomAlert:
if config.Alerting.Custom == nil {
// Since we're returning an interface, we need to explicitly return nil, even if the provider itself is nil
return nil
}
return config.Alerting.Custom
}
return nil
}
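
The repeated "explicitly return nil" comments above are about Go's typed-nil interface gotcha: a nil concrete pointer wrapped in an interface value is not equal to nil. A small self-contained illustration (the type name is made up for the example):

// Returning config.Alerting.Slack directly when it is a nil pointer would yield an interface
// value that still carries the concrete type, so later `provider == nil` checks would fail.
type demoProvider struct{}

func typedNilGotcha() bool {
	var concrete *demoProvider       // nil pointer
	var iface interface{} = concrete // interface holding (type=*demoProvider, value=nil)
	return iface == nil              // false — hence the explicit untyped `return nil` above
}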

@@ -6,36 +6,30 @@ import (
"testing"
"time"

"github.com/TwinProduction/gatus/alerting"
"github.com/TwinProduction/gatus/alerting/alert"
"github.com/TwinProduction/gatus/alerting/provider/custom"
"github.com/TwinProduction/gatus/alerting/provider/discord"
"github.com/TwinProduction/gatus/alerting/provider/mattermost"
"github.com/TwinProduction/gatus/alerting/provider/messagebird"
"github.com/TwinProduction/gatus/alerting/provider/pagerduty"
"github.com/TwinProduction/gatus/alerting/provider/slack"
"github.com/TwinProduction/gatus/alerting/provider/telegram"
"github.com/TwinProduction/gatus/alerting/provider/twilio"
"github.com/TwinProduction/gatus/core"
"github.com/TwinProduction/gatus/k8stest"
v1 "k8s.io/api/core/v1"
)
|
||||
|
||||
func TestGetBeforeConfigIsLoaded(t *testing.T) {
|
||||
defer func() { recover() }()
|
||||
Get()
|
||||
t.Fatal("Should've panicked because the configuration hasn't been loaded yet")
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
if config != nil {
|
||||
t.Fatal("config should've been nil")
|
||||
}
|
||||
Set(&Config{})
|
||||
if config == nil {
|
||||
t.Fatal("config shouldn't have been nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFileThatDoesNotExist(t *testing.T) {
|
||||
err := Load("file-that-does-not-exist.yaml")
|
||||
_, err := Load("file-that-does-not-exist.yaml")
|
||||
if err == nil {
|
||||
t.Error("Should've returned an error, because the file specified doesn't exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadDefaultConfigurationFile(t *testing.T) {
|
||||
err := LoadDefaultConfiguration()
|
||||
_, err := LoadDefaultConfiguration()
|
||||
if err == nil {
|
||||
t.Error("Should've returned an error, because there's no configuration files at the default path nor the default fallback path")
|
||||
}
|
||||
@@ -227,8 +221,7 @@ services:
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithInvalidPort(t *testing.T) {
|
||||
defer func() { recover() }()
|
||||
_, _ = parseAndValidateConfigBytes([]byte(`
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
web:
|
||||
port: 65536
|
||||
address: 127.0.0.1
|
||||
@@ -238,7 +231,9 @@ services:
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
t.Fatal("Should've panicked because the configuration specifies an invalid port value")
|
||||
if err == nil {
|
||||
t.Fatal("Should've returned an error because the configuration specifies an invalid port value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithMetricsAndCustomUserAgentHeader(t *testing.T) {
|
||||
@@ -338,12 +333,25 @@ debug: true
|
||||
alerting:
|
||||
slack:
|
||||
webhook-url: "http://example.com"
|
||||
discord:
|
||||
webhook-url: "http://example.org"
|
||||
pagerduty:
|
||||
integration-key: "00000000000000000000000000000000"
|
||||
mattermost:
|
||||
webhook-url: "http://example.com"
|
||||
messagebird:
|
||||
access-key: "1"
|
||||
originator: "31619191918"
|
||||
recipients: "31619191919"
|
||||
telegram:
|
||||
token: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
|
||||
id: 0123456789
|
||||
twilio:
|
||||
sid: "1234"
|
||||
token: "5678"
|
||||
from: "+1-234-567-8901"
|
||||
to: "+1-234-567-8901"
|
||||
|
||||
services:
|
||||
- name: twinnation
|
||||
url: https://twinnation.org/health
|
||||
@@ -355,10 +363,196 @@ services:
|
||||
failure-threshold: 7
|
||||
success-threshold: 5
|
||||
description: "Healthcheck failed 7 times in a row"
|
||||
- type: messagebird
|
||||
- type: mattermost
|
||||
enabled: true
|
||||
- type: messagebird
|
||||
- type: discord
|
||||
enabled: true
|
||||
failure-threshold: 10
|
||||
- type: telegram
|
||||
enabled: true
|
||||
- type: twilio
|
||||
enabled: true
|
||||
failure-threshold: 12
|
||||
success-threshold: 15
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
}
|
||||
// Alerting providers
|
||||
if config.Alerting == nil {
|
||||
t.Fatal("config.Alerting shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting.Slack == nil || !config.Alerting.Slack.IsValid() {
|
||||
t.Fatal("Slack alerting config should've been valid")
|
||||
}
|
||||
// Services
|
||||
if len(config.Services) != 1 {
|
||||
t.Error("There should've been 1 service")
|
||||
}
|
||||
if config.Services[0].URL != "https://twinnation.org/health" {
|
||||
t.Errorf("URL should have been %s", "https://twinnation.org/health")
|
||||
}
|
||||
if config.Services[0].Interval != 60*time.Second {
|
||||
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
|
||||
}
|
||||
if len(config.Services[0].Alerts) != 7 {
|
||||
t.Fatal("There should've been 7 alerts configured")
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[0].Type != alert.TypeSlack {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeSlack, config.Services[0].Alerts[0].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[0].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[0].FailureThreshold != 3 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Services[0].Alerts[0].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[0].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[0].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[1].Type != alert.TypePagerDuty {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypePagerDuty, config.Services[0].Alerts[1].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[1].GetDescription() != "Healthcheck failed 7 times in a row" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "Healthcheck failed 7 times in a row", config.Services[0].Alerts[1].GetDescription())
|
||||
}
|
||||
if config.Services[0].Alerts[1].FailureThreshold != 7 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 7, config.Services[0].Alerts[1].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[1].SuccessThreshold != 5 {
|
||||
t.Errorf("The success threshold of the alert should've been %d, but it was %d", 5, config.Services[0].Alerts[1].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[2].Type != alert.TypeMattermost {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeMattermost, config.Services[0].Alerts[2].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[2].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[2].FailureThreshold != 3 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Services[0].Alerts[2].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[2].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[2].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[3].Type != alert.TypeMessagebird {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeMessagebird, config.Services[0].Alerts[3].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[3].IsEnabled() {
|
||||
t.Error("The alert should've been disabled")
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[4].Type != alert.TypeDiscord {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeDiscord, config.Services[0].Alerts[4].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[4].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[4].FailureThreshold != 10 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Services[0].Alerts[4].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[4].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[4].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[5].Type != alert.TypeTelegram {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeTelegram, config.Services[0].Alerts[5].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[5].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[5].FailureThreshold != 3 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Services[0].Alerts[5].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[5].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[5].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[6].Type != alert.TypeTwilio {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeTwilio, config.Services[0].Alerts[6].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[6].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[6].FailureThreshold != 12 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 12, config.Services[0].Alerts[6].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[6].SuccessThreshold != 15 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 15, config.Services[0].Alerts[6].SuccessThreshold)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithAlertingAndDefaultAlert(t *testing.T) {
|
||||
config, err := parseAndValidateConfigBytes([]byte(`
|
||||
debug: true
|
||||
|
||||
alerting:
|
||||
slack:
|
||||
webhook-url: "http://example.com"
|
||||
default-alert:
|
||||
enabled: true
|
||||
discord:
|
||||
webhook-url: "http://example.org"
|
||||
default-alert:
|
||||
enabled: true
|
||||
failure-threshold: 10
|
||||
success-threshold: 1
|
||||
pagerduty:
|
||||
integration-key: "00000000000000000000000000000000"
|
||||
default-alert:
|
||||
enabled: true
|
||||
description: default description
|
||||
failure-threshold: 7
|
||||
success-threshold: 5
|
||||
mattermost:
|
||||
webhook-url: "http://example.com"
|
||||
default-alert:
|
||||
enabled: true
|
||||
messagebird:
|
||||
access-key: "1"
|
||||
originator: "31619191918"
|
||||
recipients: "31619191919"
|
||||
default-alert:
|
||||
enabled: false
|
||||
send-on-resolved: true
|
||||
telegram:
|
||||
token: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
|
||||
id: 0123456789
|
||||
default-alert:
|
||||
enabled: true
|
||||
twilio:
|
||||
sid: "1234"
|
||||
token: "5678"
|
||||
from: "+1-234-567-8901"
|
||||
to: "+1-234-567-8901"
|
||||
default-alert:
|
||||
enabled: true
|
||||
failure-threshold: 12
|
||||
success-threshold: 15
|
||||
|
||||
services:
|
||||
- name: twinnation
|
||||
url: https://twinnation.org/health
|
||||
alerts:
|
||||
- type: slack
|
||||
- type: pagerduty
|
||||
- type: mattermost
|
||||
- type: messagebird
|
||||
- type: discord
|
||||
success-threshold: 2 # test service alert override
|
||||
- type: telegram
|
||||
- type: twilio
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("expected no error, got", err.Error())
|
||||
@@ -369,24 +563,43 @@ services:
|
||||
if config.Metrics {
|
||||
t.Error("Metrics should've been false by default")
|
||||
}
|
||||
// Alerting providers
|
||||
if config.Alerting == nil {
|
||||
t.Fatal("config.Alerting shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting.Slack == nil || !config.Alerting.Slack.IsValid() {
|
||||
t.Fatal("Slack alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Slack.GetDefaultAlert() == nil {
|
||||
t.Fatal("Slack.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
if config.Alerting.Slack.WebhookURL != "http://example.com" {
|
||||
t.Errorf("Slack webhook should've been %s, but was %s", "http://example.com", config.Alerting.Slack.WebhookURL)
|
||||
}
|
||||
|
||||
if config.Alerting.PagerDuty == nil || !config.Alerting.PagerDuty.IsValid() {
|
||||
t.Fatal("PagerDuty alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Messagebird == nil || !config.Alerting.Messagebird.IsValid() {
|
||||
t.Fatal("Messagebird alerting config should've been valid")
|
||||
if config.Alerting.PagerDuty.GetDefaultAlert() == nil {
|
||||
t.Fatal("PagerDuty.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
if config.Alerting.PagerDuty.IntegrationKey != "00000000000000000000000000000000" {
|
||||
t.Errorf("PagerDuty integration key should've been %s, but was %s", "00000000000000000000000000000000", config.Alerting.PagerDuty.IntegrationKey)
|
||||
}
|
||||
|
||||
if config.Alerting.Mattermost == nil || !config.Alerting.Mattermost.IsValid() {
|
||||
t.Fatal("Mattermost alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Mattermost.GetDefaultAlert() == nil {
|
||||
t.Fatal("Mattermost.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
|
||||
if config.Alerting.Messagebird == nil || !config.Alerting.Messagebird.IsValid() {
|
||||
t.Fatal("Messagebird alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Messagebird.GetDefaultAlert() == nil {
|
||||
t.Fatal("Messagebird.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
if config.Alerting.Messagebird.AccessKey != "1" {
|
||||
t.Errorf("Messagebird access key should've been %s, but was %s", "1", config.Alerting.Messagebird.AccessKey)
|
||||
}
|
||||
@@ -396,6 +609,40 @@ services:
|
||||
if config.Alerting.Messagebird.Recipients != "31619191919" {
|
||||
t.Errorf("Messagebird to recipients should've been %s, but was %s", "31619191919", config.Alerting.Messagebird.Recipients)
|
||||
}
|
||||
|
||||
if config.Alerting.Discord == nil || !config.Alerting.Discord.IsValid() {
|
||||
t.Fatal("Discord alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Discord.GetDefaultAlert() == nil {
|
||||
t.Fatal("Discord.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
if config.Alerting.Discord.WebhookURL != "http://example.org" {
|
||||
t.Errorf("Discord webhook should've been %s, but was %s", "http://example.org", config.Alerting.Discord.WebhookURL)
|
||||
}
|
||||
if config.Alerting.GetAlertingProviderByAlertType(alert.TypeDiscord) != config.Alerting.Discord {
|
||||
t.Error("expected discord configuration")
|
||||
}
|
||||
|
||||
if config.Alerting.Telegram == nil || !config.Alerting.Telegram.IsValid() {
|
||||
t.Fatal("Telegram alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Telegram.GetDefaultAlert() == nil {
|
||||
t.Fatal("Telegram.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
if config.Alerting.Telegram.Token != "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11" {
|
||||
t.Errorf("Telegram token should've been %s, but was %s", "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", config.Alerting.Telegram.Token)
|
||||
}
|
||||
if config.Alerting.Telegram.ID != "0123456789" {
|
||||
t.Errorf("Telegram ID should've been %s, but was %s", "012345689", config.Alerting.Telegram.ID)
|
||||
}
|
||||
|
||||
if config.Alerting.Twilio == nil || !config.Alerting.Twilio.IsValid() {
|
||||
t.Fatal("Twilio alerting config should've been valid")
|
||||
}
|
||||
if config.Alerting.Twilio.GetDefaultAlert() == nil {
|
||||
t.Fatal("Twilio.GetDefaultAlert() shouldn't have returned nil")
|
||||
}
|
||||
// Services
|
||||
if len(config.Services) != 1 {
|
||||
t.Error("There should've been 1 service")
|
||||
}
|
||||
@@ -405,13 +652,14 @@ services:
|
||||
if config.Services[0].Interval != 60*time.Second {
|
||||
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
|
||||
}
|
||||
if config.Services[0].Alerts == nil {
|
||||
t.Fatal("The service alerts shouldn't have been nil")
|
||||
if len(config.Services[0].Alerts) != 7 {
|
||||
t.Fatal("There should've been 7 alerts configured")
|
||||
}
|
||||
if len(config.Services[0].Alerts) != 3 {
|
||||
t.Fatal("There should've been 3 alert configured")
|
||||
|
||||
if config.Services[0].Alerts[0].Type != alert.TypeSlack {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeSlack, config.Services[0].Alerts[0].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[0].Enabled {
|
||||
if !config.Services[0].Alerts[0].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[0].FailureThreshold != 3 {
|
||||
@@ -420,23 +668,159 @@ services:
|
||||
if config.Services[0].Alerts[0].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[0].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[1].Type != alert.TypePagerDuty {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypePagerDuty, config.Services[0].Alerts[1].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[1].GetDescription() != "default description" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "default description", config.Services[0].Alerts[1].GetDescription())
|
||||
}
|
||||
if config.Services[0].Alerts[1].FailureThreshold != 7 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 7, config.Services[0].Alerts[1].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[1].SuccessThreshold != 5 {
|
||||
t.Errorf("The success threshold of the alert should've been %d, but it was %d", 5, config.Services[0].Alerts[1].SuccessThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[0].Type != core.SlackAlert {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", core.SlackAlert, config.Services[0].Alerts[0].Type)
|
||||
|
||||
if config.Services[0].Alerts[2].Type != alert.TypeMattermost {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeMattermost, config.Services[0].Alerts[2].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[1].Type != core.PagerDutyAlert {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", core.PagerDutyAlert, config.Services[0].Alerts[1].Type)
|
||||
if !config.Services[0].Alerts[2].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[1].Description != "Healthcheck failed 7 times in a row" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "Healthcheck failed 7 times in a row", config.Services[0].Alerts[0].Description)
|
||||
if config.Services[0].Alerts[2].FailureThreshold != 3 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Services[0].Alerts[2].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[2].Type != core.MessagebirdAlert {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", core.MessagebirdAlert, config.Services[0].Alerts[1].Type)
|
||||
if config.Services[0].Alerts[2].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[2].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[3].Type != alert.TypeMessagebird {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeMessagebird, config.Services[0].Alerts[3].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[3].IsEnabled() {
|
||||
t.Error("The alert should've been disabled")
|
||||
}
|
||||
if !config.Services[0].Alerts[3].IsSendingOnResolved() {
|
||||
t.Error("The alert should be sending on resolve")
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[4].Type != alert.TypeDiscord {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeDiscord, config.Services[0].Alerts[4].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[4].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[4].FailureThreshold != 10 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Services[0].Alerts[4].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[4].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[4].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[5].Type != alert.TypeTelegram {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeTelegram, config.Services[0].Alerts[5].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[5].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[5].FailureThreshold != 3 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Services[0].Alerts[5].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[5].SuccessThreshold != 2 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Services[0].Alerts[5].SuccessThreshold)
|
||||
}
|
||||
|
||||
if config.Services[0].Alerts[6].Type != alert.TypeTwilio {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeTwilio, config.Services[0].Alerts[6].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[6].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[6].FailureThreshold != 12 {
|
||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 12, config.Services[0].Alerts[6].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[6].SuccessThreshold != 15 {
|
||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 15, config.Services[0].Alerts[6].SuccessThreshold)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithAlertingAndDefaultAlertAndMultipleAlertsOfSameTypeWithOverriddenParameters(t *testing.T) {
|
||||
config, err := parseAndValidateConfigBytes([]byte(`
|
||||
alerting:
|
||||
slack:
|
||||
webhook-url: "http://example.com"
|
||||
default-alert:
|
||||
enabled: true
|
||||
description: "description"
|
||||
|
||||
services:
|
||||
- name: twinnation
|
||||
url: https://twinnation.org/health
|
||||
alerts:
|
||||
- type: slack
|
||||
failure-threshold: 10
|
||||
- type: slack
|
||||
failure-threshold: 20
|
||||
description: "wow"
|
||||
- type: slack
|
||||
enabled: false
|
||||
failure-threshold: 30
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != nil {
|
||||
t.Error("expected no error, got", err.Error())
|
||||
}
|
||||
if config == nil {
|
||||
t.Fatal("Config shouldn't have been nil")
|
||||
}
|
||||
// Alerting providers
|
||||
if config.Alerting == nil {
|
||||
t.Fatal("config.Alerting shouldn't have been nil")
|
||||
}
|
||||
if config.Alerting.Slack == nil || !config.Alerting.Slack.IsValid() {
|
||||
t.Fatal("Slack alerting config should've been valid")
|
||||
}
|
||||
// Services
|
||||
if len(config.Services) != 1 {
|
||||
t.Error("There should've been 2 services")
|
||||
}
|
||||
if config.Services[0].Alerts[0].Type != alert.TypeSlack {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeSlack, config.Services[0].Alerts[0].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[1].Type != alert.TypeSlack {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeSlack, config.Services[0].Alerts[1].Type)
|
||||
}
|
||||
if config.Services[0].Alerts[2].Type != alert.TypeSlack {
|
||||
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeSlack, config.Services[0].Alerts[2].Type)
|
||||
}
|
||||
if !config.Services[0].Alerts[0].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if !config.Services[0].Alerts[1].IsEnabled() {
|
||||
t.Error("The alert should've been enabled")
|
||||
}
|
||||
if config.Services[0].Alerts[2].IsEnabled() {
|
||||
t.Error("The alert should've been disabled")
|
||||
}
|
||||
if config.Services[0].Alerts[0].GetDescription() != "description" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "description", config.Services[0].Alerts[0].GetDescription())
|
||||
}
|
||||
if config.Services[0].Alerts[1].GetDescription() != "wow" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "description", config.Services[0].Alerts[1].GetDescription())
|
||||
}
|
||||
if config.Services[0].Alerts[2].GetDescription() != "description" {
|
||||
t.Errorf("The description of the alert should've been %s, but it was %s", "description", config.Services[0].Alerts[2].GetDescription())
|
||||
}
|
||||
if config.Services[0].Alerts[0].FailureThreshold != 10 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Services[0].Alerts[0].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[1].FailureThreshold != 20 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 20, config.Services[0].Alerts[1].FailureThreshold)
|
||||
}
|
||||
if config.Services[0].Alerts[2].FailureThreshold != 30 {
|
||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 30, config.Services[0].Alerts[2].FailureThreshold)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -644,9 +1028,51 @@ services:
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithInvalidServiceName(t *testing.T) {
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
services:
|
||||
- name: ""
|
||||
url: https://twinnation.org/health
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err != core.ErrServiceWithNoName {
|
||||
t.Error("should've returned an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithInvalidStorageConfig(t *testing.T) {
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
storage:
|
||||
type: sqlite
|
||||
services:
|
||||
- name: example
|
||||
url: https://example.org
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err == nil {
|
||||
t.Error("should've returned an error, because a file must be specified for a storage of type sqlite")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithInvalidYAML(t *testing.T) {
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
storage:
|
||||
invalid yaml
|
||||
services:
|
||||
- name: example
|
||||
url: https://example.org
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
if err == nil {
|
||||
t.Error("should've returned an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithInvalidSecurityConfig(t *testing.T) {
|
||||
defer func() { recover() }()
|
||||
_, _ = parseAndValidateConfigBytes([]byte(`
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
security:
|
||||
basic:
|
||||
username: "admin"
|
||||
@@ -657,13 +1083,15 @@ services:
|
||||
conditions:
|
||||
- "[STATUS] == 200"
|
||||
`))
|
||||
t.Error("Function should've panicked")
|
||||
if err == nil {
|
||||
t.Error("should've returned an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithValidSecurityConfig(t *testing.T) {
|
||||
const expectedUsername = "admin"
|
||||
const expectedPasswordHash = "6b97ed68d14eb3f1aa959ce5d49c7dc612e1eb1dafd73b1e705847483fd6a6c809f2ceb4e8df6ff9984c6298ff0285cace6614bf8daa9f0070101b6c89899e22"
|
||||
config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
|
||||
config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`debug: true
|
||||
security:
|
||||
basic:
|
||||
username: "%s"
|
||||
@@ -778,8 +1206,7 @@ kubernetes:
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithKubernetesAutoDiscoveryButNoServiceTemplate(t *testing.T) {
|
||||
defer func() { recover() }()
|
||||
_, _ = parseAndValidateConfigBytes([]byte(`
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
kubernetes:
|
||||
cluster-mode: "mock"
|
||||
auto-discover: true
|
||||
@@ -788,12 +1215,13 @@ kubernetes:
|
||||
hostname-suffix: ".default.svc.cluster.local"
|
||||
target-path: "/health"
|
||||
`))
|
||||
t.Error("Function should've panicked because providing a service-template is mandatory")
|
||||
if err == nil {
|
||||
t.Error("should've returned an error because providing a service-template is mandatory")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndValidateConfigBytesWithKubernetesAutoDiscoveryUsingClusterModeIn(t *testing.T) {
|
||||
defer func() { recover() }()
|
||||
_, _ = parseAndValidateConfigBytes([]byte(`
|
||||
_, err := parseAndValidateConfigBytes([]byte(`
|
||||
kubernetes:
|
||||
cluster-mode: "in"
|
||||
auto-discover: true
|
||||
@@ -806,6 +1234,44 @@ kubernetes:
|
||||
hostname-suffix: ".default.svc.cluster.local"
|
||||
target-path: "/health"
|
||||
`))
|
||||
// TODO: find a way to test this?
|
||||
t.Error("Function should've panicked because testing with ClusterModeIn isn't supported")
|
||||
if err == nil {
|
||||
t.Error("should've returned an error because testing with ClusterModeIn isn't supported")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAlertingProviderByAlertType(t *testing.T) {
|
||||
alertingConfig := &alerting.Config{
|
||||
Custom: &custom.AlertProvider{},
|
||||
Discord: &discord.AlertProvider{},
|
||||
Mattermost: &mattermost.AlertProvider{},
|
||||
Messagebird: &messagebird.AlertProvider{},
|
||||
PagerDuty: &pagerduty.AlertProvider{},
|
||||
Slack: &slack.AlertProvider{},
|
||||
Telegram: &telegram.AlertProvider{},
|
||||
Twilio: &twilio.AlertProvider{},
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeCustom) != alertingConfig.Custom {
|
||||
t.Error("expected Custom configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeDiscord) != alertingConfig.Discord {
|
||||
t.Error("expected Discord configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeMattermost) != alertingConfig.Mattermost {
|
||||
t.Error("expected Mattermost configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeMessagebird) != alertingConfig.Messagebird {
|
||||
t.Error("expected Messagebird configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypePagerDuty) != alertingConfig.PagerDuty {
|
||||
t.Error("expected PagerDuty configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeSlack) != alertingConfig.Slack {
|
||||
t.Error("expected Slack configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeTelegram) != alertingConfig.Telegram {
|
||||
t.Error("expected Telegram configuration")
|
||||
}
|
||||
if alertingConfig.GetAlertingProviderByAlertType(alert.TypeTwilio) != alertingConfig.Twilio {
|
||||
t.Error("expected Twilio configuration")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ type WebConfig struct {
}

// validateAndSetDefaults checks and sets the default values for fields that are not set
func (web *WebConfig) validateAndSetDefaults() {
func (web *WebConfig) validateAndSetDefaults() error {
// Validate the Address
if len(web.Address) == 0 {
web.Address = DefaultAddress
@@ -25,8 +25,9 @@ func (web *WebConfig) validateAndSetDefaults() {
if web.Port == 0 {
web.Port = DefaultPort
} else if web.Port < 0 || web.Port > math.MaxUint16 {
panic(fmt.Sprintf("invalid port: value should be between %d and %d", 0, math.MaxUint16))
return fmt.Errorf("invalid port: value should be between %d and %d", 0, math.MaxUint16)
}
return nil
}
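
A quick usage sketch of the validation above, from inside the config package (values are illustrative):

web := &WebConfig{Port: 65536}
if err := web.validateAndSetDefaults(); err != nil {
	// err: "invalid port: value should be between 0 and 65535"
}
// With Address and Port left empty/zero, DefaultAddress and DefaultPort are applied and nil is returned.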

// SocketAddress returns the combination of the Address and the Port

@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
@@ -25,7 +26,7 @@ func badgeHandler(writer http.ResponseWriter, request *http.Request) {
|
||||
}
|
||||
identifier := variables["identifier"]
|
||||
key := strings.TrimSuffix(identifier, ".svg")
|
||||
serviceStatus := storage.Get().GetServiceStatusByKey(key)
|
||||
serviceStatus := storage.Get().GetServiceStatusByKey(key, paging.NewServiceStatusParams().WithUptime())
|
||||
if serviceStatus == nil {
|
||||
writer.WriteHeader(http.StatusNotFound)
|
||||
_, _ = writer.Write([]byte("Requested service not found"))
|
||||
|
||||
@@ -13,8 +13,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/config"
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/security"
|
||||
"github.com/TwinProduction/gatus/storage"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
"github.com/TwinProduction/gocache"
|
||||
"github.com/TwinProduction/health"
|
||||
"github.com/gorilla/mux"
|
||||
@@ -26,7 +28,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.FirstInFirstOut)
|
||||
|
||||
// staticFolder is the path to the location of the static folder from the root path of the project
|
||||
// The only reason this is exposed is to allow running tests from a different path than the root path of the project
|
||||
@@ -37,27 +39,20 @@ var (
|
||||
server *http.Server
|
||||
)
|
||||
|
||||
func init() {
|
||||
if err := cache.StartJanitor(); err != nil {
|
||||
log.Fatal("[controller][init] Failed to start cache janitor:", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Handle creates the router and starts the server
|
||||
func Handle() {
|
||||
cfg := config.Get()
|
||||
var router http.Handler = CreateRouter(cfg)
|
||||
func Handle(securityConfig *security.Config, webConfig *config.WebConfig, enableMetrics bool) {
|
||||
var router http.Handler = CreateRouter(securityConfig, enableMetrics)
|
||||
if os.Getenv("ENVIRONMENT") == "dev" {
|
||||
router = developmentCorsHandler(router)
|
||||
}
|
||||
server = &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%d", cfg.Web.Address, cfg.Web.Port),
|
||||
Addr: fmt.Sprintf("%s:%d", webConfig.Address, webConfig.Port),
|
||||
Handler: router,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
IdleTimeout: 15 * time.Second,
|
||||
}
|
||||
log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress())
|
||||
log.Println("[controller][Handle] Listening on " + webConfig.SocketAddress())
|
||||
if os.Getenv("ROUTER_TEST") == "true" {
|
||||
return
|
||||
}
|
||||
@@ -73,15 +68,15 @@ func Shutdown() {
|
||||
}
|
||||
|
||||
// CreateRouter creates the router for the http server
|
||||
func CreateRouter(cfg *config.Config) *mux.Router {
|
||||
func CreateRouter(securityConfig *security.Config, enabledMetrics bool) *mux.Router {
|
||||
router := mux.NewRouter()
|
||||
if cfg.Metrics {
|
||||
if enabledMetrics {
|
||||
router.Handle("/metrics", promhttp.Handler()).Methods("GET")
|
||||
}
|
||||
router.Handle("/health", health.Handler().WithJSON(true)).Methods("GET")
|
||||
router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET")
|
||||
router.HandleFunc("/api/v1/statuses", secureIfNecessary(cfg, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content
|
||||
router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(cfg, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
|
||||
router.HandleFunc("/api/v1/statuses", secureIfNecessary(securityConfig, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content
|
||||
router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(securityConfig, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
|
||||
router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET")
|
||||
// SPA
|
||||
router.HandleFunc("/services/{service}", spaHandler).Methods("GET")
|
||||
@@ -90,9 +85,9 @@ func CreateRouter(cfg *config.Config) *mux.Router {
|
||||
return router
|
||||
}
|
||||
|
||||
func secureIfNecessary(cfg *config.Config, handler http.HandlerFunc) http.HandlerFunc {
|
||||
if cfg.Security != nil && cfg.Security.IsValid() {
|
||||
return security.Handler(handler, cfg.Security)
|
||||
func secureIfNecessary(securityConfig *security.Config, handler http.HandlerFunc) http.HandlerFunc {
|
||||
if securityConfig != nil && securityConfig.IsValid() {
|
||||
return security.Handler(handler, securityConfig)
|
||||
}
|
||||
return handler
|
||||
}
|
||||
@@ -116,7 +111,7 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
var err error
buffer := &bytes.Buffer{}
gzipWriter := gzip.NewWriter(buffer)
data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize))
data, err = json.Marshal(storage.Get().GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(page, pageSize)))
if err != nil {
log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
writer.WriteHeader(http.StatusInternalServerError)
@@ -143,7 +138,7 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
page, pageSize := extractPageAndPageSizeFromRequest(r)
vars := mux.Vars(r)
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"])
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"], paging.NewServiceStatusParams().WithResults(page, pageSize).WithEvents(1, core.MaximumNumberOfEvents).WithUptime())
if serviceStatus == nil {
log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
writer.WriteHeader(http.StatusNotFound)
@@ -151,7 +146,7 @@ func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
return
}
data := map[string]interface{}{
"serviceStatus": serviceStatus.WithResultPagination(page, pageSize),
"serviceStatus": serviceStatus,
// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
// expose only the necessary data on /api/v1/statuses.
// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
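
The paging parameters introduced in this diff follow a small builder pattern; a usage sketch based on the calls visible above (the key value is illustrative):

params := paging.NewServiceStatusParams().
	WithResults(1, 20).                        // first page, 20 results
	WithEvents(1, core.MaximumNumberOfEvents). // include events
	WithUptime()                               // include uptime data
serviceStatus := storage.Get().GetServiceStatusByKey("some-service-key", params)
_ = serviceStatus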
|
||||
|
||||
@@ -38,7 +38,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
@@ -64,7 +63,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
@@ -107,7 +105,7 @@ func TestCreateRouter(t *testing.T) {
|
||||
}
|
||||
watchdog.UpdateServiceStatuses(cfg.Services[0], &core.Result{Success: true, Duration: time.Millisecond, Timestamp: time.Now()})
|
||||
watchdog.UpdateServiceStatuses(cfg.Services[1], &core.Result{Success: false, Duration: time.Second, Timestamp: time.Now()})
|
||||
router := CreateRouter(cfg)
|
||||
router := CreateRouter(cfg.Security, cfg.Metrics)
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Path string
|
||||
@@ -237,12 +235,10 @@ func TestHandle(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
config.Set(cfg)
|
||||
defer config.Set(nil)
|
||||
_ = os.Setenv("ROUTER_TEST", "true")
|
||||
_ = os.Setenv("ENVIRONMENT", "dev")
|
||||
defer os.Clearenv()
|
||||
Handle()
|
||||
Handle(cfg.Security, cfg.Web, cfg.Metrics)
|
||||
defer Shutdown()
|
||||
request, _ := http.NewRequest("GET", "/health", nil)
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
@@ -275,7 +271,7 @@ func TestServiceStatusesHandler(t *testing.T) {
|
||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||
firstResult.Timestamp = time.Time{}
|
||||
secondResult.Timestamp = time.Time{}
|
||||
router := CreateRouter(&config.Config{})
|
||||
router := CreateRouter(nil, false)
|
||||
|
||||
type Scenario struct {
|
||||
Name string
|
||||
|
||||
@@ -3,6 +3,8 @@ package controller
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -13,7 +15,7 @@ const (
|
||||
DefaultPageSize = 20
|
||||
|
||||
// MaximumPageSize is the maximum page size allowed
|
||||
MaximumPageSize = 100
|
||||
MaximumPageSize = core.MaximumNumberOfResults
|
||||
)
|
||||
|
||||
func extractPageAndPageSizeFromRequest(r *http.Request) (page int, pageSize int) {
|
||||
|
||||
@@ -23,7 +23,7 @@ const (

// DNSRCodePlaceholder is a place holder for DNS_RCODE
//
// Values that could be NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP and REFUSED
// Values that could replace the placeholder: NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED
DNSRCodePlaceholder = "[DNS_RCODE]"

// ResponseTimePlaceholder is a placeholder for the request response time, in milliseconds.
@@ -51,14 +51,19 @@ const (
// Usage: len([BODY].articles) == 10, len([BODY].name) > 5
LengthFunctionPrefix = "len("

// HasFunctionPrefix is the prefix for the has function
//
// Usage: has([BODY].errors) == true
HasFunctionPrefix = "has("

// PatternFunctionPrefix is the prefix for the pattern function
//
// Usage: pat(192.168.*.*)
// Usage: [IP] == pat(192.168.*.*)
PatternFunctionPrefix = "pat("

// AnyFunctionPrefix is the prefix for the any function
//
// Usage: any(1.1.1.1, 1.0.0.1)
// Usage: [IP] == any(1.1.1.1, 1.0.0.1)
AnyFunctionPrefix = "any("

// FunctionSuffix is the suffix for all functions
@@ -66,12 +71,20 @@ const (

// InvalidConditionElementSuffix is the suffix that will be appended to an invalid condition
InvalidConditionElementSuffix = "(INVALID)"

// maximumLengthBeforeTruncatingWhenComparedWithPattern is the maximum length an element being compared to a
// pattern can have.
//
// This is only used for aesthetic purposes; it does not influence whether the condition evaluation results in a
// success or a failure
maximumLengthBeforeTruncatingWhenComparedWithPattern = 25
)
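
Putting the placeholders and function prefixes above together, a few example conditions taken from the usage comments in this file:

conditions := []Condition{
	"[STATUS] == 200",               // plain placeholder comparison
	"len([BODY].articles) == 10",    // len(...) function
	"has([BODY].errors) == true",    // has(...) function
	"[IP] == pat(192.168.*.*)",      // pat(...) function
	"[IP] == any(1.1.1.1, 1.0.0.1)", // any(...) function
}
_ = conditions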
|
||||
|
||||
// Condition is a condition that needs to be met in order for a Service to be considered healthy.
|
||||
type Condition string
|
||||
|
||||
// evaluate the Condition with the Result of the health check
|
||||
// TODO: Add a mandatory space between each operators (e.g. " == " instead of "==") (BREAKING CHANGE)
|
||||
func (c Condition) evaluate(result *Result) bool {
|
||||
condition := string(c)
|
||||
success := false
|
||||
@@ -113,7 +126,7 @@ func (c Condition) evaluate(result *Result) bool {
|
||||
conditionToDisplay = prettifyNumericalParameters(parameters, resolvedParameters, "<")
|
||||
}
|
||||
} else {
|
||||
result.Errors = append(result.Errors, fmt.Sprintf("invalid condition '%s' has been provided", condition))
|
||||
result.AddError(fmt.Sprintf("invalid condition '%s' has been provided", condition))
|
||||
return false
|
||||
}
|
||||
if !success {
|
||||
@@ -123,6 +136,12 @@ func (c Condition) evaluate(result *Result) bool {
|
||||
return success
|
||||
}
|
||||
|
||||
// hasBodyPlaceholder checks whether the condition has a BodyPlaceholder
|
||||
// Used for determining whether the response body should be read or not
|
||||
func (c Condition) hasBodyPlaceholder() bool {
|
||||
return strings.Contains(string(c), BodyPlaceholder)
|
||||
}
|
||||
|
||||
// isEqual compares two strings.
|
||||
//
|
||||
// Supports the pattern and the any functions.
|
||||
@@ -181,7 +200,7 @@ func isEqual(first, second string) bool {
|
||||
func sanitizeAndResolve(elements []string, result *Result) ([]string, []string) {
|
||||
parameters := make([]string, len(elements))
|
||||
resolvedParameters := make([]string, len(elements))
|
||||
body := strings.TrimSpace(string(result.Body))
|
||||
body := strings.TrimSpace(string(result.body))
|
||||
for i, element := range elements {
|
||||
element = strings.TrimSpace(element)
|
||||
parameters[i] = element
|
||||
@@ -203,26 +222,39 @@ func sanitizeAndResolve(elements []string, result *Result) ([]string, []string)
default:
// if contains the BodyPlaceholder, then evaluate json path
if strings.Contains(element, BodyPlaceholder) {
wantLength := false
checkingForLength := false
checkingForExistence := false
if strings.HasPrefix(element, LengthFunctionPrefix) && strings.HasSuffix(element, FunctionSuffix) {
wantLength = true
checkingForLength = true
element = strings.TrimSuffix(strings.TrimPrefix(element, LengthFunctionPrefix), FunctionSuffix)
}
resolvedElement, resolvedElementLength, err := jsonpath.Eval(strings.TrimPrefix(element, BodyPlaceholder+"."), result.Body)
if err != nil {
if err.Error() != "unexpected end of JSON input" {
result.Errors = append(result.Errors, err.Error())
}
if wantLength {
element = LengthFunctionPrefix + element + FunctionSuffix + " " + InvalidConditionElementSuffix
if strings.HasPrefix(element, HasFunctionPrefix) && strings.HasSuffix(element, FunctionSuffix) {
checkingForExistence = true
element = strings.TrimSuffix(strings.TrimPrefix(element, HasFunctionPrefix), FunctionSuffix)
}
resolvedElement, resolvedElementLength, err := jsonpath.Eval(strings.TrimPrefix(strings.TrimPrefix(element, BodyPlaceholder), "."), result.body)
if checkingForExistence {
if err != nil {
element = "false"
} else {
element = element + " " + InvalidConditionElementSuffix
element = "true"
}
} else {
if wantLength {
element = strconv.Itoa(resolvedElementLength)
if err != nil {
if err.Error() != "unexpected end of JSON input" {
result.AddError(err.Error())
}
if checkingForLength {
element = LengthFunctionPrefix + element + FunctionSuffix + " " + InvalidConditionElementSuffix
} else {
element = element + " " + InvalidConditionElementSuffix
}
} else {
element = resolvedElement
if checkingForLength {
element = strconv.Itoa(resolvedElementLength)
} else {
element = resolvedElement
}
}
}
}
}
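
To summarize the new branch above: with has(...), a JSON path that cannot be resolved makes the element evaluate to "false" instead of being flagged as invalid, while a resolvable path evaluates to "true". An in-package sketch of the resulting behaviour (the bodies are illustrative):

ok := Condition("has([BODY].errors) == true").evaluate(&Result{body: []byte(`{"errors": ["oops"]}`)}) // true
ko := Condition("has([BODY].errors) == true").evaluate(&Result{body: []byte(`{"name": "john.doe"}`)}) // false
_, _ = ok, ko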
|
||||
@@ -258,6 +290,13 @@ func prettify(parameters []string, resolvedParameters []string, operator string)
|
||||
if strings.HasSuffix(resolvedParameters[0], InvalidConditionElementSuffix) || strings.HasSuffix(resolvedParameters[1], InvalidConditionElementSuffix) {
|
||||
return resolvedParameters[0] + " " + operator + " " + resolvedParameters[1]
|
||||
}
|
||||
// If using the pattern function, truncate the parameter it's being compared to if said parameter is long enough
|
||||
if strings.HasPrefix(parameters[0], PatternFunctionPrefix) && strings.HasSuffix(parameters[0], FunctionSuffix) && len(resolvedParameters[1]) > maximumLengthBeforeTruncatingWhenComparedWithPattern {
|
||||
resolvedParameters[1] = fmt.Sprintf("%.25s...(truncated)", resolvedParameters[1])
|
||||
}
|
||||
if strings.HasPrefix(parameters[1], PatternFunctionPrefix) && strings.HasSuffix(parameters[1], FunctionSuffix) && len(resolvedParameters[0]) > maximumLengthBeforeTruncatingWhenComparedWithPattern {
|
||||
resolvedParameters[0] = fmt.Sprintf("%.25s...(truncated)", resolvedParameters[0])
|
||||
}
|
||||
// First element is a placeholder
|
||||
if parameters[0] != resolvedParameters[0] && parameters[1] == resolvedParameters[1] {
|
||||
return parameters[0] + " (" + resolvedParameters[0] + ") " + operator + " " + parameters[1]
|
||||
|
||||
@@ -5,7 +5,7 @@ import "testing"
|
||||
func BenchmarkCondition_evaluateWithBodyStringAny(b *testing.B) {
|
||||
condition := Condition("[BODY].name == any(john.doe, jane.doe)")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
@@ -14,7 +14,7 @@ func BenchmarkCondition_evaluateWithBodyStringAny(b *testing.B) {
|
||||
func BenchmarkCondition_evaluateWithBodyStringAnyFailure(b *testing.B) {
|
||||
condition := Condition("[BODY].name == any(john.doe, jane.doe)")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
@@ -23,7 +23,7 @@ func BenchmarkCondition_evaluateWithBodyStringAnyFailure(b *testing.B) {
|
||||
func BenchmarkCondition_evaluateWithBodyString(b *testing.B) {
|
||||
condition := Condition("[BODY].name == john.doe")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
@@ -32,7 +32,16 @@ func BenchmarkCondition_evaluateWithBodyString(b *testing.B) {
|
||||
func BenchmarkCondition_evaluateWithBodyStringFailure(b *testing.B) {
|
||||
condition := Condition("[BODY].name == john.doe")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
}
|
||||
|
||||
func BenchmarkCondition_evaluateWithBodyStringFailureInvalidPath(b *testing.B) {
|
||||
condition := Condition("[BODY].user.name == bob.doe")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
@@ -41,7 +50,7 @@ func BenchmarkCondition_evaluateWithBodyStringFailure(b *testing.B) {
|
||||
func BenchmarkCondition_evaluateWithBodyStringLen(b *testing.B) {
|
||||
condition := Condition("len([BODY].name) == 8")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"john.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"john.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
@@ -50,7 +59,7 @@ func BenchmarkCondition_evaluateWithBodyStringLen(b *testing.B) {
|
||||
func BenchmarkCondition_evaluateWithBodyStringLenFailure(b *testing.B) {
|
||||
condition := Condition("len([BODY].name) == 8")
|
||||
for n := 0; n < b.N; n++ {
|
||||
result := &Result{Body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
result := &Result{body: []byte("{\"name\": \"bob.doe\"}")}
|
||||
condition.evaluate(result)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
|
||||
File diff suppressed because it is too large

core/dns.go
@@ -29,16 +29,17 @@ type DNS struct {
QueryName string `yaml:"query-name"`
}

func (d *DNS) validateAndSetDefault() {
func (d *DNS) validateAndSetDefault() error {
if len(d.QueryName) == 0 {
panic(ErrDNSWithNoQueryName)
return ErrDNSWithNoQueryName
}
if !strings.HasSuffix(d.QueryName, ".") {
d.QueryName += "."
}
if _, ok := dns.StringToType[d.QueryType]; !ok {
panic(ErrDNSWithInvalidQueryType)
return ErrDNSWithInvalidQueryType
}
return nil
}
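
A short sketch of how the validation above behaves (values illustrative):

d := &DNS{QueryType: "A", QueryName: "example.com"}
if err := d.validateAndSetDefault(); err == nil {
	// d.QueryName is now "example.com." — the trailing dot is appended automatically
}
bad := &DNS{QueryType: "B", QueryName: "example.com"}
err := bad.validateAndSetDefault() // ErrDNSWithInvalidQueryType
_ = err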
|
||||
|
||||
func (d *DNS) query(url string, result *Result) {
|
||||
@@ -51,7 +52,7 @@ func (d *DNS) query(url string, result *Result) {
|
||||
m.SetQuestion(d.QueryName, queryType)
|
||||
r, _, err := c.Exchange(m, url)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
result.AddError(err.Error())
|
||||
return
|
||||
}
|
||||
result.Connected = true
|
||||
@@ -60,26 +61,26 @@ func (d *DNS) query(url string, result *Result) {
|
||||
switch rr.Header().Rrtype {
|
||||
case dns.TypeA:
|
||||
if a, ok := rr.(*dns.A); ok {
|
||||
result.Body = []byte(a.A.String())
|
||||
result.body = []byte(a.A.String())
|
||||
}
|
||||
case dns.TypeAAAA:
|
||||
if aaaa, ok := rr.(*dns.AAAA); ok {
|
||||
result.Body = []byte(aaaa.AAAA.String())
|
||||
result.body = []byte(aaaa.AAAA.String())
|
||||
}
|
||||
case dns.TypeCNAME:
|
||||
if cname, ok := rr.(*dns.CNAME); ok {
|
||||
result.Body = []byte(cname.Target)
|
||||
result.body = []byte(cname.Target)
|
||||
}
|
||||
case dns.TypeMX:
|
||||
if mx, ok := rr.(*dns.MX); ok {
|
||||
result.Body = []byte(mx.Mx)
|
||||
result.body = []byte(mx.Mx)
|
||||
}
|
||||
case dns.TypeNS:
|
||||
if ns, ok := rr.(*dns.NS); ok {
|
||||
result.Body = []byte(ns.Ns)
|
||||
result.body = []byte(ns.Ns)
|
||||
}
|
||||
default:
|
||||
result.Body = []byte("query type is not supported yet")
|
||||
result.body = []byte("query type is not supported yet")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,12 +91,12 @@ func TestIntegrationQuery(t *testing.T) {
 
 			if test.inputDNS.QueryType == "NS" {
 				// Because there are often multiple nameservers backing a single domain, we'll only look at the suffix
-				if !pattern.Match(test.expectedBody, string(result.Body)) {
-					t.Errorf("got %s, expected result %s,", string(result.Body), test.expectedBody)
+				if !pattern.Match(test.expectedBody, string(result.body)) {
+					t.Errorf("got %s, expected result %s,", string(result.body), test.expectedBody)
 				}
 			} else {
-				if string(result.Body) != test.expectedBody {
-					t.Errorf("got %s, expected result %s,", string(result.Body), test.expectedBody)
+				if string(result.body) != test.expectedBody {
+					t.Errorf("got %s, expected result %s,", string(result.body), test.expectedBody)
 				}
 			}
 		})
@@ -109,8 +109,10 @@ func TestService_ValidateAndSetDefaultsWithNoDNSQueryName(t *testing.T) {
 		QueryType: "A",
 		QueryName: "",
 	}
-	dns.validateAndSetDefault()
-	t.Fatal("Should've panicked because service`s dns didn't have a query name, which is a mandatory field for dns")
+	err := dns.validateAndSetDefault()
+	if err == nil {
+		t.Fatal("Should've returned an error because service`s dns didn't have a query name, which is a mandatory field for dns")
+	}
 }
 
 func TestService_ValidateAndSetDefaultsWithInvalidDNSQueryType(t *testing.T) {
@@ -119,6 +121,8 @@ func TestService_ValidateAndSetDefaultsWithInvalidDNSQueryType(t *testing.T) {
 		QueryType: "B",
 		QueryName: "example.com",
 	}
-	dns.validateAndSetDefault()
-	t.Fatal("Should've panicked because service`s dns query type is invalid, it needs to be a valid query name like A, AAAA, CNAME...")
+	err := dns.validateAndSetDefault()
+	if err == nil {
+		t.Fatal("Should've returned an error because service`s dns query type is invalid, it needs to be a valid query name like A, AAAA, CNAME...")
+	}
 }
@@ -24,3 +24,14 @@
 	// EventUnhealthy is a type of event that represents a service failing one or more of its conditions
 	EventUnhealthy EventType = "UNHEALTHY"
 )
+
+// NewEventFromResult creates an Event from a Result
+func NewEventFromResult(result *Result) *Event {
+	event := &Event{Timestamp: result.Timestamp}
+	if result.Success {
+		event.Type = EventHealthy
+	} else {
+		event.Type = EventUnhealthy
+	}
+	return event
+}
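NewEventFromResult centralizes the success-to-event-type mapping. A small usage sketch, with the serviceStatus variable assumed purely for illustration; code that previously built the Event inline can now delegate to it:

// Derive the state-change event from the latest result and record it.
event := NewEventFromResult(result) // EventHealthy if result.Success, EventUnhealthy otherwise
serviceStatus.Events = append(serviceStatus.Events, event)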
@@ -12,10 +12,7 @@ type Result struct {
 	// DNSRCode is the response code of a DNS query in a human readable format
 	DNSRCode string `json:"-"`
 
-	// Body is the response body
-	Body []byte `json:"-"`
-
-	// Hostname extracted from the Service URL
+	// Hostname extracted from Service.URL
 	Hostname string `json:"hostname"`
 
 	// IP resolved from the Service URL
@@ -41,4 +38,23 @@ type Result struct {
 
 	// CertificateExpiration is the duration before the certificate expires
 	CertificateExpiration time.Duration `json:"-"`
+
+	// body is the response body
+	//
+	// Note that this variable is only used during the evaluation of a service's health.
+	// This means that the call Service.EvaluateHealth both populates the body (if necessary)
+	// and sets it to nil after the evaluation has been completed.
+	body []byte
 }
+
+// AddError adds an error to the result's list of errors.
+// It also ensures that there are no duplicates.
+func (r *Result) AddError(error string) {
+	for _, resultError := range r.Errors {
+		if resultError == error {
+			// If the error already exists, don't add it
+			return
+		}
+	}
+	r.Errors = append(r.Errors, error)
+}
core/result_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
+package core
+
+import (
+	"testing"
+)
+
+func TestResult_AddError(t *testing.T) {
+	result := &Result{}
+	result.AddError("potato")
+	if len(result.Errors) != 1 {
+		t.Error("should've had 1 error")
+	}
+	result.AddError("potato")
+	if len(result.Errors) != 1 {
+		t.Error("should've still had 1 error, because a duplicate error was added")
+	}
+	result.AddError("tomato")
+	if len(result.Errors) != 2 {
+		t.Error("should've had 2 errors")
+	}
+}
@@ -1,126 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaximumNumberOfResults is the maximum number of results that ServiceStatus.Results can have
|
||||
MaximumNumberOfResults = 100
|
||||
|
||||
// MaximumNumberOfEvents is the maximum number of events that ServiceStatus.Events can have
|
||||
MaximumNumberOfEvents = 50
|
||||
)
|
||||
|
||||
// ServiceStatus contains the evaluation Results of a Service
|
||||
type ServiceStatus struct {
|
||||
// Name of the service
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// Group the service is a part of. Used for grouping multiple services together on the front end.
|
||||
Group string `json:"group,omitempty"`
|
||||
|
||||
// Key is the key representing the ServiceStatus
|
||||
Key string `json:"key"`
|
||||
|
||||
// Results is the list of service evaluation results
|
||||
Results []*Result `json:"results"`
|
||||
|
||||
// Events is a list of events
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it to a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Events []*Event `json:"-"`
|
||||
|
||||
// Uptime information on the service's uptime
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it to a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Uptime *Uptime `json:"-"`
|
||||
}
|
||||
|
||||
// NewServiceStatus creates a new ServiceStatus
|
||||
func NewServiceStatus(service *Service) *ServiceStatus {
|
||||
return &ServiceStatus{
|
||||
Name: service.Name,
|
||||
Group: service.Group,
|
||||
Key: util.ConvertGroupAndServiceToKey(service.Group, service.Name),
|
||||
Results: make([]*Result, 0),
|
||||
Events: []*Event{{
|
||||
Type: EventStart,
|
||||
Timestamp: time.Now(),
|
||||
}},
|
||||
Uptime: NewUptime(),
|
||||
}
|
||||
}
|
||||
|
||||
// ShallowCopy creates a shallow copy of ServiceStatus
|
||||
func (ss *ServiceStatus) ShallowCopy() *ServiceStatus {
|
||||
return &ServiceStatus{
|
||||
Name: ss.Name,
|
||||
Group: ss.Group,
|
||||
Key: ss.Key,
|
||||
Results: ss.Results,
|
||||
Events: ss.Events,
|
||||
Uptime: ss.Uptime,
|
||||
}
|
||||
}
|
||||
|
||||
// WithResultPagination makes a shallow copy of the ServiceStatus with only the results
|
||||
// within the range defined by the page and pageSize parameters
|
||||
func (ss *ServiceStatus) WithResultPagination(page, pageSize int) *ServiceStatus {
|
||||
shallowCopy := ss.ShallowCopy()
|
||||
numberOfResults := len(shallowCopy.Results)
|
||||
start := numberOfResults - (page * pageSize)
|
||||
end := numberOfResults - ((page - 1) * pageSize)
|
||||
if start > numberOfResults {
|
||||
start = -1
|
||||
} else if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
if end > numberOfResults {
|
||||
end = numberOfResults
|
||||
}
|
||||
if start < 0 || end < 0 {
|
||||
shallowCopy.Results = []*Result{}
|
||||
} else {
|
||||
shallowCopy.Results = shallowCopy.Results[start:end]
|
||||
}
|
||||
return shallowCopy
|
||||
}
|
||||
|
||||
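The start/end arithmetic in the removed WithResultPagination above counts backwards from the most recent result; a worked example with 25 stored results makes the clamping behaviour concrete:

// numberOfResults = 25, pageSize = 10
// page 1:  start = 25-10 = 15, end = 25        -> Results[15:25], the 10 most recent
// page 2:  start = 25-20 = 5,  end = 25-10 = 15 -> Results[5:15]
// page 3:  start = -5 -> clamped to 0, end = 5  -> Results[0:5], the 5 oldest
// page 4:  start clamped to 0, end = -5 < 0     -> empty slice
// page -1, pageSize 20: start = 45 > 25 -> start set to -1 -> empty slice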
// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
|
||||
// no more than 20 results in the Results slice
|
||||
func (ss *ServiceStatus) AddResult(result *Result) {
|
||||
if len(ss.Results) > 0 {
|
||||
// Check if there's any change since the last result
|
||||
// OR there's only 1 event, which only happens when there's a start event
|
||||
if ss.Results[len(ss.Results)-1].Success != result.Success || len(ss.Events) == 1 {
|
||||
event := &Event{Timestamp: result.Timestamp}
|
||||
if result.Success {
|
||||
event.Type = EventHealthy
|
||||
} else {
|
||||
event.Type = EventUnhealthy
|
||||
}
|
||||
ss.Events = append(ss.Events, event)
|
||||
if len(ss.Events) > MaximumNumberOfEvents {
|
||||
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
|
||||
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
|
||||
// length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
|
||||
ss.Events = ss.Events[len(ss.Events)-MaximumNumberOfEvents:]
|
||||
}
|
||||
}
|
||||
}
|
||||
ss.Results = append(ss.Results, result)
|
||||
if len(ss.Results) > MaximumNumberOfResults {
|
||||
// Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
|
||||
// than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
|
||||
// MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
|
||||
ss.Results = ss.Results[len(ss.Results)-MaximumNumberOfResults:]
|
||||
}
|
||||
ss.Uptime.ProcessResult(result)
|
||||
}
|
||||
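Both the Events and Results slices in the removed AddResult above are capped with the same idiom; a generic sketch of that bounded append, where the helper name is an illustration and not code from this change:

// appendBounded keeps at most max entries, dropping the oldest ones in a single slice
// operation, mirroring ss.Events[len(ss.Events)-MaximumNumberOfEvents:] above.
func appendBounded(results []*Result, r *Result, max int) []*Result {
	results = append(results, r)
	if len(results) > max {
		results = results[len(results)-max:]
	}
	return results
}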
@@ -1,66 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewServiceStatus(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
if serviceStatus.Name != service.Name {
|
||||
t.Errorf("expected %s, got %s", service.Name, serviceStatus.Name)
|
||||
}
|
||||
if serviceStatus.Group != service.Group {
|
||||
t.Errorf("expected %s, got %s", service.Group, serviceStatus.Group)
|
||||
}
|
||||
if serviceStatus.Key != "group_name" {
|
||||
t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceStatus_AddResult(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
for i := 0; i < MaximumNumberOfResults+10; i++ {
|
||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
||||
}
|
||||
if len(serviceStatus.Results) != MaximumNumberOfResults {
|
||||
t.Errorf("expected serviceStatus.Results to not exceed a length of 20")
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceStatus_WithResultPagination(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
for i := 0; i < 25; i++ {
|
||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 1).Results) != 1 {
|
||||
t.Errorf("expected to have 1 result")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(5, 0).Results) != 0 {
|
||||
t.Errorf("expected to have 0 results")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(-1, 20).Results) != 0 {
|
||||
t.Errorf("expected to have 0 result, because the page was invalid")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, -1).Results) != 0 {
|
||||
t.Errorf("expected to have 0 result, because the page size was invalid")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 10).Results) != 10 {
|
||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(2, 10).Results) != 10 {
|
||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(3, 10).Results) != 5 {
|
||||
t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(4, 10).Results) != 0 {
|
||||
t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
|
||||
}
|
||||
if len(serviceStatus.WithResultPagination(1, 50).Results) != 25 {
|
||||
t.Errorf("expected to have 25 results, because there's only 25 results")
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
@@ -11,7 +12,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/alerting/alert"
|
||||
"github.com/TwinProduction/gatus/client"
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -72,7 +75,7 @@ type Service struct {
|
||||
Conditions []*Condition `yaml:"conditions"`
|
||||
|
||||
// Alerts is the alerting configuration for the service in case of failure
|
||||
Alerts []*Alert `yaml:"alerts"`
|
||||
Alerts []*alert.Alert `yaml:"alerts"`
|
||||
|
||||
// Insecure is whether to skip verifying the server's certificate chain and host name
|
||||
Insecure bool `yaml:"insecure,omitempty"`
|
||||
@@ -80,12 +83,12 @@ type Service struct {
|
||||
// NumberOfFailuresInARow is the number of unsuccessful evaluations in a row
|
||||
NumberOfFailuresInARow int
|
||||
|
||||
// NumberOfFailuresInARow is the number of successful evaluations in a row
|
||||
// NumberOfSuccessesInARow is the number of successful evaluations in a row
|
||||
NumberOfSuccessesInARow int
|
||||
}
|
||||
|
||||
// ValidateAndSetDefaults validates the service's configuration and sets the default value of fields that have one
|
||||
func (service *Service) ValidateAndSetDefaults() {
|
||||
func (service *Service) ValidateAndSetDefaults() error {
|
||||
// Set default values
|
||||
if service.Interval == 0 {
|
||||
service.Interval = 1 * time.Minute
|
||||
@@ -105,32 +108,37 @@ func (service *Service) ValidateAndSetDefaults() {
|
||||
if _, contentTypeHeaderExists := service.Headers[ContentTypeHeader]; !contentTypeHeaderExists && service.GraphQL {
|
||||
service.Headers[ContentTypeHeader] = "application/json"
|
||||
}
|
||||
for _, alert := range service.Alerts {
|
||||
if alert.FailureThreshold <= 0 {
|
||||
alert.FailureThreshold = 3
|
||||
for _, serviceAlert := range service.Alerts {
|
||||
if serviceAlert.FailureThreshold <= 0 {
|
||||
serviceAlert.FailureThreshold = 3
|
||||
}
|
||||
if alert.SuccessThreshold <= 0 {
|
||||
alert.SuccessThreshold = 2
|
||||
if serviceAlert.SuccessThreshold <= 0 {
|
||||
serviceAlert.SuccessThreshold = 2
|
||||
}
|
||||
}
|
||||
if len(service.Name) == 0 {
|
||||
panic(ErrServiceWithNoName)
|
||||
return ErrServiceWithNoName
|
||||
}
|
||||
if len(service.URL) == 0 {
|
||||
panic(ErrServiceWithNoURL)
|
||||
return ErrServiceWithNoURL
|
||||
}
|
||||
if len(service.Conditions) == 0 {
|
||||
panic(ErrServiceWithNoCondition)
|
||||
return ErrServiceWithNoCondition
|
||||
}
|
||||
if service.DNS != nil {
|
||||
service.DNS.validateAndSetDefault()
|
||||
return
|
||||
return service.DNS.validateAndSetDefault()
|
||||
}
|
||||
// Make sure that the request can be created
|
||||
_, err := http.NewRequest(service.Method, service.URL, bytes.NewBuffer([]byte(service.Body)))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
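Because ValidateAndSetDefaults now returns an error rather than panicking, configuration loading can report which service is misconfigured. A minimal sketch, assuming a services slice and the standard fmt package; the wrapping code is not part of this diff:

for _, s := range services {
	if err := s.ValidateAndSetDefaults(); err != nil {
		// err is e.g. ErrServiceWithNoName, ErrServiceWithNoURL or ErrServiceWithNoCondition
		return fmt.Errorf("invalid configuration for service %q: %w", s.Name, err)
	}
}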
// Key returns the unique key for the Service
|
||||
func (service Service) Key() string {
|
||||
return util.ConvertGroupAndServiceToKey(service.Group, service.Name)
|
||||
}
|
||||
|
||||
// EvaluateHealth sends a request to the service's URL and evaluates the conditions of the service.
|
||||
@@ -149,38 +157,25 @@ func (service *Service) EvaluateHealth() *Result {
|
||||
}
|
||||
}
|
||||
result.Timestamp = time.Now()
|
||||
// No need to keep the body after the service has been evaluated
|
||||
result.body = nil
|
||||
return result
|
||||
}
|
||||
|
||||
// GetAlertsTriggered returns a slice of alerts that have been triggered
|
||||
func (service *Service) GetAlertsTriggered() []Alert {
|
||||
var alerts []Alert
|
||||
if service.NumberOfFailuresInARow == 0 {
|
||||
return alerts
|
||||
}
|
||||
for _, alert := range service.Alerts {
|
||||
if alert.Enabled && alert.FailureThreshold == service.NumberOfFailuresInARow {
|
||||
alerts = append(alerts, *alert)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return alerts
|
||||
}
|
||||
|
||||
func (service *Service) getIP(result *Result) {
|
||||
if service.DNS != nil {
|
||||
result.Hostname = strings.TrimSuffix(service.URL, ":53")
|
||||
} else {
|
||||
urlObject, err := url.Parse(service.URL)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
result.AddError(err.Error())
|
||||
return
|
||||
}
|
||||
result.Hostname = urlObject.Hostname()
|
||||
}
|
||||
ips, err := net.LookupIP(result.Hostname)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
result.AddError(err.Error())
|
||||
return
|
||||
}
|
||||
result.IP = ips[0].String()
|
||||
@@ -190,10 +185,12 @@ func (service *Service) call(result *Result) {
|
||||
var request *http.Request
|
||||
var response *http.Response
|
||||
var err error
|
||||
var certificate *x509.Certificate
|
||||
isServiceDNS := service.DNS != nil
|
||||
isServiceTCP := strings.HasPrefix(service.URL, "tcp://")
|
||||
isServiceICMP := strings.HasPrefix(service.URL, "icmp://")
|
||||
isServiceHTTP := !isServiceDNS && !isServiceTCP && !isServiceICMP
|
||||
isServiceStartTLS := strings.HasPrefix(service.URL, "starttls://")
|
||||
isServiceHTTP := !isServiceDNS && !isServiceTCP && !isServiceICMP && !isServiceStartTLS
|
||||
if isServiceHTTP {
|
||||
request = service.buildHTTPRequest()
|
||||
}
|
||||
@@ -201,6 +198,14 @@ func (service *Service) call(result *Result) {
|
||||
if isServiceDNS {
|
||||
service.DNS.query(service.URL, result)
|
||||
result.Duration = time.Since(startTime)
|
||||
} else if isServiceStartTLS {
|
||||
result.Connected, certificate, err = client.CanPerformStartTLS(strings.TrimPrefix(service.URL, "starttls://"), service.Insecure)
|
||||
if err != nil {
|
||||
result.AddError(err.Error())
|
||||
return
|
||||
}
|
||||
result.Duration = time.Since(startTime)
|
||||
result.CertificateExpiration = time.Until(certificate.NotAfter)
|
||||
} else if isServiceTCP {
|
||||
result.Connected = client.CanCreateTCPConnection(strings.TrimPrefix(service.URL, "tcp://"))
|
||||
result.Duration = time.Since(startTime)
|
||||
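The new starttls:// branch reuses the same certificate-expiration bookkeeping as HTTPS. A standalone sketch of that call, with the SMTP address picked purely for illustration:

// Check that STARTTLS can be negotiated and inspect the certificate it presents.
connected, certificate, err := client.CanPerformStartTLS("smtp.example.org:587", false)
if err == nil && connected {
	// Same value the service check stores in result.CertificateExpiration
	fmt.Println("certificate expires in", time.Until(certificate.NotAfter))
}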
@@ -210,19 +215,22 @@ func (service *Service) call(result *Result) {
|
||||
response, err = client.GetHTTPClient(service.Insecure).Do(request)
|
||||
result.Duration = time.Since(startTime)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
result.AddError(err.Error())
|
||||
return
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.TLS != nil && len(response.TLS.PeerCertificates) > 0 {
|
||||
certificate := response.TLS.PeerCertificates[0]
|
||||
certificate = response.TLS.PeerCertificates[0]
|
||||
result.CertificateExpiration = time.Until(certificate.NotAfter)
|
||||
}
|
||||
result.HTTPStatus = response.StatusCode
|
||||
result.Connected = response.StatusCode > 0
|
||||
result.Body, err = ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
// Only read the body if there's a condition that uses the BodyPlaceholder
|
||||
if service.needsToReadBody() {
|
||||
result.body, err = ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
result.AddError(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -247,3 +255,13 @@ func (service *Service) buildHTTPRequest() *http.Request {
|
||||
}
|
||||
return request
|
||||
}
|
||||
|
||||
// needsToReadBody checks if there's any conditions that requires the response body to be read
|
||||
func (service *Service) needsToReadBody() bool {
|
||||
for _, condition := range service.Conditions {
|
||||
if condition.hasBodyPlaceholder() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
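needsToReadBody relies on Condition.hasBodyPlaceholder, which is not included in this excerpt; a plausible sketch of what such a check could look like, given the BodyPlaceholder constant mentioned in the comment above (an assumption, the real implementation may differ):

// hasBodyPlaceholder reports whether the condition references the response body.
func (c Condition) hasBodyPlaceholder() bool {
	return strings.Contains(string(c), BodyPlaceholder) // e.g. "[BODY]"
}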
core/service_status.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package core
|
||||
|
||||
const (
|
||||
// MaximumNumberOfResults is the maximum number of results that ServiceStatus.Results can have
|
||||
MaximumNumberOfResults = 100
|
||||
|
||||
// MaximumNumberOfEvents is the maximum number of events that ServiceStatus.Events can have
|
||||
MaximumNumberOfEvents = 50
|
||||
)
|
||||
|
||||
// ServiceStatus contains the evaluation Results of a Service
|
||||
type ServiceStatus struct {
|
||||
// Name of the service
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// Group the service is a part of. Used for grouping multiple services together on the front end.
|
||||
Group string `json:"group,omitempty"`
|
||||
|
||||
// Key is the key representing the ServiceStatus
|
||||
Key string `json:"key"`
|
||||
|
||||
// Results is the list of service evaluation results
|
||||
Results []*Result `json:"results"`
|
||||
|
||||
// Events is a list of events
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it to a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Events []*Event `json:"-"`
|
||||
|
||||
// Uptime information on the service's uptime
|
||||
//
|
||||
// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
|
||||
// However, the detailed service page does leverage this by including it to a map that will be
|
||||
// marshalled alongside the ServiceStatus.
|
||||
Uptime *Uptime `json:"-"`
|
||||
}
|
||||
|
||||
// NewServiceStatus creates a new ServiceStatus
|
||||
func NewServiceStatus(serviceKey, serviceGroup, serviceName string) *ServiceStatus {
|
||||
return &ServiceStatus{
|
||||
Name: serviceName,
|
||||
Group: serviceGroup,
|
||||
Key: serviceKey,
|
||||
Results: make([]*Result, 0),
|
||||
Events: make([]*Event, 0),
|
||||
Uptime: NewUptime(),
|
||||
}
|
||||
}
|
||||
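NewServiceStatus now takes a precomputed key plus the group and name instead of a *Service, and no longer seeds Events with a start event; the call-site shape used by the test below is simply:

serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)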
core/service_status_test.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package core
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewServiceStatus(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
if serviceStatus.Name != service.Name {
|
||||
t.Errorf("expected %s, got %s", service.Name, serviceStatus.Name)
|
||||
}
|
||||
if serviceStatus.Group != service.Group {
|
||||
t.Errorf("expected %s, got %s", service.Group, serviceStatus.Group)
|
||||
}
|
||||
if serviceStatus.Key != "group_name" {
|
||||
t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
|
||||
}
|
||||
}
|
||||
@@ -5,15 +5,17 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/alerting/alert"
|
||||
)
|
||||
|
||||
func TestService_ValidateAndSetDefaults(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Alerts: []*Alert{{Type: PagerDutyAlert}},
|
||||
Alerts: []*alert.Alert{{Type: alert.TypePagerDuty}},
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
if service.Method != "GET" {
|
||||
@@ -28,7 +30,7 @@ func TestService_ValidateAndSetDefaults(t *testing.T) {
|
||||
if len(service.Alerts) != 1 {
|
||||
t.Error("Service should've had 1 alert")
|
||||
}
|
||||
if service.Alerts[0].Enabled {
|
||||
if service.Alerts[0].IsEnabled() {
|
||||
t.Error("Service alert should've defaulted to disabled")
|
||||
}
|
||||
if service.Alerts[0].SuccessThreshold != 2 {
|
||||
@@ -47,8 +49,10 @@ func TestService_ValidateAndSetDefaultsWithNoName(t *testing.T) {
|
||||
URL: "http://example.com",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
t.Fatal("Should've panicked because service didn't have a name, which is a mandatory field")
|
||||
err := service.ValidateAndSetDefaults()
|
||||
if err == nil {
|
||||
t.Fatal("Should've returned an error because service didn't have a name, which is a mandatory field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_ValidateAndSetDefaultsWithNoUrl(t *testing.T) {
|
||||
@@ -59,8 +63,10 @@ func TestService_ValidateAndSetDefaultsWithNoUrl(t *testing.T) {
|
||||
URL: "",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
t.Fatal("Should've panicked because service didn't have an url, which is a mandatory field")
|
||||
err := service.ValidateAndSetDefaults()
|
||||
if err == nil {
|
||||
t.Fatal("Should've returned an error because service didn't have an url, which is a mandatory field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_ValidateAndSetDefaultsWithNoConditions(t *testing.T) {
|
||||
@@ -70,8 +76,10 @@ func TestService_ValidateAndSetDefaultsWithNoConditions(t *testing.T) {
|
||||
URL: "http://example.com",
|
||||
Conditions: nil,
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
t.Fatal("Should've panicked because service didn't have at least 1 condition")
|
||||
err := service.ValidateAndSetDefaults()
|
||||
if err == nil {
|
||||
t.Fatal("Should've returned an error because service didn't have at least 1 condition")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_ValidateAndSetDefaultsWithDNS(t *testing.T) {
|
||||
@@ -85,40 +93,19 @@ func TestService_ValidateAndSetDefaultsWithDNS(t *testing.T) {
|
||||
},
|
||||
Conditions: []*Condition{&conditionSuccess},
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
err := service.ValidateAndSetDefaults()
|
||||
if err != nil {
|
||||
|
||||
}
|
||||
if service.DNS.QueryName != "example.com." {
|
||||
t.Error("Service.dns.query-name should be formatted with . suffix")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_GetAlertsTriggered(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Alerts: []*Alert{{Type: PagerDutyAlert, Enabled: true}},
|
||||
}
|
||||
service.ValidateAndSetDefaults()
|
||||
if service.NumberOfFailuresInARow != 0 {
|
||||
t.Error("Service.NumberOfFailuresInARow should start with 0")
|
||||
}
|
||||
if service.NumberOfSuccessesInARow != 0 {
|
||||
t.Error("Service.NumberOfSuccessesInARow should start with 0")
|
||||
}
|
||||
if len(service.GetAlertsTriggered()) > 0 {
|
||||
t.Error("No alerts should've been triggered, because service.NumberOfFailuresInARow is 0, which is below the failure threshold")
|
||||
}
|
||||
service.NumberOfFailuresInARow = service.Alerts[0].FailureThreshold
|
||||
if len(service.GetAlertsTriggered()) != 1 {
|
||||
t.Error("Alert should've been triggered")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_buildHTTPRequest(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
@@ -138,7 +125,7 @@ func TestService_buildHTTPRequest(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithCustomUserAgent(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Headers: map[string]string{
|
||||
@@ -161,7 +148,7 @@ func TestService_buildHTTPRequestWithCustomUserAgent(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithHostHeader(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Method: "POST",
|
||||
Conditions: []*Condition{&condition},
|
||||
@@ -182,13 +169,13 @@ func TestService_buildHTTPRequestWithHostHeader(t *testing.T) {
|
||||
func TestService_buildHTTPRequestWithGraphQLEnabled(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-graphql",
|
||||
URL: "https://twinnation.org/graphql",
|
||||
Method: "POST",
|
||||
Conditions: []*Condition{&condition},
|
||||
GraphQL: true,
|
||||
Body: `{
|
||||
user(gender: "female") {
|
||||
users(gender: "female") {
|
||||
id
|
||||
name
|
||||
gender
|
||||
@@ -206,16 +193,17 @@ func TestService_buildHTTPRequestWithGraphQLEnabled(t *testing.T) {
|
||||
}
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
if !strings.HasPrefix(string(body), "{\"query\":") {
|
||||
t.Error("request.Body should've started with '{\"query\":', but it didn't:", string(body))
|
||||
t.Error("request.body should've started with '{\"query\":', but it didn't:", string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegrationEvaluateHealth(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 200")
|
||||
bodyCondition := Condition("[BODY].status == UP")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
Conditions: []*Condition{&condition, &bodyCondition},
|
||||
}
|
||||
result := service.EvaluateHealth()
|
||||
if !result.ConditionResults[0].Success {
|
||||
@@ -232,7 +220,7 @@ func TestIntegrationEvaluateHealth(t *testing.T) {
|
||||
func TestIntegrationEvaluateHealthWithFailure(t *testing.T) {
|
||||
condition := Condition("[STATUS] == 500")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "twinnation-health",
|
||||
URL: "https://twinnation.org/health",
|
||||
Conditions: []*Condition{&condition},
|
||||
}
|
||||
@@ -252,7 +240,7 @@ func TestIntegrationEvaluateHealthForDNS(t *testing.T) {
|
||||
conditionSuccess := Condition("[DNS_RCODE] == NOERROR")
|
||||
conditionBody := Condition("[BODY] == 93.184.216.34")
|
||||
service := Service{
|
||||
Name: "TwiNNatioN",
|
||||
Name: "example",
|
||||
URL: "8.8.8.8",
|
||||
DNS: &DNS{
|
||||
QueryType: "A",
|
||||
@@ -275,7 +263,7 @@ func TestIntegrationEvaluateHealthForDNS(t *testing.T) {
|
||||
func TestIntegrationEvaluateHealthForICMP(t *testing.T) {
|
||||
conditionSuccess := Condition("[CONNECTED] == true")
|
||||
service := Service{
|
||||
Name: "ICMP test",
|
||||
Name: "icmp-test",
|
||||
URL: "icmp://127.0.0.1",
|
||||
Conditions: []*Condition{&conditionSuccess},
|
||||
}
|
||||
@@ -294,7 +282,7 @@ func TestIntegrationEvaluateHealthForICMP(t *testing.T) {
|
||||
func TestService_getIP(t *testing.T) {
|
||||
conditionSuccess := Condition("[CONNECTED] == true")
|
||||
service := Service{
|
||||
Name: "Invalid URL test",
|
||||
Name: "invalid-url-test",
|
||||
URL: "",
|
||||
Conditions: []*Condition{&conditionSuccess},
|
||||
}
|
||||
@@ -304,3 +292,27 @@ func TestService_getIP(t *testing.T) {
|
||||
t.Error("service.getIP(result) should've thrown an error because the URL is invalid, thus cannot be parsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_NeedsToReadBody(t *testing.T) {
|
||||
statusCondition := Condition("[STATUS] == 200")
|
||||
bodyCondition := Condition("[BODY].status == UP")
|
||||
bodyConditionWithLength := Condition("len([BODY].tags) > 0")
|
||||
if (&Service{Conditions: []*Condition{&statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected false, got true")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyConditionWithLength}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&statusCondition, &bodyCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyCondition, &statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
if !(&Service{Conditions: []*Condition{&bodyConditionWithLength, &statusCondition}}).needsToReadBody() {
|
||||
t.Error("expected true, got false")
|
||||
}
|
||||
}
|
||||
|
||||
core/uptime.go (116 lines changed)
@@ -1,119 +1,45 @@
package core
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// RFC3339WithoutMinutesAndSeconds is the format defined by RFC3339 (see time.RFC3339) but with the minutes
|
||||
// and seconds hardcoded to 0.
|
||||
RFC3339WithoutMinutesAndSeconds = "2006-01-02T15:00:00Z07:00"
|
||||
|
||||
numberOfHoursInTenDays = 10 * 24
|
||||
sevenDays = 7 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// Uptime is the struct that contains the relevant data for calculating the uptime as well as the uptime itself
|
||||
// and some other statistics
|
||||
type Uptime struct {
|
||||
// LastSevenDays is the uptime percentage over the past 7 days
|
||||
LastSevenDays float64 `json:"7d"`
|
||||
LastSevenDays float64 `json:"7d"` // Uptime percentage over the past 7 days
|
||||
LastTwentyFourHours float64 `json:"24h"` // Uptime percentage over the past 24 hours
|
||||
LastHour float64 `json:"1h"` // Uptime percentage over the past hour
|
||||
|
||||
// LastTwentyFourHours is the uptime percentage over the past 24 hours
|
||||
LastTwentyFourHours float64 `json:"24h"`
|
||||
// SuccessfulExecutionsPerHour is a map containing the number of successes (value)
|
||||
// for every hourly unix timestamps (key)
|
||||
// Deprecated
|
||||
SuccessfulExecutionsPerHour map[int64]uint64 `json:"-"`
|
||||
|
||||
// LastHour is the uptime percentage over the past hour
|
||||
LastHour float64 `json:"1h"`
|
||||
// TotalExecutionsPerHour is a map containing the total number of checks (value)
|
||||
// for every hourly unix timestamps (key)
|
||||
// Deprecated
|
||||
TotalExecutionsPerHour map[int64]uint64 `json:"-"`
|
||||
|
||||
// SuccessCountPerHour is a map containing the number of successes per hour, per timestamp following the
|
||||
// custom RFC3339WithoutMinutesAndSeconds format
|
||||
SuccessCountPerHour map[string]uint64 `json:"-"`
|
||||
// HourlyStatistics is a map containing metrics collected (value) for every hourly unix timestamps (key)
|
||||
HourlyStatistics map[int64]*HourlyUptimeStatistics `json:"-"`
|
||||
}
|
||||
|
||||
// TotalCountPerHour is a map containing the total number of checks per hour, per timestamp following the
|
||||
// custom RFC3339WithoutMinutesAndSeconds format
|
||||
TotalCountPerHour map[string]uint64 `json:"-"`
|
||||
// HourlyUptimeStatistics is a struct containing all metrics collected over the course of an hour
|
||||
type HourlyUptimeStatistics struct {
|
||||
TotalExecutions uint64 // Total number of checks
|
||||
SuccessfulExecutions uint64 // Number of successful executions
|
||||
TotalExecutionsResponseTime uint64 // Total response time for all executions
|
||||
}
|
||||
|
||||
// NewUptime creates a new Uptime
|
||||
func NewUptime() *Uptime {
|
||||
return &Uptime{
|
||||
SuccessCountPerHour: make(map[string]uint64),
|
||||
TotalCountPerHour: make(map[string]uint64),
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessResult processes the result by extracting the relevant from the result and recalculating the uptime
|
||||
// if necessary
|
||||
func (uptime *Uptime) ProcessResult(result *Result) {
|
||||
timestampDateWithHour := result.Timestamp.Format(RFC3339WithoutMinutesAndSeconds)
|
||||
if result.Success {
|
||||
uptime.SuccessCountPerHour[timestampDateWithHour]++
|
||||
}
|
||||
uptime.TotalCountPerHour[timestampDateWithHour]++
|
||||
// Clean up only when we're starting to have too many useless keys
|
||||
// Note that this is only triggered when there are more entries than there should be after
|
||||
// 10 days, despite the fact that we are deleting everything that's older than 7 days.
|
||||
// This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
|
||||
if len(uptime.TotalCountPerHour) > numberOfHoursInTenDays {
|
||||
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour))
|
||||
for k := range uptime.TotalCountPerHour {
|
||||
dateWithHour, err := time.Parse(time.RFC3339, k)
|
||||
if err != nil {
|
||||
// This shouldn't happen, but we'll log it in case it does happen
|
||||
log.Println("[uptime][ProcessResult] Failed to parse programmatically generated timestamp:", err.Error())
|
||||
continue
|
||||
}
|
||||
if sevenDaysAgo.Unix() > dateWithHour.Unix() {
|
||||
delete(uptime.TotalCountPerHour, k)
|
||||
delete(uptime.SuccessCountPerHour, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
if result.Success {
|
||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 100%
|
||||
// If they're all 100%, then recalculating the uptime would be useless unless
|
||||
// the result added was a failure (!result.Success)
|
||||
if uptime.LastSevenDays != 1 || uptime.LastTwentyFourHours != 1 || uptime.LastHour != 1 {
|
||||
uptime.recalculate()
|
||||
}
|
||||
} else {
|
||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 0%
|
||||
// If they're all 0%, then recalculating the uptime would be useless unless
|
||||
// the result added was a success (result.Success)
|
||||
if uptime.LastSevenDays != 0 || uptime.LastTwentyFourHours != 0 || uptime.LastHour != 0 {
|
||||
uptime.recalculate()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (uptime *Uptime) recalculate() {
|
||||
uptimeBrackets := make(map[string]uint64)
|
||||
now := time.Now()
|
||||
// The oldest uptime bracket starts 7 days ago, so we'll start from there
|
||||
timestamp := now.Add(-sevenDays)
|
||||
for now.Sub(timestamp) >= 0 {
|
||||
timestampDateWithHour := timestamp.Format(RFC3339WithoutMinutesAndSeconds)
|
||||
successCountForTimestamp := uptime.SuccessCountPerHour[timestampDateWithHour]
|
||||
totalCountForTimestamp := uptime.TotalCountPerHour[timestampDateWithHour]
|
||||
uptimeBrackets["7d_success"] += successCountForTimestamp
|
||||
uptimeBrackets["7d_total"] += totalCountForTimestamp
|
||||
if now.Sub(timestamp) <= 24*time.Hour {
|
||||
uptimeBrackets["24h_success"] += successCountForTimestamp
|
||||
uptimeBrackets["24h_total"] += totalCountForTimestamp
|
||||
}
|
||||
if now.Sub(timestamp) <= time.Hour {
|
||||
uptimeBrackets["1h_success"] += successCountForTimestamp
|
||||
uptimeBrackets["1h_total"] += totalCountForTimestamp
|
||||
}
|
||||
timestamp = timestamp.Add(time.Hour)
|
||||
}
|
||||
if uptimeBrackets["7d_total"] > 0 {
|
||||
uptime.LastSevenDays = float64(uptimeBrackets["7d_success"]) / float64(uptimeBrackets["7d_total"])
|
||||
}
|
||||
if uptimeBrackets["24h_total"] > 0 {
|
||||
uptime.LastTwentyFourHours = float64(uptimeBrackets["24h_success"]) / float64(uptimeBrackets["24h_total"])
|
||||
}
|
||||
if uptimeBrackets["1h_total"] > 0 {
|
||||
uptime.LastHour = float64(uptimeBrackets["1h_success"]) / float64(uptimeBrackets["1h_total"])
|
||||
HourlyStatistics: make(map[int64]*HourlyUptimeStatistics),
|
||||
}
|
||||
}
|
||||
|
||||
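The reworked Uptime keys HourlyStatistics by an hourly unix timestamp; the new ProcessResult is not shown in this excerpt, so the following is only a hedged sketch of how such a bucket could be filled, using the struct fields introduced above:

// Sketch only: bucket a result into its hourly statistics entry.
func recordHourly(uptime *Uptime, result *Result) {
	hourlyUnixTimestamp := result.Timestamp.Truncate(time.Hour).Unix()
	stats := uptime.HourlyStatistics[hourlyUnixTimestamp]
	if stats == nil {
		stats = &HourlyUptimeStatistics{}
		uptime.HourlyStatistics[hourlyUnixTimestamp] = stats
	}
	stats.TotalExecutions++
	if result.Success {
		stats.SuccessfulExecutions++
	}
	stats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
}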
@@ -1,79 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestUptime_ProcessResult(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
uptime := serviceStatus.Uptime
|
||||
|
||||
checkUptimes(t, serviceStatus, 0.00, 0.00, 0.00)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-7 * 24 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 1.00, 0.00, 0.00)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-6 * 24 * time.Hour), Success: false})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-8 * 24 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-24 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-12 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.75, 1.00, 0.00)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-1 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-30 * time.Minute), Success: false})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-15 * time.Minute), Success: false})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-10 * time.Minute), Success: false})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.50, 0.25)
|
||||
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-120 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-119 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-118 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-117 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-10 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-8 * time.Hour), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-30 * time.Minute), Success: true})
|
||||
uptime.ProcessResult(&Result{Timestamp: time.Now().Add(-25 * time.Minute), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.75, 0.70, 0.50)
|
||||
}
|
||||
|
||||
func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
||||
service := &Service{Name: "name", Group: "group"}
|
||||
serviceStatus := NewServiceStatus(service)
|
||||
now := time.Now()
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
// Start 12 days ago
|
||||
timestamp := now.Add(-12 * 24 * time.Hour)
|
||||
for timestamp.Unix() <= now.Unix() {
|
||||
serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
|
||||
if len(serviceStatus.Uptime.SuccessCountPerHour) > numberOfHoursInTenDays {
|
||||
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessCountPerHour", numberOfHoursInTenDays)
|
||||
}
|
||||
//fmt.Printf("timestamp=%s; uptimeDuringLastHour=%f; timeAgo=%s\n", timestamp.Format(time.RFC3339), serviceStatus.UptimeDuringLastHour, time.Since(timestamp))
|
||||
if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
|
||||
t.Error("most recent timestamp > 1h ago, expected serviceStatus.Uptime.LastHour to be 0, got", serviceStatus.Uptime.LastHour)
|
||||
}
|
||||
if now.Sub(timestamp) < time.Hour && serviceStatus.Uptime.LastHour == 0 {
|
||||
t.Error("most recent timestamp < 1h ago, expected serviceStatus.Uptime.LastHour to NOT be 0, got", serviceStatus.Uptime.LastHour)
|
||||
}
|
||||
// Simulate service with an interval of 1 minute
|
||||
timestamp = timestamp.Add(3 * time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func checkUptimes(t *testing.T, status *ServiceStatus, expectedUptimeDuringLastSevenDays, expectedUptimeDuringLastTwentyFourHours, expectedUptimeDuringLastHour float64) {
|
||||
if status.Uptime.LastSevenDays != expectedUptimeDuringLastSevenDays {
|
||||
t.Errorf("expected status.Uptime.LastSevenDays to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastSevenDays)
|
||||
}
|
||||
if status.Uptime.LastTwentyFourHours != expectedUptimeDuringLastTwentyFourHours {
|
||||
t.Errorf("expected status.Uptime.LastTwentyFourHours to be %f, got %f", expectedUptimeDuringLastTwentyFourHours, status.Uptime.LastTwentyFourHours)
|
||||
}
|
||||
if status.Uptime.LastHour != expectedUptimeDuringLastHour {
|
||||
t.Errorf("expected status.Uptime.LastHour to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastHour)
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
-version: "3.8"
-services:
-  gatus:
-    image: twinproduction/gatus:latest
-    ports:
-      - 8080:8080
-    volumes:
-      - ./config.yaml:/config/config.yaml
examples/docker-compose-sqlite-storage/config.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
+storage:
+  type: sqlite
+  file: /data/data.db
+
+services:
+  - name: back-end
+    group: core
+    url: "https://example.org/"
+    interval: 5m
+    conditions:
+      - "[STATUS] == 200"
+      - "[CERTIFICATE_EXPIRATION] > 48h"
+
+  - name: monitoring
+    group: internal
+    url: "https://example.org/"
+    interval: 5m
+    conditions:
+      - "[STATUS] == 200"
+
+  - name: nas
+    group: internal
+    url: "https://example.org/"
+    interval: 5m
+    conditions:
+      - "[STATUS] == 200"
+
+  - name: example-dns-query
+    url: "8.8.8.8" # Address of the DNS server to use
+    interval: 5m
+    dns:
+      query-name: "example.com"
+      query-type: "A"
+    conditions:
+      - "[BODY] == 93.184.216.34"
+      - "[DNS_RCODE] == NOERROR"
+
+  - name: icmp-ping
+    url: "icmp://example.org"
+    interval: 1m
+    conditions:
+      - "[CONNECTED] == true"
@@ -0,0 +1,9 @@
+version: "3.8"
+services:
+  gatus:
+    image: twinproduction/gatus:latest
+    ports:
+      - 8080:8080
+    volumes:
+      - ./config.yaml:/config/config.yaml
+      - ./data:/data/
examples/docker-compose/docker-compose.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
+version: "3.8"
+services:
+  gatus:
+    image: twinproduction/gatus:latest
+    ports:
+      - 8080:8080
+    volumes:
+      - ./config.yaml:/config/config.yaml
go.mod (6 lines changed)
@@ -1,10 +1,10 @@
module github.com/TwinProduction/gatus
|
||||
|
||||
go 1.15
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.74.0 // indirect
|
||||
github.com/TwinProduction/gocache v1.2.1
|
||||
github.com/TwinProduction/gocache v1.2.3
|
||||
github.com/TwinProduction/health v1.0.0
|
||||
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
@@ -14,13 +14,13 @@ require (
|
||||
github.com/prometheus/client_golang v1.9.0
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
|
||||
golang.org/x/sys v0.0.0-20201223074533-0d417f636930 // indirect
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.18.14
|
||||
k8s.io/apimachinery v0.18.14
|
||||
k8s.io/client-go v0.18.14
|
||||
modernc.org/sqlite v1.11.2
|
||||
)
|
||||
|
||||
replace k8s.io/client-go => k8s.io/client-go v0.18.14
|
||||
|
||||
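go.mod gains modernc.org/sqlite v1.11.2, a cgo-free SQLite driver that registers itself under the database/sql driver name "sqlite"; a minimal sketch of opening the data file from the example configuration (how gatus wires this internally is not shown in this diff):

package storage // illustrative package name

import (
	"database/sql"

	_ "modernc.org/sqlite" // registers the "sqlite" driver
)

// openSQLite opens (or creates) the SQLite database used by the sqlite storage type.
func openSQLite(path string) (*sql.DB, error) {
	return sql.Open("sqlite", path) // e.g. path = "/data/data.db"
}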
go.sum (84 lines changed)
@@ -1,5 +1,4 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
@@ -50,8 +49,8 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/TwinProduction/gocache v1.2.1 h1:NAdMwO9SQEZFmX69YWx6fzhwb6fHakkLri0451c+V1w=
|
||||
github.com/TwinProduction/gocache v1.2.1/go.mod h1:6zkBoLjrFLkIISwkZTgLy67qliCGSon1xpORM4Ri5HM=
|
||||
github.com/TwinProduction/gocache v1.2.3 h1:4wFNih4CemUX+A99Gk/EsaU0SXSNZV42Ve77v7/7ToY=
|
||||
github.com/TwinProduction/gocache v1.2.3/go.mod h1:Yj2daITit8TTBgiOpc26XCDSbg9xcFskUilHj9u3Mh8=
|
||||
github.com/TwinProduction/health v1.0.0 h1:TVyYTAORQQZ8LaptX8jCHZRCGCAO6e+oJx19BUIzQYY=
|
||||
github.com/TwinProduction/health v1.0.0/go.mod h1:ys4mYKUeEfYrWmkm60xLtPjTuLIEDQNBZaTZvenLG1c=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
@@ -100,6 +99,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
@@ -142,7 +143,6 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
@@ -161,7 +161,6 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
@@ -172,7 +171,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
@@ -184,13 +182,12 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
@@ -220,7 +217,6 @@ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
@@ -271,6 +267,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
@@ -289,7 +287,11 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
@@ -362,21 +364,18 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
|
||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
|
||||
@@ -385,11 +384,12 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -407,7 +407,6 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
@@ -418,7 +417,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
@@ -459,7 +457,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
@@ -481,7 +478,6 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
@@ -495,6 +491,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -531,7 +528,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
@@ -542,7 +538,6 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -556,7 +551,6 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -594,6 +588,7 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -610,15 +605,14 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201223074533-0d417f636930 h1:vRgIt+nup/B/BwIS0g2oC0haq0iqbV3ZA+u6+0TlNCo=
|
||||
golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@@ -626,7 +620,6 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
@@ -634,7 +627,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -668,7 +660,6 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
@@ -690,11 +681,12 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2 h1:vEtypaVub6UvKkiXZ2xx9QIvp9TL7sI7xp7vdi2kezA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -719,7 +711,6 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
@@ -787,14 +778,12 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
@@ -810,12 +799,10 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
@@ -841,13 +828,42 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU=
|
||||
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
modernc.org/cc/v3 v3.33.6 h1:r63dgSzVzRxUpAJFPQWHy1QeZeY1ydNENUDaBx1GqYc=
|
||||
modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
|
||||
modernc.org/ccgo/v3 v3.9.5 h1:dEuUSf8WN51rDkprFuAqjfchKEzN0WttP/Py3enBwjk=
|
||||
modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60=
|
||||
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
|
||||
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
|
||||
modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
|
||||
modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
|
||||
modernc.org/libc v1.9.11 h1:QUxZMs48Ahg2F7SN41aERvMfGLY2HU/ADnB9DC4Yts8=
|
||||
modernc.org/libc v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q=
|
||||
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/mathutil v1.4.0 h1:GCjoRaBew8ECCKINQA2nYjzvufFW9YiEuuB+rQ9bn2E=
|
||||
modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM=
|
||||
modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
|
||||
modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
|
||||
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sqlite v1.11.2 h1:ShWQpeD3ag/bmx6TqidBlIWonWmQaSQKls3aenCbt+w=
|
||||
modernc.org/sqlite v1.11.2/go.mod h1:+mhs/P1ONd+6G7hcAs6irwDi/bjTQ7nLW6LHRBsEa3A=
|
||||
modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs=
|
||||
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
|
||||
modernc.org/tcl v1.5.5 h1:N03RwthgTR/l/eQvz3UjfYnvVVj1G2sZqzFGfoD4HE4=
|
||||
modernc.org/tcl v1.5.5/go.mod h1:ADkaTUuwukkrlhqwERyq0SM8OvyXo7+TjFz7yAF56EI=
|
||||
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
|
||||
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc=
|
||||
modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
||||
@@ -25,6 +25,9 @@ func walk(path string, object interface{}) (string, int, error) {
case map[string]interface{}:
return walk(strings.Replace(path, fmt.Sprintf("%s.", currentKey), "", 1), value)
case string:
if len(keys) > 1 {
return "", 0, fmt.Errorf("couldn't walk through '%s', because '%s' was a string instead of an object", keys[1], currentKey)
}
return value, len(value), nil
case []interface{}:
return fmt.Sprintf("%v", value), len(value), nil
@@ -41,7 +44,7 @@ func extractValue(currentKey string, value interface{}) interface{} {
tmp := strings.SplitN(currentKey, "[", 3)
arrayIndex, err := strconv.Atoi(strings.Replace(tmp[1], "]", "", 1))
if err != nil {
return value
return nil
}
currentKey := tmp[0]
// if currentKey contains only an index (i.e. [0] or 0)

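Editor's note, a minimal usage sketch (not part of the diff) of the Eval function exercised by the jsonpath tests below. The import path github.com/TwinProduction/gatus/jsonpath is assumed from the module name; the (value, length, error) signature is taken from the test file.

package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/jsonpath" // assumed import path
)

func main() {
	// Resolve the second element of the "ids" array and read its "id" field
	value, length, err := jsonpath.Eval("ids[1].id", []byte(`{"ids": [{"id": 1}, {"id": 2}]}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(value, length) // prints: 2 1
}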
@@ -1,152 +1,148 @@
|
||||
package jsonpath
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEval(t *testing.T) {
|
||||
path := "simple"
|
||||
data := `{"simple": "value"}`
|
||||
|
||||
expectedOutput := "value"
|
||||
|
||||
output, outputLength, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Path string
|
||||
Data string
|
||||
ExpectedOutput string
|
||||
ExpectedOutputLength int
|
||||
ExpectedError bool
|
||||
}
|
||||
if outputLength != len(expectedOutput) {
|
||||
t.Errorf("Expected output length to be %v, but was %v", len(expectedOutput), outputLength)
|
||||
scenarios := []Scenario{
|
||||
{
|
||||
Name: "simple",
|
||||
Path: "key",
|
||||
Data: `{"key": "value"}`,
|
||||
ExpectedOutput: "value",
|
||||
ExpectedOutputLength: 5,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "simple-with-invalid-data",
|
||||
Path: "key",
|
||||
Data: "invalid data",
|
||||
ExpectedOutput: "",
|
||||
ExpectedOutputLength: 0,
|
||||
ExpectedError: true,
|
||||
},
|
||||
{
|
||||
Name: "invalid-path",
|
||||
Path: "key",
|
||||
Data: `{}`,
|
||||
ExpectedOutput: "",
|
||||
ExpectedOutputLength: 0,
|
||||
ExpectedError: true,
|
||||
},
|
||||
{
|
||||
Name: "long-simple-walk",
|
||||
Path: "long.simple.walk",
|
||||
Data: `{"long": {"simple": {"walk": "value"}}}`,
|
||||
ExpectedOutput: "value",
|
||||
ExpectedOutputLength: 5,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "array-of-maps",
|
||||
Path: "ids[1].id",
|
||||
Data: `{"ids": [{"id": 1}, {"id": 2}]}`,
|
||||
ExpectedOutput: "2",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "array-of-values",
|
||||
Path: "ids[0]",
|
||||
Data: `{"ids": [1, 2]}`,
|
||||
ExpectedOutput: "1",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "array-of-values-and-invalid-index",
|
||||
Path: "ids[wat]",
|
||||
Data: `{"ids": [1, 2]}`,
|
||||
ExpectedOutput: "",
|
||||
ExpectedOutputLength: 0,
|
||||
ExpectedError: true,
|
||||
},
|
||||
{
|
||||
Name: "array-of-values-at-root",
|
||||
Path: "[1]",
|
||||
Data: `[1, 2]`,
|
||||
ExpectedOutput: "2",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "array-of-maps-at-root",
|
||||
Path: "[0].id",
|
||||
Data: `[{"id": 1}, {"id": 2}]`,
|
||||
ExpectedOutput: "1",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "array-of-maps-at-root-and-invalid-index",
|
||||
Path: "[5].id",
|
||||
Data: `[{"id": 1}, {"id": 2}]`,
|
||||
ExpectedOutput: "",
|
||||
ExpectedOutputLength: 0,
|
||||
ExpectedError: true,
|
||||
},
|
||||
{
|
||||
Name: "long-walk-and-array",
|
||||
Path: "data.ids[0].id",
|
||||
Data: `{"data": {"ids": [{"id": 1}, {"id": 2}, {"id": 3}]}}`,
|
||||
ExpectedOutput: "1",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "nested-array",
|
||||
Path: "[3][2]",
|
||||
Data: `[[1, 2], [3, 4], [], [5, 6, 7]]`,
|
||||
ExpectedOutput: "7",
|
||||
ExpectedOutputLength: 1,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "map-of-nested-arrays",
|
||||
Path: "data[1][1]",
|
||||
Data: `{"data": [["a", "b", "c"], ["d", "eeeee", "f"]]}`,
|
||||
ExpectedOutput: "eeeee",
|
||||
ExpectedOutputLength: 5,
|
||||
ExpectedError: false,
|
||||
},
|
||||
{
|
||||
Name: "partially-invalid-path-issue122",
|
||||
Path: "data.name.invalid",
|
||||
Data: `{"data": {"name": "john"}}`,
|
||||
ExpectedOutput: "",
|
||||
ExpectedOutputLength: 0,
|
||||
ExpectedError: true,
|
||||
},
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithLongSimpleWalk(t *testing.T) {
|
||||
path := "long.simple.walk"
|
||||
data := `{"long": {"simple": {"walk": "value"}}}`
|
||||
|
||||
expectedOutput := "value"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithArrayOfMaps(t *testing.T) {
|
||||
path := "ids[1].id"
|
||||
data := `{"ids": [{"id": 1}, {"id": 2}]}`
|
||||
|
||||
expectedOutput := "2"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithArrayOfValues(t *testing.T) {
|
||||
path := "ids[0]"
|
||||
data := `{"ids": [1, 2]}`
|
||||
|
||||
expectedOutput := "1"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithRootArrayOfValues(t *testing.T) {
|
||||
path := "[1]"
|
||||
data := `[1, 2]`
|
||||
|
||||
expectedOutput := "2"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithRootArrayOfMaps(t *testing.T) {
|
||||
path := "[0].id"
|
||||
data := `[{"id": 1}, {"id": 2}]`
|
||||
|
||||
expectedOutput := "1"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithRootArrayOfMapsUsingInvalidArrayIndex(t *testing.T) {
|
||||
path := "[5].id"
|
||||
data := `[{"id": 1}, {"id": 2}]`
|
||||
|
||||
_, _, err := Eval(path, []byte(data))
|
||||
if err == nil {
|
||||
t.Error("Should've returned an error, but didn't")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithLongWalkAndArray(t *testing.T) {
|
||||
path := "data.ids[0].id"
|
||||
data := `{"data": {"ids": [{"id": 1}, {"id": 2}, {"id": 3}]}}`
|
||||
|
||||
expectedOutput := "1"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithNestedArray(t *testing.T) {
|
||||
path := "[3][2]"
|
||||
data := `[[1, 2], [3, 4], [], [5, 6, 7]]`
|
||||
|
||||
expectedOutput := "7"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvalWithMapOfNestedArray(t *testing.T) {
|
||||
path := "data[1][1]"
|
||||
data := `{"data": [["a", "b", "c"], ["d", "e", "f"]]}`
|
||||
|
||||
expectedOutput := "e"
|
||||
|
||||
output, _, err := Eval(path, []byte(data))
|
||||
if err != nil {
|
||||
t.Error("Didn't expect any error, but got", err)
|
||||
}
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", expectedOutput, output)
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.Name, func(t *testing.T) {
|
||||
output, outputLength, err := Eval(scenario.Path, []byte(scenario.Data))
|
||||
if (err != nil) != scenario.ExpectedError {
|
||||
if scenario.ExpectedError {
|
||||
t.Errorf("Expected error, got '%v'", err)
|
||||
} else {
|
||||
t.Errorf("Expected no error, got '%v'", err)
|
||||
}
|
||||
}
|
||||
if outputLength != scenario.ExpectedOutputLength {
|
||||
t.Errorf("Expected output length to be %v, but was %v", scenario.ExpectedOutputLength, outputLength)
|
||||
}
|
||||
if output != scenario.ExpectedOutput {
|
||||
t.Errorf("Expected output to be %v, but was %v", scenario.ExpectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
77 main.go
@@ -5,6 +5,7 @@ import (
"os"
"os/signal"
"syscall"
"time"

"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/controller"
@@ -13,37 +14,75 @@ import (
)

func main() {
cfg := loadConfiguration()
go watchdog.Monitor(cfg)
go controller.Handle()
cfg, err := loadConfiguration()
if err != nil {
panic(err)
}
start(cfg)
// Wait for termination signal
sig := make(chan os.Signal, 1)
signalChannel := make(chan os.Signal, 1)
done := make(chan bool, 1)
signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
go func() {
<-sig
<-signalChannel
log.Println("Received termination signal, attempting to gracefully shut down")
controller.Shutdown()
err := storage.Get().Save()
if err != nil {
log.Println("Failed to save storage provider:", err.Error())
}
stop()
save()
done <- true
}()
<-done
log.Println("Shutting down")
}

func loadConfiguration() *config.Config {
var err error
func start(cfg *config.Config) {
go controller.Handle(cfg.Security, cfg.Web, cfg.Metrics)
watchdog.Monitor(cfg)
go listenToConfigurationFileChanges(cfg)
}

func stop() {
watchdog.Shutdown()
controller.Shutdown()
}

func save() {
err := storage.Get().Save()
if err != nil {
log.Println("Failed to save storage provider:", err.Error())
}
}

func loadConfiguration() (cfg *config.Config, err error) {
customConfigFile := os.Getenv("GATUS_CONFIG_FILE")
if len(customConfigFile) > 0 {
err = config.Load(customConfigFile)
cfg, err = config.Load(customConfigFile)
} else {
err = config.LoadDefaultConfiguration()
cfg, err = config.LoadDefaultConfiguration()
}
return
}

func listenToConfigurationFileChanges(cfg *config.Config) {
for {
time.Sleep(30 * time.Second)
if cfg.HasLoadedConfigurationFileBeenModified() {
log.Println("[main][listenToConfigurationFileChanges] Configuration file has been modified")
save()
updatedConfig, err := loadConfiguration()
if err != nil {
if cfg.SkipInvalidConfigUpdate {
log.Println("[main][listenToConfigurationFileChanges] Failed to load new configuration:", err.Error())
log.Println("[main][listenToConfigurationFileChanges] The configuration file was updated, but it is not valid. The old configuration will continue being used.")
// Update the last file modification time to avoid trying to process the same invalid configuration again
cfg.UpdateLastFileModTime()
continue
} else {
panic(err)
}
}
stop()
start(updatedConfig)
return
}
}
if err != nil {
panic(err)
}
return config.Get()
}

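Editor's note, a minimal sketch (not part of the diff) of the graceful-shutdown pattern main.go uses above: block on a buffered signal channel, run the cleanup in a goroutine, and only exit once it signals completion.

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	signalChannel := make(chan os.Signal, 1)
	done := make(chan bool, 1)
	signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-signalChannel
		log.Println("Received termination signal, attempting to gracefully shut down")
		// stop background workers and persist state here (stop() and save() in the diff above)
		done <- true
	}()
	<-done
	log.Println("Shutting down")
}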
@@ -5,7 +5,6 @@ import (
"strconv"
"sync"

"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/core"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -19,18 +18,16 @@ var (
// PublishMetricsForService publishes metrics for the given service and its result.
// These metrics will be exposed at /metrics if the metrics are enabled
func PublishMetricsForService(service *core.Service, result *core.Result) {
if config.Get().Metrics {
rwLock.Lock()
gauge, exists := gauges[fmt.Sprintf("%s_%s", service.Name, service.URL)]
if !exists {
gauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: "gatus",
Name: "tasks",
ConstLabels: prometheus.Labels{"service": service.Name, "url": service.URL},
}, []string{"status", "success"})
gauges[fmt.Sprintf("%s_%s", service.Name, service.URL)] = gauge
}
rwLock.Unlock()
gauge.WithLabelValues(strconv.Itoa(result.HTTPStatus), strconv.FormatBool(result.Success)).Inc()
rwLock.Lock()
gauge, exists := gauges[fmt.Sprintf("%s_%s", service.Name, service.URL)]
if !exists {
gauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: "gatus",
Name: "tasks",
ConstLabels: prometheus.Labels{"service": service.Name, "url": service.URL},
}, []string{"status", "success"})
gauges[fmt.Sprintf("%s_%s", service.Name, service.URL)] = gauge
}
rwLock.Unlock()
gauge.WithLabelValues(strconv.Itoa(result.HTTPStatus), strconv.FormatBool(result.Success)).Inc()
}

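Editor's note, a standalone sketch of the lazily-registered GaugeVec pattern shown above: one gauge vector per service/URL pair, guarded by a mutex, with the status code and success flag as labels. Function and variable names here are assumptions, not the actual gatus package layout.

package main

import (
	"fmt"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	rwLock sync.RWMutex
	gauges = make(map[string]*prometheus.GaugeVec)
)

// publish lazily creates one GaugeVec per service/URL pair (registered against the
// default registry by promauto) and increments the label pair for this observation.
func publish(name, url string, httpStatus int, success bool) {
	key := fmt.Sprintf("%s_%s", name, url)
	rwLock.Lock()
	gauge, exists := gauges[key]
	if !exists {
		gauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
			Subsystem:   "gatus",
			Name:        "tasks",
			ConstLabels: prometheus.Labels{"service": name, "url": url},
		}, []string{"status", "success"})
		gauges[key] = gauge
	}
	rwLock.Unlock()
	gauge.WithLabelValues(strconv.Itoa(httpStatus), strconv.FormatBool(success)).Inc()
}

func main() {
	publish("example-service", "https://example.org", 200, true)
}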
@@ -5,7 +5,7 @@ import (
"strings"
)

// Handler takes care of security for a given handler with the given security configuratioon
// Handler takes care of security for a given handler with the given security configuration
func Handler(handler http.HandlerFunc, security *Config) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
usernameEntered, passwordEntered, ok := r.BasicAuth()

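Editor's note: the hunk above only fixes a comment typo, but for readers unfamiliar with the pattern, here is an illustrative basic-auth middleware built around r.BasicAuth(), the call visible in the diff. This is not gatus's actual implementation; the expected credentials and realm are made up.

package main

import (
	"crypto/subtle"
	"net/http"
)

// withBasicAuth wraps a handler and rejects requests whose Basic Auth
// credentials don't match the expected pair.
func withBasicAuth(next http.HandlerFunc, expectedUsername, expectedPassword string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		usernameEntered, passwordEntered, ok := r.BasicAuth()
		if !ok ||
			subtle.ConstantTimeCompare([]byte(usernameEntered), []byte(expectedUsername)) != 1 ||
			subtle.ConstantTimeCompare([]byte(passwordEntered), []byte(expectedPassword)) != 1 {
			w.Header().Set("WWW-Authenticate", `Basic realm="restricted"`)
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/", withBasicAuth(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}, "john", "hunter2"))
	http.ListenAndServe(":8080", nil)
}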
@@ -1,8 +1,12 @@
package storage

// Config is the configuration for alerting providers
// Config is the configuration for storage
type Config struct {
// File is the path of the file to use for persistence
// If blank, persistence is disabled.
// If blank, persistence is disabled
File string `yaml:"file"`

// Type of store
// If blank, uses the default in-memory store
Type Type `yaml:"type"`
}

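Editor's note, a sketch of how this configuration might be constructed programmatically. The Config fields, the Type constants and the Initialize entry point are taken from this diff and the storage test file below; the file path and import path are assumptions.

package main

import (
	"log"

	"github.com/TwinProduction/gatus/storage" // assumed import path
)

func main() {
	// TypeSQLite with a file enables persistence; a blank config falls back to the in-memory store
	err := storage.Initialize(&storage.Config{Type: storage.TypeSQLite, File: "/data/gatus.db"})
	if err != nil {
		log.Fatal(err)
	}
	defer storage.Get().Save()
}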
@@ -1,11 +1,13 @@
package storage

import (
"context"
"log"
"time"

"github.com/TwinProduction/gatus/storage/store"
"github.com/TwinProduction/gatus/storage/store/memory"
"github.com/TwinProduction/gatus/storage/store/sqlite"
)

var (
@@ -15,6 +17,9 @@ var (
// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
// every single time Get is called, we'll just lazily keep track of its existence through this variable
initialized bool

ctx context.Context
cancelFunc context.CancelFunc
)

// Get retrieves the storage provider
@@ -33,31 +38,54 @@ func Get() store.Store {
func Initialize(cfg *Config) error {
initialized = true
var err error
if cfg == nil || len(cfg.File) == 0 {
log.Println("[storage][Initialize] Creating storage provider")
provider, err = memory.NewStore("")
if err != nil {
return err
}
if cancelFunc != nil {
// Stop the active autoSaveStore task, if there's already one
cancelFunc()
}
if cfg == nil {
cfg = &Config{}
}
if len(cfg.File) == 0 {
log.Printf("[storage][Initialize] Creating storage provider with type=%s", cfg.Type)
} else {
log.Printf("[storage][Initialize] Creating storage provider with file=%s", cfg.File)
provider, err = memory.NewStore(cfg.File)
log.Printf("[storage][Initialize] Creating storage provider with type=%s and file=%s", cfg.Type, cfg.File)
}
ctx, cancelFunc = context.WithCancel(context.Background())
switch cfg.Type {
case TypeSQLite:
provider, err = sqlite.NewStore(string(cfg.Type), cfg.File)
if err != nil {
return err
}
go autoSave(7 * time.Minute)
case TypeInMemory:
fallthrough
default:
if len(cfg.File) > 0 {
provider, err = memory.NewStore(cfg.File)
if err != nil {
return err
}
go autoSaveStore(ctx, provider, 7*time.Minute)
} else {
provider, _ = memory.NewStore("")
}
}
return nil
}

// autoSave automatically calls the Save function of the provider at every interval
func autoSave(interval time.Duration) {
// autoSaveStore automatically calls the Save function of the provider at every interval
func autoSaveStore(ctx context.Context, provider store.Store, interval time.Duration) {
for {
time.Sleep(interval)
log.Printf("[storage][autoSave] Saving")
err := provider.Save()
if err != nil {
log.Println("[storage][autoSave] Save failed:", err.Error())
select {
case <-ctx.Done():
log.Printf("[storage][autoSaveStore] Stopping active job")
return
case <-time.After(interval):
log.Printf("[storage][autoSaveStore] Saving")
err := provider.Save()
if err != nil {
log.Println("[storage][autoSaveStore] Save failed:", err.Error())
}
}
}
}

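Editor's note, a standalone sketch of the cancellation pattern autoSaveStore adopts above: replace time.Sleep with a select on ctx.Done() and time.After so the periodic save loop can be stopped when the store is re-initialized. Names here are illustrative.

package main

import (
	"context"
	"log"
	"time"
)

// periodically runs work at every interval until the context is cancelled.
func periodically(ctx context.Context, interval time.Duration, work func() error) {
	for {
		select {
		case <-ctx.Done():
			log.Println("stopping periodic job")
			return
		case <-time.After(interval):
			if err := work(); err != nil {
				log.Println("periodic job failed:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go periodically(ctx, 50*time.Millisecond, func() error {
		log.Println("saving")
		return nil
	})
	time.Sleep(200 * time.Millisecond)
	cancel()
	time.Sleep(100 * time.Millisecond)
}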
94 storage/storage_test.go (Normal file)
@@ -0,0 +1,94 @@
package storage

import (
"testing"
"time"

"github.com/TwinProduction/gatus/storage/store/sqlite"
)

func TestGet(t *testing.T) {
store := Get()
if store == nil {
t.Error("store should've been automatically initialized")
}
}

func TestInitialize(t *testing.T) {
type Scenario struct {
Name string
Cfg *Config
ExpectedErr error
}
scenarios := []Scenario{
{
Name: "nil",
Cfg: nil,
ExpectedErr: nil,
},
{
Name: "blank",
Cfg: &Config{},
ExpectedErr: nil,
},
{
Name: "inmemory-no-file",
Cfg: &Config{Type: TypeInMemory},
ExpectedErr: nil,
},
{
Name: "inmemory-with-file",
Cfg: &Config{Type: TypeInMemory, File: t.TempDir() + "/TestInitialize_inmemory-with-file.db"},
ExpectedErr: nil,
},
{
Name: "sqlite-no-file",
Cfg: &Config{Type: TypeSQLite},
ExpectedErr: sqlite.ErrFilePathNotSpecified,
},
{
Name: "sqlite-with-file",
Cfg: &Config{Type: TypeSQLite, File: t.TempDir() + "/TestInitialize_sqlite-with-file.db"},
ExpectedErr: nil,
},
}
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
err := Initialize(scenario.Cfg)
if err != scenario.ExpectedErr {
t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
}
if err != nil {
return
}
if cancelFunc == nil {
t.Error("cancelFunc shouldn't have been nil")
}
if ctx == nil {
t.Error("ctx shouldn't have been nil")
}
if provider == nil {
t.Fatal("provider shouldn't have been nil")
}
provider.Close()
// Try to initialize it again
err = Initialize(scenario.Cfg)
if err != scenario.ExpectedErr {
t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
return
}
provider.Close()
})
}
}

func TestAutoSave(t *testing.T) {
file := t.TempDir() + "/TestAutoSave.db"
if err := Initialize(&Config{File: file}); err != nil {
t.Fatal("shouldn't have returned an error")
}
go autoSaveStore(ctx, provider, 3*time.Millisecond)
time.Sleep(15 * time.Millisecond)
cancelFunc()
time.Sleep(50 * time.Millisecond)
}

@@ -2,8 +2,11 @@ package memory

import (
"encoding/gob"
"sync"
"time"

"github.com/TwinProduction/gatus/core"
"github.com/TwinProduction/gatus/storage/store/paging"
"github.com/TwinProduction/gatus/util"
"github.com/TwinProduction/gocache"
)
@@ -17,11 +20,15 @@ func init() {

// Store that leverages gocache
type Store struct {
sync.RWMutex
file string
cache *gocache.Cache
}

// NewStore creates a new store
// NewStore creates a new store using gocache.Cache
//
// This store holds everything in memory, and if the file parameter is not blank,
// supports eventual persistence.
func NewStore(file string) (*Store, error) {
store := &Store{
file: file,
@@ -36,40 +43,46 @@ func NewStore(file string) (*Store, error) {
return store, nil
}

// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
// GetAllServiceStatuses returns all monitored core.ServiceStatus
// with a subset of core.Result defined by the page and pageSize parameters
func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus {
func (s *Store) GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus {
serviceStatuses := s.cache.GetAll()
pagedServiceStatuses := make(map[string]*core.ServiceStatus, len(serviceStatuses))
for k, v := range serviceStatuses {
pagedServiceStatuses[k] = v.(*core.ServiceStatus).WithResultPagination(page, pageSize)
pagedServiceStatuses[k] = ShallowCopyServiceStatus(v.(*core.ServiceStatus), params)
}
return pagedServiceStatuses
}

// GetServiceStatus returns the service status for a given service name in the given group
func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
func (s *Store) GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) *core.ServiceStatus {
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName), params)
}

// GetServiceStatusByKey returns the service status for a given key
func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
func (s *Store) GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus {
serviceStatus := s.cache.GetValue(key)
if serviceStatus == nil {
return nil
}
return serviceStatus.(*core.ServiceStatus).ShallowCopy()
return ShallowCopyServiceStatus(serviceStatus.(*core.ServiceStatus), params)
}

// Insert adds the observed result for the specified service into the store
func (s *Store) Insert(service *core.Service, result *core.Result) {
key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
key := service.Key()
s.Lock()
serviceStatus, exists := s.cache.Get(key)
if !exists {
serviceStatus = core.NewServiceStatus(service)
serviceStatus = core.NewServiceStatus(key, service.Group, service.Name)
serviceStatus.(*core.ServiceStatus).Events = append(serviceStatus.(*core.ServiceStatus).Events, &core.Event{
Type: core.EventStart,
Timestamp: time.Now(),
})
}
serviceStatus.(*core.ServiceStatus).AddResult(result)
AddResult(serviceStatus.(*core.ServiceStatus), result)
s.cache.Set(key, serviceStatus)
s.Unlock()
}

// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
@@ -102,3 +115,8 @@ func (s *Store) Save() error {
}
return nil
}

// Close does nothing, because there's nothing to close
func (s *Store) Close() {
return
}

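Editor's note, a sketch of how the new paging-aware API above might be called. The types and method names (memory.NewStore, Insert, GetAllServiceStatuses, paging.NewServiceStatusParams, WithResults, WithEvents) and the core.Service/core.Result fields appear in this diff and its tests; the import paths and sample values are assumptions.

package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/memory"
	"github.com/TwinProduction/gatus/storage/store/paging"
)

func main() {
	store, err := memory.NewStore("") // blank file: in-memory only, no persistence
	if err != nil {
		panic(err)
	}
	service := &core.Service{Name: "example", Group: "core"}
	store.Insert(service, &core.Result{HTTPStatus: 200, Success: true})
	// Ask for the first page of up to 20 results and 20 events per service status
	statuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20).WithEvents(1, 20))
	fmt.Println(len(statuses))
}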
@@ -1,12 +1,11 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -33,7 +32,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
@@ -59,7 +57,6 @@ var (
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Body: []byte("body"),
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
@@ -83,177 +80,31 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func TestStore_Insert(t *testing.T) {
|
||||
// Note that there are much more extensive tests in /storage/store/store_test.go.
|
||||
// This test is simply an extra sanity check
|
||||
func TestStore_SanityCheck(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
if numberOfServiceStatuses := len(store.GetAllServiceStatuses(paging.NewServiceStatusParams())); numberOfServiceStatuses != 1 {
|
||||
t.Fatalf("expected 1 ServiceStatus, got %d", numberOfServiceStatuses)
|
||||
}
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
if store.cache.Count() != 1 {
|
||||
t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
|
||||
// Both results inserted are for the same service, therefore, the count shouldn't have increased
|
||||
if numberOfServiceStatuses := len(store.GetAllServiceStatuses(paging.NewServiceStatusParams())); numberOfServiceStatuses != 1 {
|
||||
t.Fatalf("expected 1 ServiceStatus, got %d", numberOfServiceStatuses)
|
||||
}
|
||||
key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
|
||||
serviceStatus := store.GetServiceStatusByKey(key)
|
||||
if serviceStatus == nil {
|
||||
t.Fatalf("Store should've had key '%s', but didn't", key)
|
||||
ss := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithResults(1, 20).WithEvents(1, 20))
|
||||
if ss == nil {
|
||||
t.Fatalf("Store should've had key '%s', but didn't", testService.Key())
|
||||
}
|
||||
if len(serviceStatus.Results) != 2 {
|
||||
t.Fatalf("Service '%s' should've had 2 results, but actually returned %d", serviceStatus.Name, len(serviceStatus.Results))
|
||||
if len(ss.Events) != 3 {
|
||||
t.Errorf("Service '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
|
||||
}
|
||||
for i, r := range serviceStatus.Results {
|
||||
expectedResult := store.GetServiceStatus(testService.Group, testService.Name).Results[i]
|
||||
if r.HTTPStatus != expectedResult.HTTPStatus {
|
||||
t.Errorf("Result at index %d should've had a HTTPStatus of %d, but was actually %d", i, expectedResult.HTTPStatus, r.HTTPStatus)
|
||||
}
|
||||
if r.DNSRCode != expectedResult.DNSRCode {
|
||||
t.Errorf("Result at index %d should've had a DNSRCode of %s, but was actually %s", i, expectedResult.DNSRCode, r.DNSRCode)
|
||||
}
|
||||
if len(r.Body) != len(expectedResult.Body) {
|
||||
t.Errorf("Result at index %d should've had a body of length %d, but was actually %d", i, len(expectedResult.Body), len(r.Body))
|
||||
}
|
||||
if r.Hostname != expectedResult.Hostname {
|
||||
t.Errorf("Result at index %d should've had a Hostname of %s, but was actually %s", i, expectedResult.Hostname, r.Hostname)
|
||||
}
|
||||
if r.IP != expectedResult.IP {
|
||||
t.Errorf("Result at index %d should've had a IP of %s, but was actually %s", i, expectedResult.IP, r.IP)
|
||||
}
|
||||
if r.Connected != expectedResult.Connected {
|
||||
t.Errorf("Result at index %d should've had a Connected value of %t, but was actually %t", i, expectedResult.Connected, r.Connected)
|
||||
}
|
||||
if r.Duration != expectedResult.Duration {
|
||||
t.Errorf("Result at index %d should've had a Duration of %s, but was actually %s", i, expectedResult.Duration.String(), r.Duration.String())
|
||||
}
|
||||
if len(r.Errors) != len(expectedResult.Errors) {
|
||||
t.Errorf("Result at index %d should've had %d errors, but actually had %d errors", i, len(expectedResult.Errors), len(r.Errors))
|
||||
}
|
||||
if len(r.ConditionResults) != len(expectedResult.ConditionResults) {
|
||||
t.Errorf("Result at index %d should've had %d ConditionResults, but actually had %d ConditionResults", i, len(expectedResult.ConditionResults), len(r.ConditionResults))
|
||||
}
|
||||
if r.Success != expectedResult.Success {
|
||||
t.Errorf("Result at index %d should've had a Success of %t, but was actually %t", i, expectedResult.Success, r.Success)
|
||||
}
|
||||
if r.Timestamp != expectedResult.Timestamp {
|
||||
t.Errorf("Result at index %d should've had a Timestamp of %s, but was actually %s", i, expectedResult.Timestamp.String(), r.Timestamp.String())
|
||||
}
|
||||
if r.CertificateExpiration != expectedResult.CertificateExpiration {
|
||||
t.Errorf("Result at index %d should've had a CertificateExpiration of %s, but was actually %s", i, expectedResult.CertificateExpiration.String(), r.CertificateExpiration.String())
|
||||
}
|
||||
if len(ss.Results) != 2 {
|
||||
t.Errorf("Service '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetServiceStatus(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name)
|
||||
if serviceStatus == nil {
|
||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime == nil {
|
||||
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime.LastHour != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastSevenDays != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
|
||||
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
|
||||
if serviceStatus != nil {
|
||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
|
||||
}
|
||||
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname")
|
||||
if serviceStatus != nil {
|
||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
|
||||
}
|
||||
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name)
|
||||
if serviceStatus != nil {
|
||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
store.Insert(&testService, &testSuccessfulResult)
|
||||
store.Insert(&testService, &testUnsuccessfulResult)
|
||||
|
||||
serviceStatus := store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(testService.Group, testService.Name))
|
||||
if serviceStatus == nil {
|
||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime == nil {
|
||||
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
|
||||
}
|
||||
if serviceStatus.Uptime.LastHour != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
|
||||
}
|
||||
if serviceStatus.Uptime.LastSevenDays != 0.5 {
|
||||
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
firstResult := &testSuccessfulResult
|
||||
secondResult := &testUnsuccessfulResult
|
||||
store.Insert(&testService, firstResult)
|
||||
store.Insert(&testService, secondResult)
|
||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||
firstResult.Timestamp = time.Time{}
|
||||
secondResult.Timestamp = time.Time{}
|
||||
serviceStatuses := store.GetAllServiceStatusesWithResultPagination(1, 20)
|
||||
if len(serviceStatuses) != 1 {
|
||||
t.Fatal("expected 1 service status")
|
||||
}
|
||||
actual, exists := serviceStatuses[util.ConvertGroupAndServiceToKey(testService.Group, testService.Name)]
|
||||
if !exists {
|
||||
t.Fatal("expected service status to exist")
|
||||
}
|
||||
if len(actual.Results) != 2 {
|
||||
t.Error("expected 2 results, got", len(actual.Results))
|
||||
}
|
||||
if len(actual.Events) != 2 {
|
||||
t.Error("expected 2 events, got", len(actual.Events))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
|
||||
store, _ := NewStore("")
|
||||
firstService := core.Service{Name: "service-1", Group: "group"}
|
||||
secondService := core.Service{Name: "service-2", Group: "group"}
|
||||
result := &testSuccessfulResult
|
||||
store.Insert(&firstService, result)
|
||||
store.Insert(&secondService, result)
|
||||
if store.cache.Count() != 2 {
|
||||
t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
|
||||
t.Fatal("firstService should exist")
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) == nil {
|
||||
t.Fatal("secondService should exist")
|
||||
}
|
||||
store.DeleteAllServiceStatusesNotInKeys([]string{util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)})
|
||||
if store.cache.Count() != 1 {
|
||||
t.Fatalf("expected cache to have 1 keys, got %d", store.cache.Count())
|
||||
}
|
||||
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
t.Error("firstService should still exist")
}
if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) != nil {
t.Error("secondService should've been deleted")
}
if deleted := store.DeleteAllServiceStatusesNotInKeys([]string{}); deleted != 1 {
t.Errorf("%d entries should've been deleted, got %d", 1, deleted)
}
}
119
storage/store/memory/uptime.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
const (
|
||||
numberOfHoursInTenDays = 10 * 24
|
||||
sevenDays = 7 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// processUptimeAfterResult processes the result by extracting the relevant information from it and recalculating the uptime
|
||||
// if necessary
|
||||
func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
|
||||
// XXX: Remove this on v3.0.0
|
||||
if len(uptime.SuccessfulExecutionsPerHour) != 0 || len(uptime.TotalExecutionsPerHour) != 0 {
|
||||
migrateUptimeToHourlyStatistics(uptime)
|
||||
}
|
||||
if uptime.HourlyStatistics == nil {
|
||||
uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
|
||||
}
|
||||
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
|
||||
hourlyStats := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
|
||||
if hourlyStats == nil {
|
||||
hourlyStats = &core.HourlyUptimeStatistics{}
|
||||
uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
|
||||
}
|
||||
if result.Success {
|
||||
hourlyStats.SuccessfulExecutions++
|
||||
}
|
||||
hourlyStats.TotalExecutions++
|
||||
hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
|
||||
// Clean up only when we're starting to have too many useless keys
|
||||
// Note that this is only triggered when there are more entries than there should be after
|
||||
// 10 days, despite the fact that we are deleting everything that's older than 7 days.
|
||||
// This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 7 days.
|
||||
if len(uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
||||
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
|
||||
for hourlyUnixTimestamp := range uptime.HourlyStatistics {
|
||||
if sevenDaysAgo > hourlyUnixTimestamp {
|
||||
delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
|
||||
}
|
||||
}
|
||||
}
|
||||
if result.Success {
|
||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 100%
|
||||
// If they're all 100%, then recalculating the uptime would be useless unless
|
||||
// the result added was a failure (!result.Success)
|
||||
if uptime.LastSevenDays != 1 || uptime.LastTwentyFourHours != 1 || uptime.LastHour != 1 {
|
||||
recalculateUptime(uptime)
|
||||
}
|
||||
} else {
|
||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 0%
|
||||
// If they're all 0%, then recalculating the uptime would be useless unless
|
||||
// the result added was a success (result.Success)
|
||||
if uptime.LastSevenDays != 0 || uptime.LastTwentyFourHours != 0 || uptime.LastHour != 0 {
|
||||
recalculateUptime(uptime)
|
||||
}
|
||||
}
|
||||
}
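// A concrete walk-through of the bookkeeping above (illustrative numbers): a successful 18ms result
// observed at 14:23 lands in the 14:00 bucket, incrementing its TotalExecutions and SuccessfulExecutions
// and adding 18 to its TotalExecutionsResponseTime. Recalculating the 1h/24h/7d percentages is skipped
// only when the new result couldn't change anything: a success while all three are already at 100%,
// or a failure while all three are already at 0%.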
|
||||
|
||||
func recalculateUptime(uptime *core.Uptime) {
|
||||
uptimeBrackets := make(map[string]uint64)
|
||||
now := time.Now()
|
||||
// The oldest uptime bracket starts 7 days ago, so we'll start from there
|
||||
timestamp := now.Add(-sevenDays)
|
||||
for now.Sub(timestamp) >= 0 {
|
||||
hourlyUnixTimestamp := timestamp.Truncate(time.Hour).Unix()
|
||||
hourlyStats := uptime.HourlyStatistics[hourlyUnixTimestamp]
|
||||
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
|
||||
timestamp = timestamp.Add(time.Hour)
|
||||
continue
|
||||
}
|
||||
uptimeBrackets["7d_success"] += hourlyStats.SuccessfulExecutions
|
||||
uptimeBrackets["7d_total"] += hourlyStats.TotalExecutions
|
||||
if now.Sub(timestamp) <= 24*time.Hour {
|
||||
uptimeBrackets["24h_success"] += hourlyStats.SuccessfulExecutions
|
||||
uptimeBrackets["24h_total"] += hourlyStats.TotalExecutions
|
||||
}
|
||||
if now.Sub(timestamp) <= time.Hour {
|
||||
uptimeBrackets["1h_success"] += hourlyStats.SuccessfulExecutions
|
||||
uptimeBrackets["1h_total"] += hourlyStats.TotalExecutions
|
||||
}
|
||||
timestamp = timestamp.Add(time.Hour)
|
||||
}
|
||||
if uptimeBrackets["7d_total"] > 0 {
|
||||
uptime.LastSevenDays = float64(uptimeBrackets["7d_success"]) / float64(uptimeBrackets["7d_total"])
|
||||
}
|
||||
if uptimeBrackets["24h_total"] > 0 {
|
||||
uptime.LastTwentyFourHours = float64(uptimeBrackets["24h_success"]) / float64(uptimeBrackets["24h_total"])
|
||||
}
|
||||
if uptimeBrackets["1h_total"] > 0 {
|
||||
uptime.LastHour = float64(uptimeBrackets["1h_success"]) / float64(uptimeBrackets["1h_total"])
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove this on v3.0.0
|
||||
// Deprecated
|
||||
func migrateUptimeToHourlyStatistics(uptime *core.Uptime) {
|
||||
log.Println("[migrateUptimeToHourlyStatistics] Got", len(uptime.SuccessfulExecutionsPerHour), "entries for successful executions and", len(uptime.TotalExecutionsPerHour), "entries for total executions")
|
||||
uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
|
||||
for hourlyUnixTimestamp, totalExecutions := range uptime.TotalExecutionsPerHour {
|
||||
if totalExecutions == 0 {
|
||||
log.Println("[migrateUptimeToHourlyStatistics] Skipping entry at", hourlyUnixTimestamp, "because total number of executions is 0")
|
||||
continue
|
||||
}
|
||||
uptime.HourlyStatistics[hourlyUnixTimestamp] = &core.HourlyUptimeStatistics{
|
||||
TotalExecutions: totalExecutions,
|
||||
SuccessfulExecutions: uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp],
|
||||
TotalExecutionsResponseTime: 0,
|
||||
}
|
||||
}
|
||||
log.Println("[migrateUptimeToHourlyStatistics] Migrated", len(uptime.HourlyStatistics), "entries")
|
||||
uptime.SuccessfulExecutionsPerHour = nil
|
||||
uptime.TotalExecutionsPerHour = nil
|
||||
}
|
||||
26
storage/store/memory/uptime_bench_test.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
func BenchmarkProcessUptimeAfterResult(b *testing.B) {
|
||||
uptime := core.NewUptime()
|
||||
now := time.Now()
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
// Start 12000 days ago
|
||||
timestamp := now.Add(-12000 * 24 * time.Hour)
|
||||
for n := 0; n < b.N; n++ {
|
||||
processUptimeAfterResult(uptime, &core.Result{
|
||||
Duration: 18 * time.Millisecond,
|
||||
Success: n%15 == 0,
|
||||
Timestamp: timestamp,
|
||||
})
|
||||
// Simulate service with an interval of 3 minutes
|
||||
timestamp = timestamp.Add(3 * time.Minute)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
}
|
||||
98
storage/store/memory/uptime_test.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
)
|
||||
|
||||
func TestProcessUptimeAfterResult(t *testing.T) {
|
||||
service := &core.Service{Name: "name", Group: "group"}
|
||||
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
uptime := serviceStatus.Uptime
|
||||
|
||||
checkUptimes(t, serviceStatus, 0.00, 0.00, 0.00)
|
||||
|
||||
now := time.Now()
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 1.00, 0.00, 0.00)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.75, 1.00, 0.00)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
|
||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
|
||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
|
||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})
|
||||
checkUptimes(t, serviceStatus, 0.50, 0.50, 0.25)
|
||||
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
|
||||
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
|
||||
checkUptimes(t, serviceStatus, 0.75, 0.70, 0.50)
|
||||
}
|
||||
|
||||
func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
||||
service := &core.Service{Name: "name", Group: "group"}
|
||||
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
now := time.Now()
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
// Start 12 days ago
|
||||
timestamp := now.Add(-12 * 24 * time.Hour)
|
||||
for timestamp.Unix() <= now.Unix() {
|
||||
AddResult(serviceStatus, &core.Result{Timestamp: timestamp, Success: true})
|
||||
if len(serviceStatus.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
||||
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(serviceStatus.Uptime.HourlyStatistics))
|
||||
}
|
||||
if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
|
||||
t.Error("most recent timestamp > 1h ago, expected serviceStatus.Uptime.LastHour to be 0, got", serviceStatus.Uptime.LastHour)
|
||||
}
|
||||
if now.Sub(timestamp) < time.Hour && serviceStatus.Uptime.LastHour == 0 {
|
||||
t.Error("most recent timestamp < 1h ago, expected serviceStatus.Uptime.LastHour to NOT be 0, got", serviceStatus.Uptime.LastHour)
|
||||
}
|
||||
// Simulate service with an interval of 3 minutes
|
||||
timestamp = timestamp.Add(3 * time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func checkUptimes(t *testing.T, status *core.ServiceStatus, expectedUptimeDuringLastSevenDays, expectedUptimeDuringLastTwentyFourHours, expectedUptimeDuringLastHour float64) {
|
||||
if status.Uptime.LastSevenDays != expectedUptimeDuringLastSevenDays {
|
||||
t.Errorf("expected status.Uptime.LastSevenDays to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastSevenDays)
|
||||
}
|
||||
if status.Uptime.LastTwentyFourHours != expectedUptimeDuringLastTwentyFourHours {
|
||||
t.Errorf("expected status.Uptime.LastTwentyFourHours to be %f, got %f", expectedUptimeDuringLastTwentyFourHours, status.Uptime.LastTwentyFourHours)
|
||||
}
|
||||
if status.Uptime.LastHour != expectedUptimeDuringLastHour {
|
||||
t.Errorf("expected status.Uptime.LastHour to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastHour)
|
||||
}
|
||||
}
|
||||
|
||||
func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *core.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
|
||||
if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
|
||||
t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
|
||||
}
|
||||
if hourlyUptimeStatistics.TotalExecutions != expectedTotalExecutions {
|
||||
t.Error("TotalExecutions should've been", expectedTotalExecutions, "got", hourlyUptimeStatistics.TotalExecutions)
|
||||
}
|
||||
if hourlyUptimeStatistics.SuccessfulExecutions != expectedSuccessfulExecutions {
|
||||
t.Error("SuccessfulExecutions should've been", expectedSuccessfulExecutions, "got", hourlyUptimeStatistics.SuccessfulExecutions)
|
||||
}
|
||||
}
|
||||
85
storage/store/memory/util.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
)
|
||||
|
||||
// ShallowCopyServiceStatus returns a shallow copy of a ServiceStatus with only the results
|
||||
// within the range defined by the page and pageSize parameters
|
||||
func ShallowCopyServiceStatus(ss *core.ServiceStatus, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||
shallowCopy := &core.ServiceStatus{
|
||||
Name: ss.Name,
|
||||
Group: ss.Group,
|
||||
Key: ss.Key,
|
||||
Uptime: core.NewUptime(),
|
||||
}
|
||||
numberOfResults := len(ss.Results)
|
||||
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
|
||||
if resultsStart < 0 || resultsEnd < 0 {
|
||||
shallowCopy.Results = []*core.Result{}
|
||||
} else {
|
||||
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
|
||||
}
|
||||
numberOfEvents := len(ss.Events)
|
||||
eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
|
||||
if eventsStart < 0 || eventsEnd < 0 {
|
||||
shallowCopy.Events = []*core.Event{}
|
||||
} else {
|
||||
shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
|
||||
}
|
||||
if params.IncludeUptime {
|
||||
shallowCopy.Uptime.LastHour = ss.Uptime.LastHour
|
||||
shallowCopy.Uptime.LastTwentyFourHours = ss.Uptime.LastTwentyFourHours
|
||||
shallowCopy.Uptime.LastSevenDays = ss.Uptime.LastSevenDays
|
||||
}
|
||||
return shallowCopy
|
||||
}
|
||||
|
||||
func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) {
|
||||
if page < 1 || pageSize < 0 {
|
||||
return -1, -1
|
||||
}
|
||||
start := numberOfResults - (page * pageSize)
|
||||
end := numberOfResults - ((page - 1) * pageSize)
|
||||
if start > numberOfResults {
|
||||
start = -1
|
||||
} else if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
if end > numberOfResults {
|
||||
end = numberOfResults
|
||||
}
|
||||
return start, end
|
||||
}
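// Worked example of the index math above: with 25 results (oldest first), page 2 and a page size
// of 10 yield start=25-(2*10)=5 and end=25-(1*10)=15, i.e. Results[5:15]; page 3 yields start=0 and
// end=5 (the 5 oldest results); page 4 yields a negative end, which callers such as
// ShallowCopyServiceStatus treat as an empty page.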
|
||||
|
||||
// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
|
||||
// no more than MaximumNumberOfResults results in the Results slice
|
||||
func AddResult(ss *core.ServiceStatus, result *core.Result) {
|
||||
if ss == nil {
|
||||
return
|
||||
}
|
||||
if len(ss.Results) > 0 {
|
||||
// Check if there's any change since the last result
|
||||
if ss.Results[len(ss.Results)-1].Success != result.Success {
|
||||
ss.Events = append(ss.Events, core.NewEventFromResult(result))
|
||||
if len(ss.Events) > core.MaximumNumberOfEvents {
|
||||
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
|
||||
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
|
||||
// length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
|
||||
ss.Events = ss.Events[len(ss.Events)-core.MaximumNumberOfEvents:]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// This is the first result, so we need to add the first healthy/unhealthy event
|
||||
ss.Events = append(ss.Events, core.NewEventFromResult(result))
|
||||
}
|
||||
ss.Results = append(ss.Results, result)
|
||||
if len(ss.Results) > core.MaximumNumberOfResults {
|
||||
// Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
|
||||
// than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
|
||||
// MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
|
||||
ss.Results = ss.Results[len(ss.Results)-core.MaximumNumberOfResults:]
|
||||
}
|
||||
processUptimeAfterResult(ss.Uptime, result)
|
||||
}
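// Illustrative usage (hypothetical caller): repeatedly calling AddResult keeps len(ss.Results) capped
// at core.MaximumNumberOfResults and len(ss.Events) capped at core.MaximumNumberOfEvents, and a new
// event is only appended when the success value flips between consecutive results (plus one event for
// the very first result).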
|
||||
20
storage/store/memory/util_bench_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
)
|
||||
|
||||
func BenchmarkShallowCopyServiceStatus(b *testing.B) {
|
||||
service := &testService
|
||||
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
for i := 0; i < core.MaximumNumberOfResults; i++ {
|
||||
AddResult(serviceStatus, &testSuccessfulResult)
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 20))
|
||||
}
|
||||
b.ReportAllocs()
|
||||
}
|
||||
79
storage/store/memory/util_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
)
|
||||
|
||||
func TestAddResult(t *testing.T) {
|
||||
service := &core.Service{Name: "name", Group: "group"}
|
||||
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
for i := 0; i < (core.MaximumNumberOfResults+core.MaximumNumberOfEvents)*2; i++ {
|
||||
AddResult(serviceStatus, &core.Result{Success: i%2 == 0, Timestamp: time.Now()})
|
||||
}
|
||||
if len(serviceStatus.Results) != core.MaximumNumberOfResults {
|
||||
t.Errorf("expected serviceStatus.Results to not exceed a length of %d", core.MaximumNumberOfResults)
|
||||
}
|
||||
if len(serviceStatus.Events) != core.MaximumNumberOfEvents {
|
||||
t.Errorf("expected serviceStatus.Events to not exceed a length of %d", core.MaximumNumberOfEvents)
|
||||
}
|
||||
// Try to add nil serviceStatus
|
||||
AddResult(nil, &core.Result{Timestamp: time.Now()})
|
||||
}
|
||||
|
||||
func TestShallowCopyServiceStatus(t *testing.T) {
|
||||
service := &core.Service{Name: "name", Group: "group"}
|
||||
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||
ts := time.Now().Add(-25 * time.Hour)
|
||||
for i := 0; i < 25; i++ {
|
||||
AddResult(serviceStatus, &core.Result{Success: i%2 == 0, Timestamp: ts})
|
||||
ts = ts.Add(time.Hour)
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(-1, -1)).Results) != 0 {
|
||||
t.Error("expected to have 0 result")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 1)).Results) != 1 {
|
||||
t.Error("expected to have 1 result")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(5, 0)).Results) != 0 {
|
||||
t.Error("expected to have 0 results")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(-1, 20)).Results) != 0 {
|
||||
t.Error("expected to have 0 result, because the page was invalid")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, -1)).Results) != 0 {
|
||||
t.Error("expected to have 0 result, because the page size was invalid")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 10)).Results) != 10 {
|
||||
t.Error("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(2, 10)).Results) != 10 {
|
||||
t.Error("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(3, 10)).Results) != 5 {
|
||||
t.Error("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(4, 10)).Results) != 0 {
|
||||
t.Error("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
|
||||
}
|
||||
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 50)).Results) != 25 {
|
||||
t.Error("expected to have 25 results, because there's only 25 results")
|
||||
}
|
||||
uptime := ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithUptime()).Uptime
|
||||
if uptime == nil {
|
||||
t.Error("expected uptime to not be nil")
|
||||
} else {
|
||||
if uptime.LastHour != 1 {
|
||||
t.Error("expected uptime.LastHour to not be 1, got", uptime.LastHour)
|
||||
}
|
||||
if uptime.LastTwentyFourHours != 0.5 {
|
||||
t.Error("expected uptime.LastTwentyFourHours to not be 0.5, got", uptime.LastTwentyFourHours)
|
||||
}
|
||||
if uptime.LastSevenDays != 0.52 {
|
||||
t.Error("expected uptime.LastSevenDays to not be 0.52, got", uptime.LastSevenDays)
|
||||
}
|
||||
}
|
||||
}
|
||||
35
storage/store/paging/paging.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package paging
|
||||
|
||||
// ServiceStatusParams represents all parameters that can be used for paging purposes
|
||||
type ServiceStatusParams struct {
|
||||
EventsPage int // Number of the event page
|
||||
EventsPageSize int // Size of the event page
|
||||
ResultsPage int // Number of the result page
|
||||
ResultsPageSize int // Size of the result page
|
||||
IncludeUptime bool // Whether to include uptime data
|
||||
}
|
||||
|
||||
// NewServiceStatusParams creates a new ServiceStatusParams
|
||||
func NewServiceStatusParams() *ServiceStatusParams {
|
||||
return &ServiceStatusParams{}
|
||||
}
|
||||
|
||||
// WithEvents sets the values for EventsPage and EventsPageSize
|
||||
func (params *ServiceStatusParams) WithEvents(page, pageSize int) *ServiceStatusParams {
|
||||
params.EventsPage = page
|
||||
params.EventsPageSize = pageSize
|
||||
return params
|
||||
}
|
||||
|
||||
// WithResults sets the values for ResultsPage and ResultsPageSize
|
||||
func (params *ServiceStatusParams) WithResults(page, pageSize int) *ServiceStatusParams {
|
||||
params.ResultsPage = page
|
||||
params.ResultsPageSize = pageSize
|
||||
return params
|
||||
}
|
||||
|
||||
// WithUptime sets the value IncludeUptime to true
|
||||
func (params *ServiceStatusParams) WithUptime() *ServiceStatusParams {
|
||||
params.IncludeUptime = true
|
||||
return params
|
||||
}
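// Typical usage chains the builders (illustrative, hypothetical caller):
//
//   params := NewServiceStatusParams().WithResults(1, 20).WithEvents(1, 50).WithUptime()
//
// which asks a store for one page of 20 results, one page of 50 events, and the uptime percentages.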
|
||||
81
storage/store/paging/paging_test.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package paging
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestNewServiceStatusParams(t *testing.T) {
|
||||
type Scenario struct {
|
||||
Name string
|
||||
Params *ServiceStatusParams
|
||||
ExpectedEventsPage int
|
||||
ExpectedEventsPageSize int
|
||||
ExpectedResultsPage int
|
||||
ExpectedResultsPageSize int
|
||||
ExpectedIncludeUptime bool
|
||||
}
|
||||
scenarios := []Scenario{
|
||||
{
|
||||
Name: "empty-params",
|
||||
Params: NewServiceStatusParams(),
|
||||
ExpectedEventsPage: 0,
|
||||
ExpectedEventsPageSize: 0,
|
||||
ExpectedResultsPage: 0,
|
||||
ExpectedResultsPageSize: 0,
|
||||
ExpectedIncludeUptime: false,
|
||||
},
|
||||
{
|
||||
Name: "with-events-page-2-size-7",
|
||||
Params: NewServiceStatusParams().WithEvents(2, 7),
|
||||
ExpectedEventsPage: 2,
|
||||
ExpectedEventsPageSize: 7,
|
||||
ExpectedResultsPage: 0,
|
||||
ExpectedResultsPageSize: 0,
|
||||
ExpectedIncludeUptime: false,
|
||||
},
|
||||
{
|
||||
Name: "with-events-page-4-size-3-uptime",
|
||||
Params: NewServiceStatusParams().WithEvents(4, 3).WithUptime(),
|
||||
ExpectedEventsPage: 4,
|
||||
ExpectedEventsPageSize: 3,
|
||||
ExpectedResultsPage: 0,
|
||||
ExpectedResultsPageSize: 0,
|
||||
ExpectedIncludeUptime: true,
|
||||
},
|
||||
{
|
||||
Name: "with-results-page-1-size-20-uptime",
|
||||
Params: NewServiceStatusParams().WithResults(1, 20).WithUptime(),
|
||||
ExpectedEventsPage: 0,
|
||||
ExpectedEventsPageSize: 0,
|
||||
ExpectedResultsPage: 1,
|
||||
ExpectedResultsPageSize: 20,
|
||||
ExpectedIncludeUptime: true,
|
||||
},
|
||||
{
|
||||
Name: "with-results-page-2-size-10-events-page-3-size-50",
|
||||
Params: NewServiceStatusParams().WithResults(2, 10).WithEvents(3, 50),
|
||||
ExpectedEventsPage: 3,
|
||||
ExpectedEventsPageSize: 50,
|
||||
ExpectedResultsPage: 2,
|
||||
ExpectedResultsPageSize: 10,
|
||||
ExpectedIncludeUptime: false,
|
||||
},
|
||||
}
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.Name, func(t *testing.T) {
|
||||
if scenario.Params.EventsPage != scenario.ExpectedEventsPage {
|
||||
t.Errorf("expected ExpectedEventsPage to be %d, was %d", scenario.ExpectedEventsPageSize, scenario.Params.EventsPage)
|
||||
}
|
||||
if scenario.Params.EventsPageSize != scenario.ExpectedEventsPageSize {
|
||||
t.Errorf("expected EventsPageSize to be %d, was %d", scenario.ExpectedEventsPageSize, scenario.Params.EventsPageSize)
|
||||
}
|
||||
if scenario.Params.ResultsPage != scenario.ExpectedResultsPage {
|
||||
t.Errorf("expected ResultsPage to be %d, was %d", scenario.ExpectedResultsPage, scenario.Params.ResultsPage)
|
||||
}
|
||||
if scenario.Params.ResultsPageSize != scenario.ExpectedResultsPageSize {
|
||||
t.Errorf("expected ResultsPageSize to be %d, was %d", scenario.ExpectedResultsPageSize, scenario.Params.ResultsPageSize)
|
||||
}
|
||||
if scenario.Params.IncludeUptime != scenario.ExpectedIncludeUptime {
|
||||
t.Errorf("expected IncludeUptime to be %v, was %v", scenario.ExpectedIncludeUptime, scenario.Params.IncludeUptime)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
778
storage/store/sqlite/sqlite.go
Normal file
@@ -0,0 +1,778 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/TwinProduction/gatus/core"
|
||||
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||
"github.com/TwinProduction/gatus/util"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Note that only exported functions in this file may create, commit, or rollback a transaction //
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
const (
|
||||
arraySeparator = "|~|"
|
||||
|
||||
uptimeCleanUpThreshold = 10 * 24 * time.Hour // Maximum uptime age before triggering a clean up
|
||||
eventsCleanUpThreshold = core.MaximumNumberOfEvents + 10 // Maximum number of events before triggering a clean up
|
||||
resultsCleanUpThreshold = core.MaximumNumberOfResults + 10 // Maximum number of results before triggering a clean up
|
||||
|
||||
uptimeRetention = 7 * 24 * time.Hour
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrFilePathNotSpecified is the error returned when the path parameter passed to NewStore is blank
ErrFilePathNotSpecified = errors.New("file path cannot be empty")

// ErrDatabaseDriverNotSpecified is the error returned when the driver parameter passed to NewStore is blank
ErrDatabaseDriverNotSpecified = errors.New("database driver cannot be empty")
|
||||
|
||||
errServiceNotFoundInDatabase = errors.New("service does not exist in database")
|
||||
errNoRowsReturned = errors.New("expected a row to be returned, but none was")
|
||||
)
|
||||
|
||||
// Store that leverages a database
|
||||
type Store struct {
|
||||
driver, file string
|
||||
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewStore initializes the database and creates the schema if it doesn't already exist in the file specified
|
||||
func NewStore(driver, path string) (*Store, error) {
|
||||
if len(driver) == 0 {
|
||||
return nil, ErrDatabaseDriverNotSpecified
|
||||
}
|
||||
if len(path) == 0 {
|
||||
return nil, ErrFilePathNotSpecified
|
||||
}
|
||||
store := &Store{driver: driver, file: path}
|
||||
var err error
|
||||
if store.db, err = sql.Open(driver, path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if driver == "sqlite" {
|
||||
_, _ = store.db.Exec("PRAGMA foreign_keys=ON")
|
||||
_, _ = store.db.Exec("PRAGMA journal_mode=WAL")
|
||||
_, _ = store.db.Exec("PRAGMA synchronous=NORMAL")
|
||||
// Prevents driver from running into "database is locked" errors
|
||||
// This is because we're using WAL to improve performance
|
||||
store.db.SetMaxOpenConns(1)
|
||||
}
|
||||
if err = store.createSchema(); err != nil {
|
||||
_ = store.db.Close()
|
||||
return nil, err
|
||||
}
|
||||
return store, nil
|
||||
}
|
||||
|
||||
// createSchema creates the schema required to perform all database operations.
|
||||
func (s *Store) createSchema() error {
|
||||
_, err := s.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS service (
|
||||
service_id INTEGER PRIMARY KEY,
|
||||
service_key TEXT UNIQUE,
|
||||
service_name TEXT,
|
||||
service_group TEXT,
|
||||
UNIQUE(service_name, service_group)
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS service_event (
|
||||
service_event_id INTEGER PRIMARY KEY,
|
||||
service_id INTEGER REFERENCES service(service_id) ON DELETE CASCADE,
|
||||
event_type TEXT,
|
||||
event_timestamp TIMESTAMP
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS service_result (
|
||||
service_result_id INTEGER PRIMARY KEY,
|
||||
service_id INTEGER REFERENCES service(service_id) ON DELETE CASCADE,
|
||||
success INTEGER,
|
||||
errors TEXT,
|
||||
connected INTEGER,
|
||||
status INTEGER,
|
||||
dns_rcode TEXT,
|
||||
certificate_expiration INTEGER,
|
||||
hostname TEXT,
|
||||
ip TEXT,
|
||||
duration INTEGER,
|
||||
timestamp TIMESTAMP
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS service_result_condition (
|
||||
service_result_condition_id INTEGER PRIMARY KEY,
|
||||
service_result_id INTEGER REFERENCES service_result(service_result_id) ON DELETE CASCADE,
|
||||
condition TEXT,
|
||||
success INTEGER
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS service_uptime (
|
||||
service_uptime_id INTEGER PRIMARY KEY,
|
||||
service_id INTEGER REFERENCES service(service_id) ON DELETE CASCADE,
|
||||
hour_unix_timestamp INTEGER,
|
||||
total_executions INTEGER,
|
||||
successful_executions INTEGER,
|
||||
total_response_time INTEGER,
|
||||
UNIQUE(service_id, hour_unix_timestamp)
|
||||
)
|
||||
`)
|
||||
return err
|
||||
}
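// Note on the schema above: service_event, service_result and service_uptime all reference
// service(service_id) with ON DELETE CASCADE (and service_result_condition cascades from
// service_result), so deleting a row from the service table, as Clear and
// DeleteAllServiceStatusesNotInKeys do, also removes all of its events, results, condition results
// and uptime entries. This is why NewStore enables PRAGMA foreign_keys=ON for the sqlite driver.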
|
||||
|
||||
// GetAllServiceStatuses returns all monitored core.ServiceStatus
|
||||
// with a subset of core.Result defined by the page and pageSize parameters
|
||||
func (s *Store) GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
keys, err := s.getAllServiceKeys(tx)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return nil
|
||||
}
|
||||
serviceStatuses := make(map[string]*core.ServiceStatus, len(keys))
|
||||
for _, key := range keys {
|
||||
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
serviceStatuses[key] = serviceStatus
|
||||
}
|
||||
if err = tx.Commit(); err != nil {
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
return serviceStatuses
|
||||
}
|
||||
|
||||
// GetServiceStatus returns the service status for a given service name in the given group
|
||||
func (s *Store) GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName), params)
|
||||
}
|
||||
|
||||
// GetServiceStatusByKey returns the service status for a given key
|
||||
func (s *Store) GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return nil
|
||||
}
|
||||
if err = tx.Commit(); err != nil {
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
return serviceStatus
|
||||
}
|
||||
|
||||
// Insert adds the observed result for the specified service into the store
|
||||
func (s *Store) Insert(service *core.Service, result *core.Result) {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
//start := time.Now()
|
||||
serviceID, err := s.getServiceID(tx, service)
|
||||
if err != nil {
|
||||
if err == errServiceNotFoundInDatabase {
|
||||
// Service doesn't exist in the database, insert it
|
||||
if serviceID, err = s.insertService(tx, service); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return // failed to insert service
|
||||
}
|
||||
} else {
|
||||
_ = tx.Rollback()
|
||||
return
|
||||
}
|
||||
}
|
||||
// First, we need to check if we need to insert a new event.
|
||||
//
|
||||
// A new event must be added if either of the following cases happen:
|
||||
// 1. There is only 1 event. The total number of events for a service can only be 1 if the only existing event is
|
||||
// of type EventStart, in which case we will have to create a new event of type EventHealthy or EventUnhealthy
|
||||
// based on result.Success.
|
||||
// 2. The lastResult.Success != result.Success. This implies that the service went from healthy to unhealthy or
|
||||
// vice-versa, in which case we will have to create a new event of type EventHealthy or EventUnhealthy
|
||||
// based on result.Success.
|
||||
numberOfEvents, err := s.getNumberOfEventsByServiceID(tx, serviceID)
|
||||
if err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to retrieve total number of events for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
}
|
||||
if numberOfEvents == 0 {
|
||||
// There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event
|
||||
err = s.insertEvent(tx, serviceID, &core.Event{
|
||||
Type: core.EventStart,
|
||||
Timestamp: result.Timestamp.Add(-50 * time.Millisecond),
|
||||
})
|
||||
if err != nil {
|
||||
// Silently fail
|
||||
log.Printf("[sqlite][Insert] Failed to insert event=%s for group=%s; service=%s: %s", core.EventStart, service.Group, service.Name, err.Error())
|
||||
}
|
||||
event := core.NewEventFromResult(result)
|
||||
if err = s.insertEvent(tx, serviceID, event); err != nil {
|
||||
// Silently fail
|
||||
log.Printf("[sqlite][Insert] Failed to insert event=%s for group=%s; service=%s: %s", event.Type, service.Group, service.Name, err.Error())
|
||||
}
|
||||
} else {
|
||||
// Get the success value of the previous result
|
||||
var lastResultSuccess bool
|
||||
if lastResultSuccess, err = s.getLastServiceResultSuccessValue(tx, serviceID); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to retrieve outcome of previous result for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
} else {
|
||||
// If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
|
||||
// If the final outcome (success or failure) of the previous and the new result aren't the same, it means
|
||||
// that the service either went from Healthy to Unhealthy or from Unhealthy to Healthy, therefore, we'll add
|
||||
// an event to mark the change in state
|
||||
if lastResultSuccess != result.Success {
|
||||
event := core.NewEventFromResult(result)
|
||||
if err = s.insertEvent(tx, serviceID, event); err != nil {
|
||||
// Silently fail
|
||||
log.Printf("[sqlite][Insert] Failed to insert event=%s for group=%s; service=%s: %s", event.Type, service.Group, service.Name, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Clean up old events if we've exceeded the cleanup threshold (MaximumNumberOfEvents + 10)
// This lets us both keep the table clean without impacting performance too much
// (since we're only deleting a batch of old events every few insertions instead of 1 on every insert)
|
||||
if numberOfEvents > eventsCleanUpThreshold {
|
||||
if err = s.deleteOldServiceEvents(tx, serviceID); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to delete old events for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Second, we need to insert the result.
|
||||
if err = s.insertResult(tx, serviceID, result); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to insert result for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
_ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
|
||||
return
|
||||
}
|
||||
// Clean up old results
|
||||
numberOfResults, err := s.getNumberOfResultsByServiceID(tx, serviceID)
|
||||
if err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to retrieve total number of results for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
} else {
|
||||
if numberOfResults > resultsCleanUpThreshold {
|
||||
if err = s.deleteOldServiceResults(tx, serviceID); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to delete old results for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Finally, we need to insert the uptime data.
|
||||
// Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
|
||||
if err = s.updateServiceUptime(tx, serviceID, result); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to update uptime for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
}
|
||||
// Clean up old uptime entries
|
||||
ageOfOldestUptimeEntry, err := s.getAgeOfOldestServiceUptimeEntry(tx, serviceID)
|
||||
if err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to retrieve oldest service uptime entry for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
} else {
|
||||
if ageOfOldestUptimeEntry > uptimeCleanUpThreshold {
|
||||
if err = s.deleteOldUptimeEntries(tx, serviceID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
|
||||
log.Printf("[sqlite][Insert] Failed to delete old uptime entries for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
//log.Printf("[sqlite][Insert] Successfully inserted result in duration=%dms", time.Since(start).Milliseconds())
|
||||
if err = tx.Commit(); err != nil {
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteAllServiceStatusesNotInKeys removes all rows owned by a service whose key is not within the keys provided
|
||||
func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
|
||||
var err error
|
||||
var result sql.Result
|
||||
if len(keys) == 0 {
|
||||
// Delete everything
|
||||
result, err = s.db.Exec("DELETE FROM service")
|
||||
} else {
|
||||
args := make([]interface{}, 0, len(keys))
|
||||
for i := range keys {
|
||||
args = append(args, keys[i])
|
||||
}
|
||||
result, err = s.db.Exec(fmt.Sprintf("DELETE FROM service WHERE service_key NOT IN (%s)", strings.Trim(strings.Repeat("?,", len(keys)), ",")), args...)
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("[sqlite][DeleteAllServiceStatusesNotInKeys] Failed to delete rows that do not belong to any of keys=%v: %s", keys, err.Error())
|
||||
return 0
|
||||
}
|
||||
rowsAffects, _ := result.RowsAffected()
|
||||
return int(rowsAffects)
|
||||
}
|
||||
|
||||
// Clear deletes everything from the store
|
||||
func (s *Store) Clear() {
|
||||
_, _ = s.db.Exec("DELETE FROM service")
|
||||
}
|
||||
|
||||
// Save does nothing, because this store is immediately persistent.
|
||||
func (s *Store) Save() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the database handle
|
||||
func (s *Store) Close() {
|
||||
_ = s.db.Close()
|
||||
}
|
||||
|
||||
// insertService inserts a service in the store and returns the generated id of said service
|
||||
func (s *Store) insertService(tx *sql.Tx, service *core.Service) (int64, error) {
|
||||
//log.Printf("[sqlite][insertService] Inserting service with group=%s and name=%s", service.Group, service.Name)
|
||||
result, err := tx.Exec(
|
||||
"INSERT INTO service (service_key, service_name, service_group) VALUES ($1, $2, $3)",
|
||||
service.Key(),
|
||||
service.Name,
|
||||
service.Group,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.LastInsertId()
|
||||
}
|
||||
|
||||
// insertEvent inserts a service event in the store
|
||||
func (s *Store) insertEvent(tx *sql.Tx, serviceID int64, event *core.Event) error {
|
||||
_, err := tx.Exec(
|
||||
"INSERT INTO service_event (service_id, event_type, event_timestamp) VALUES ($1, $2, $3)",
|
||||
serviceID,
|
||||
event.Type,
|
||||
event.Timestamp,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// insertResult inserts a result in the store
|
||||
func (s *Store) insertResult(tx *sql.Tx, serviceID int64, result *core.Result) error {
|
||||
res, err := tx.Exec(
|
||||
`
|
||||
INSERT INTO service_result (service_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
|
||||
`,
|
||||
serviceID,
|
||||
result.Success,
|
||||
strings.Join(result.Errors, arraySeparator),
|
||||
result.Connected,
|
||||
result.HTTPStatus,
|
||||
result.DNSRCode,
|
||||
result.CertificateExpiration,
|
||||
result.Hostname,
|
||||
result.IP,
|
||||
result.Duration,
|
||||
result.Timestamp,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
serviceResultID, err := res.LastInsertId()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.insertConditionResults(tx, serviceResultID, result.ConditionResults)
|
||||
}
|
||||
|
||||
func (s *Store) insertConditionResults(tx *sql.Tx, serviceResultID int64, conditionResults []*core.ConditionResult) error {
|
||||
var err error
|
||||
for _, cr := range conditionResults {
|
||||
_, err = tx.Exec("INSERT INTO service_result_condition (service_result_id, condition, success) VALUES ($1, $2, $3)",
|
||||
serviceResultID,
|
||||
cr.Condition,
|
||||
cr.Success,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) updateServiceUptime(tx *sql.Tx, serviceID int64, result *core.Result) error {
|
||||
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
|
||||
var successfulExecutions int
|
||||
if result.Success {
|
||||
successfulExecutions = 1
|
||||
}
|
||||
_, err := tx.Exec(
|
||||
`
|
||||
INSERT INTO service_uptime (service_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT(service_id, hour_unix_timestamp) DO UPDATE SET
|
||||
total_executions = excluded.total_executions + total_executions,
|
||||
successful_executions = excluded.successful_executions + successful_executions,
|
||||
total_response_time = excluded.total_response_time + total_response_time
|
||||
`,
|
||||
serviceID,
|
||||
unixTimestampFlooredAtHour,
|
||||
1,
|
||||
successfulExecutions,
|
||||
result.Duration.Milliseconds(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
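// Example of the ON CONFLICT accumulation above (illustrative numbers): two results falling in the
// same hour, one successful taking 90ms and one failed taking 110ms, leave that hour's row with
// total_executions=2, successful_executions=1 and total_response_time=200.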
|
||||
|
||||
func (s *Store) getAllServiceKeys(tx *sql.Tx) (keys []string, err error) {
|
||||
rows, err := tx.Query("SELECT service_key FROM service")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for rows.Next() {
|
||||
var key string
|
||||
_ = rows.Scan(&key)
|
||||
keys = append(keys, key)
|
||||
}
|
||||
_ = rows.Close()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Store) getServiceStatusByKey(tx *sql.Tx, key string, parameters *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
|
||||
serviceID, serviceGroup, serviceName, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceStatus := core.NewServiceStatus(key, serviceGroup, serviceName)
|
||||
if parameters.EventsPageSize > 0 {
|
||||
if serviceStatus.Events, err = s.getEventsByServiceID(tx, serviceID, parameters.EventsPage, parameters.EventsPageSize); err != nil {
|
||||
log.Printf("[sqlite][getServiceStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
|
||||
}
|
||||
}
|
||||
if parameters.ResultsPageSize > 0 {
|
||||
if serviceStatus.Results, err = s.getResultsByServiceID(tx, serviceID, parameters.ResultsPage, parameters.ResultsPageSize); err != nil {
|
||||
log.Printf("[sqlite][getServiceStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
|
||||
}
|
||||
}
|
||||
if parameters.IncludeUptime {
|
||||
now := time.Now()
|
||||
serviceStatus.Uptime.LastHour, _, err = s.getServiceUptime(tx, serviceID, now.Add(-time.Hour), now)
|
||||
serviceStatus.Uptime.LastTwentyFourHours, _, err = s.getServiceUptime(tx, serviceID, now.Add(-24*time.Hour), now)
|
||||
serviceStatus.Uptime.LastSevenDays, _, err = s.getServiceUptime(tx, serviceID, now.Add(-7*24*time.Hour), now)
|
||||
}
|
||||
return serviceStatus, nil
|
||||
}
|
||||
|
||||
func (s *Store) getServiceIDGroupAndNameByKey(tx *sql.Tx, key string) (id int64, group, name string, err error) {
|
||||
rows, err := tx.Query(
|
||||
`
|
||||
SELECT service_id, service_group, service_name
|
||||
FROM service
|
||||
WHERE service_key = $1
|
||||
LIMIT 1
|
||||
`,
|
||||
key,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, "", "", err
|
||||
}
|
||||
for rows.Next() {
|
||||
_ = rows.Scan(&id, &group, &name)
|
||||
}
|
||||
_ = rows.Close()
|
||||
if id == 0 {
|
||||
return 0, "", "", errServiceNotFoundInDatabase
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Store) getEventsByServiceID(tx *sql.Tx, serviceID int64, page, pageSize int) (events []*core.Event, err error) {
|
||||
rows, err := tx.Query(
|
||||
`
|
||||
SELECT event_type, event_timestamp
|
||||
FROM service_event
|
||||
WHERE service_id = $1
|
||||
ORDER BY service_event_id ASC
|
||||
LIMIT $2 OFFSET $3
|
||||
`,
|
||||
serviceID,
|
||||
pageSize,
|
||||
(page-1)*pageSize,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for rows.Next() {
|
||||
event := &core.Event{}
|
||||
_ = rows.Scan(&event.Type, &event.Timestamp)
|
||||
events = append(events, event)
|
||||
}
|
||||
_ = rows.Close()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSize int) (results []*core.Result, err error) {
|
||||
rows, err := tx.Query(
|
||||
`
|
||||
SELECT service_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp
|
||||
FROM service_result
|
||||
WHERE service_id = $1
|
||||
ORDER BY service_result_id DESC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
|
||||
LIMIT $2 OFFSET $3
|
||||
`,
|
||||
//`
|
||||
// SELECT * FROM (
|
||||
// SELECT service_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp
|
||||
// FROM service_result
|
||||
// WHERE service_id = $1
|
||||
// ORDER BY service_result_id DESC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
|
||||
// LIMIT $2 OFFSET $3
|
||||
// )
|
||||
// ORDER BY service_result_id ASC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
|
||||
//`,
|
||||
serviceID,
|
||||
pageSize,
|
||||
(page-1)*pageSize,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
idResultMap := make(map[int64]*core.Result)
|
||||
for rows.Next() {
|
||||
result := &core.Result{}
|
||||
var id int64
|
||||
var joinedErrors string
|
||||
_ = rows.Scan(&id, &result.Success, &joinedErrors, &result.Connected, &result.HTTPStatus, &result.DNSRCode, &result.CertificateExpiration, &result.Hostname, &result.IP, &result.Duration, &result.Timestamp)
|
||||
if len(joinedErrors) != 0 {
|
||||
result.Errors = strings.Split(joinedErrors, arraySeparator)
|
||||
}
|
||||
//results = append(results, result)
|
||||
// This is faster than using a subselect
|
||||
results = append([]*core.Result{result}, results...)
|
||||
idResultMap[id] = result
|
||||
}
|
||||
_ = rows.Close()
|
||||
// Get the conditionResults
|
||||
for serviceResultID, result := range idResultMap {
|
||||
rows, err = tx.Query(
|
||||
`
|
||||
SELECT condition, success
|
||||
FROM service_result_condition
|
||||
WHERE service_result_id = $1
|
||||
`,
|
||||
serviceResultID,
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for rows.Next() {
|
||||
conditionResult := &core.ConditionResult{}
|
||||
if err = rows.Scan(&conditionResult.Condition, &conditionResult.Success); err != nil {
|
||||
return
|
||||
}
|
||||
result.ConditionResults = append(result.ConditionResults, conditionResult)
|
||||
}
|
||||
_ = rows.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Store) getServiceUptime(tx *sql.Tx, serviceID int64, from, to time.Time) (uptime float64, avgResponseTime time.Duration, err error) {
|
||||
rows, err := tx.Query(
|
||||
`
|
||||
SELECT SUM(total_executions), SUM(successful_executions), SUM(total_response_time)
|
||||
FROM service_uptime
|
||||
WHERE service_id = $1
|
||||
AND hour_unix_timestamp >= $2
|
||||
AND hour_unix_timestamp <= $3
|
||||
`,
|
||||
serviceID,
|
||||
from.Unix(),
|
||||
to.Unix(),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
var totalExecutions, totalSuccessfulExecutions, totalResponseTime int
|
||||
for rows.Next() {
|
||||
_ = rows.Scan(&totalExecutions, &totalSuccessfulExecutions, &totalResponseTime)
|
||||
break
|
||||
}
|
||||
_ = rows.Close()
|
||||
if totalExecutions > 0 {
|
||||
uptime = float64(totalSuccessfulExecutions) / float64(totalExecutions)
|
||||
avgResponseTime = time.Duration(float64(totalResponseTime)/float64(totalExecutions)) * time.Millisecond
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Store) getServiceID(tx *sql.Tx, service *core.Service) (int64, error) {
	rows, err := tx.Query("SELECT service_id FROM service WHERE service_key = $1", service.Key())
	if err != nil {
		return 0, err
	}
	var id int64
	var found bool
	for rows.Next() {
		_ = rows.Scan(&id)
		found = true
		break
	}
	_ = rows.Close()
	if !found {
		return 0, errServiceNotFoundInDatabase
	}
	return id, nil
}

func (s *Store) getNumberOfEventsByServiceID(tx *sql.Tx, serviceID int64) (int64, error) {
	rows, err := tx.Query("SELECT COUNT(1) FROM service_event WHERE service_id = $1", serviceID)
	if err != nil {
		return 0, err
	}
	var numberOfEvents int64
	for rows.Next() {
		_ = rows.Scan(&numberOfEvents)
	}
	_ = rows.Close()
	return numberOfEvents, nil
}

func (s *Store) getNumberOfResultsByServiceID(tx *sql.Tx, serviceID int64) (int64, error) {
	rows, err := tx.Query("SELECT COUNT(1) FROM service_result WHERE service_id = $1", serviceID)
	if err != nil {
		return 0, err
	}
	var numberOfResults int64
	for rows.Next() {
		_ = rows.Scan(&numberOfResults)
	}
	_ = rows.Close()
	return numberOfResults, nil
}

func (s *Store) getAgeOfOldestServiceUptimeEntry(tx *sql.Tx, serviceID int64) (time.Duration, error) {
	rows, err := tx.Query(
		`
			SELECT hour_unix_timestamp
			FROM service_uptime
			WHERE service_id = $1
			ORDER BY hour_unix_timestamp
			LIMIT 1
		`,
		serviceID,
	)
	if err != nil {
		return 0, err
	}
	var oldestServiceUptimeUnixTimestamp int64
	var found bool
	for rows.Next() {
		_ = rows.Scan(&oldestServiceUptimeUnixTimestamp)
		found = true
		break
	}
	_ = rows.Close()
	if !found {
		return 0, errNoRowsReturned
	}
	return time.Since(time.Unix(oldestServiceUptimeUnixTimestamp, 0)), nil
}

func (s *Store) getLastServiceResultSuccessValue(tx *sql.Tx, serviceID int64) (bool, error) {
	rows, err := tx.Query("SELECT success FROM service_result WHERE service_id = $1 ORDER BY service_result_id DESC LIMIT 1", serviceID)
	if err != nil {
		return false, err
	}
	var success bool
	var found bool
	for rows.Next() {
		_ = rows.Scan(&success)
		found = true
		break
	}
	_ = rows.Close()
	if !found {
		return false, errNoRowsReturned
	}
	return success, nil
}

// deleteOldServiceEvents deletes old service events that are no longer needed
func (s *Store) deleteOldServiceEvents(tx *sql.Tx, serviceID int64) error {
	_, err := tx.Exec(
		`
			DELETE FROM service_event
			WHERE service_id = $1
				AND service_event_id NOT IN (
					SELECT service_event_id
					FROM service_event
					WHERE service_id = $1
					ORDER BY service_event_id DESC
					LIMIT $2
				)
		`,
		serviceID,
		core.MaximumNumberOfEvents,
	)
	if err != nil {
		return err
	}
	//rowsAffected, _ := result.RowsAffected()
	//log.Printf("deleted %d rows from service_event", rowsAffected)
	return nil
}

// deleteOldServiceResults deletes old service results that are no longer needed
func (s *Store) deleteOldServiceResults(tx *sql.Tx, serviceID int64) error {
	_, err := tx.Exec(
		`
			DELETE FROM service_result
			WHERE service_id = $1
				AND service_result_id NOT IN (
					SELECT service_result_id
					FROM service_result
					WHERE service_id = $1
					ORDER BY service_result_id DESC
					LIMIT $2
				)
		`,
		serviceID,
		core.MaximumNumberOfResults,
	)
	if err != nil {
		return err
	}
	//rowsAffected, _ := result.RowsAffected()
	//log.Printf("deleted %d rows from service_result", rowsAffected)
	return nil
}

func (s *Store) deleteOldUptimeEntries(tx *sql.Tx, serviceID int64, maxAge time.Time) error {
	_, err := tx.Exec("DELETE FROM service_uptime WHERE service_id = $1 AND hour_unix_timestamp < $2", serviceID, maxAge.Unix())
	//if err != nil {
	//	return err
	//}
	//rowsAffected, _ := result.RowsAffected()
	//log.Printf("deleted %d rows from service_uptime", rowsAffected)
	return err
}
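The two DELETE statements above keep only the N most recent rows per service. A standalone sketch of the same retention rule applied to a slice; maxRows stands in for core.MaximumNumberOfEvents / core.MaximumNumberOfResults and the IDs are hypothetical:

package main

import "fmt"

func main() {
	// Mirrors DELETE ... WHERE id NOT IN (SELECT id ... ORDER BY id DESC LIMIT N):
	// everything except the N highest (most recent) IDs is removed.
	const maxRows = 3
	ids := []int64{1, 2, 3, 4, 5, 6, 7} // oldest to newest
	keepFrom := len(ids) - maxRows
	if keepFrom < 0 {
		keepFrom = 0
	}
	fmt.Println("deleted:", ids[:keepFrom]) // deleted: [1 2 3 4]
	fmt.Println("kept:", ids[keepFrom:])    // kept: [5 6 7]
}
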
352 storage/store/sqlite/sqlite_test.go Normal file
@@ -0,0 +1,352 @@
package sqlite

import (
	"testing"
	"time"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/paging"
)

var (
	firstCondition  = core.Condition("[STATUS] == 200")
	secondCondition = core.Condition("[RESPONSE_TIME] < 500")
	thirdCondition  = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")

	now = time.Now()

	testService = core.Service{
		Name:                    "name",
		Group:                   "group",
		URL:                     "https://example.org/what/ever",
		Method:                  "GET",
		Body:                    "body",
		Interval:                30 * time.Second,
		Conditions:              []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
		Alerts:                  nil,
		Insecure:                false,
		NumberOfFailuresInARow:  0,
		NumberOfSuccessesInARow: 0,
	}
	testSuccessfulResult = core.Result{
		Hostname:              "example.org",
		IP:                    "127.0.0.1",
		HTTPStatus:            200,
		Errors:                nil,
		Connected:             true,
		Success:               true,
		Timestamp:             now,
		Duration:              150 * time.Millisecond,
		CertificateExpiration: 10 * time.Hour,
		ConditionResults: []*core.ConditionResult{
			{
				Condition: "[STATUS] == 200",
				Success:   true,
			},
			{
				Condition: "[RESPONSE_TIME] < 500",
				Success:   true,
			},
			{
				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
				Success:   true,
			},
		},
	}
	testUnsuccessfulResult = core.Result{
		Hostname:              "example.org",
		IP:                    "127.0.0.1",
		HTTPStatus:            200,
		Errors:                []string{"error-1", "error-2"},
		Connected:             true,
		Success:               false,
		Timestamp:             now,
		Duration:              750 * time.Millisecond,
		CertificateExpiration: 10 * time.Hour,
		ConditionResults: []*core.ConditionResult{
			{
				Condition: "[STATUS] == 200",
				Success:   true,
			},
			{
				Condition: "[RESPONSE_TIME] < 500",
				Success:   false,
			},
			{
				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
				Success:   false,
			},
		},
	}
)

func TestNewStore(t *testing.T) {
	if _, err := NewStore("", "TestNewStore.db"); err != ErrDatabaseDriverNotSpecified {
		t.Error("expected error due to blank driver parameter")
	}
	if _, err := NewStore("sqlite", ""); err != ErrFilePathNotSpecified {
		t.Error("expected error due to blank path parameter")
	}
	if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db"); err != nil {
		t.Error("shouldn't have returned any error, got", err.Error())
	} else {
		_ = store.db.Close()
	}
}

func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpOldUptimeEntriesProperly.db")
	defer store.Close()
	now := time.Now().Round(time.Minute)
	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())

	store.Insert(&testService, &core.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})

	tx, _ := store.db.Begin()
	oldest, _ := store.getAgeOfOldestServiceUptimeEntry(tx, 1)
	_ = tx.Commit()
	if oldest.Truncate(time.Hour) != 5*time.Hour {
		t.Errorf("oldest service uptime entry should've been ~5 hours old, was %s", oldest)
	}

	// The oldest cache entry should remain at ~5 hours old, because this entry is more recent
	store.Insert(&testService, &core.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})

	tx, _ = store.db.Begin()
	oldest, _ = store.getAgeOfOldestServiceUptimeEntry(tx, 1)
	_ = tx.Commit()
	if oldest.Truncate(time.Hour) != 5*time.Hour {
		t.Errorf("oldest service uptime entry should've been ~5 hours old, was %s", oldest)
	}

	// The oldest cache entry should now be ~8 hours old, because this entry is older
	store.Insert(&testService, &core.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})

	tx, _ = store.db.Begin()
	oldest, _ = store.getAgeOfOldestServiceUptimeEntry(tx, 1)
	_ = tx.Commit()
	if oldest.Truncate(time.Hour) != 8*time.Hour {
		t.Errorf("oldest service uptime entry should've been ~8 hours old, was %s", oldest)
	}

	// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
	store.Insert(&testService, &core.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold - time.Hour)), Success: true})

	tx, _ = store.db.Begin()
	oldest, _ = store.getAgeOfOldestServiceUptimeEntry(tx, 1)
	_ = tx.Commit()
	if oldest.Truncate(time.Hour) != uptimeCleanUpThreshold-time.Hour {
		t.Errorf("oldest service uptime entry should've been ~%s hours old, was %s", uptimeCleanUpThreshold-time.Hour, oldest)
	}

	// Since this entry is past the uptimeCleanUpThreshold, both this entry and the previous
	// one should be deleted, as they both exceed uptimeRetention
	store.Insert(&testService, &core.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold + time.Hour)), Success: true})

	tx, _ = store.db.Begin()
	oldest, _ = store.getAgeOfOldestServiceUptimeEntry(tx, 1)
	_ = tx.Commit()
	if oldest.Truncate(time.Hour) != 8*time.Hour {
		t.Errorf("oldest service uptime entry should've been ~8 hours old, was %s", oldest)
	}
}

func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db")
	defer store.Close()
	for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
		store.Insert(&testService, &testSuccessfulResult)
		store.Insert(&testService, &testUnsuccessfulResult)
		ss := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams().WithResults(1, core.MaximumNumberOfResults*5).WithEvents(1, core.MaximumNumberOfEvents*5))
		if len(ss.Results) > resultsCleanUpThreshold+1 {
			t.Errorf("number of results shouldn't have exceeded %d, reached %d", resultsCleanUpThreshold, len(ss.Results))
		}
		if len(ss.Events) > eventsCleanUpThreshold+1 {
			t.Errorf("number of events shouldn't have exceeded %d, reached %d", eventsCleanUpThreshold, len(ss.Events))
		}
	}
	store.Clear()
}

func TestStore_Persistence(t *testing.T) {
	file := t.TempDir() + "/TestStore_Persistence.db"
	store, _ := NewStore("sqlite", file)
	store.Insert(&testService, &testSuccessfulResult)
	store.Insert(&testService, &testUnsuccessfulResult)
	ssFromOldStore := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithResults(1, core.MaximumNumberOfResults).WithEvents(1, core.MaximumNumberOfEvents).WithUptime())
	if ssFromOldStore == nil || ssFromOldStore.Group != "group" || ssFromOldStore.Name != "name" || len(ssFromOldStore.Events) != 3 || len(ssFromOldStore.Results) != 2 || ssFromOldStore.Uptime.LastHour != 0.5 || ssFromOldStore.Uptime.LastTwentyFourHours != 0.5 || ssFromOldStore.Uptime.LastSevenDays != 0.5 {
		store.Close()
		t.Fatal("sanity check failed")
	}
	store.Close()
	store, _ = NewStore("sqlite", file)
	defer store.Close()
	ssFromNewStore := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithResults(1, core.MaximumNumberOfResults).WithEvents(1, core.MaximumNumberOfEvents).WithUptime())
	if ssFromNewStore == nil || ssFromNewStore.Group != "group" || ssFromNewStore.Name != "name" || len(ssFromNewStore.Events) != 3 || len(ssFromNewStore.Results) != 2 || ssFromNewStore.Uptime.LastHour != 0.5 || ssFromNewStore.Uptime.LastTwentyFourHours != 0.5 || ssFromNewStore.Uptime.LastSevenDays != 0.5 {
		t.Fatal("failed sanity check")
	}
	if ssFromNewStore == ssFromOldStore {
		t.Fatal("ss from the old and new store should have a different memory address")
	}
	for i := range ssFromNewStore.Events {
		if ssFromNewStore.Events[i].Timestamp != ssFromOldStore.Events[i].Timestamp {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Events[i].Type != ssFromOldStore.Events[i].Type {
			t.Error("new and old should've been the same")
		}
	}
	for i := range ssFromOldStore.Results {
		if ssFromNewStore.Results[i].Timestamp != ssFromOldStore.Results[i].Timestamp {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].Success != ssFromOldStore.Results[i].Success {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].Connected != ssFromOldStore.Results[i].Connected {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].IP != ssFromOldStore.Results[i].IP {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].Hostname != ssFromOldStore.Results[i].Hostname {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].HTTPStatus != ssFromOldStore.Results[i].HTTPStatus {
			t.Error("new and old should've been the same")
		}
		if ssFromNewStore.Results[i].DNSRCode != ssFromOldStore.Results[i].DNSRCode {
			t.Error("new and old should've been the same")
		}
		if len(ssFromNewStore.Results[i].Errors) != len(ssFromOldStore.Results[i].Errors) {
			t.Error("new and old should've been the same")
		} else {
			for j := range ssFromOldStore.Results[i].Errors {
				if ssFromNewStore.Results[i].Errors[j] != ssFromOldStore.Results[i].Errors[j] {
					t.Error("new and old should've been the same")
				}
			}
		}
		if len(ssFromNewStore.Results[i].ConditionResults) != len(ssFromOldStore.Results[i].ConditionResults) {
			t.Error("new and old should've been the same")
		} else {
			for j := range ssFromOldStore.Results[i].ConditionResults {
				if ssFromNewStore.Results[i].ConditionResults[j].Condition != ssFromOldStore.Results[i].ConditionResults[j].Condition {
					t.Error("new and old should've been the same")
				}
				if ssFromNewStore.Results[i].ConditionResults[j].Success != ssFromOldStore.Results[i].ConditionResults[j].Success {
					t.Error("new and old should've been the same")
				}
			}
		}
	}
}

func TestStore_Save(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_Save.db")
	defer store.Close()
	if store.Save() != nil {
		t.Error("Save shouldn't do anything for this store")
	}
}

// Note that there are much more extensive tests in /storage/store/store_test.go.
// This test is simply an extra sanity check
func TestStore_SanityCheck(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_SanityCheck.db")
	defer store.Close()
	store.Insert(&testService, &testSuccessfulResult)
	if numberOfServiceStatuses := len(store.GetAllServiceStatuses(paging.NewServiceStatusParams())); numberOfServiceStatuses != 1 {
		t.Fatalf("expected 1 ServiceStatus, got %d", numberOfServiceStatuses)
	}
	store.Insert(&testService, &testUnsuccessfulResult)
	// Both results inserted are for the same service, therefore the count shouldn't have increased
	if numberOfServiceStatuses := len(store.GetAllServiceStatuses(paging.NewServiceStatusParams())); numberOfServiceStatuses != 1 {
		t.Fatalf("expected 1 ServiceStatus, got %d", numberOfServiceStatuses)
	}
	ss := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithResults(1, 20).WithEvents(1, 20))
	if ss == nil {
		t.Fatalf("Store should've had key '%s', but didn't", testService.Key())
	}
	if len(ss.Events) != 3 {
		t.Errorf("Service '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
	}
	if len(ss.Results) != 2 {
		t.Errorf("Service '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
	}
	if deleted := store.DeleteAllServiceStatusesNotInKeys([]string{}); deleted != 1 {
		t.Errorf("%d entries should've been deleted, got %d", 1, deleted)
	}
}

// TestStore_InvalidTransaction tests what happens if an invalid transaction is passed as parameter
func TestStore_InvalidTransaction(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InvalidTransaction.db")
	defer store.Close()
	tx, _ := store.db.Begin()
	tx.Commit()
	if _, err := store.insertService(tx, &testService); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.insertEvent(tx, 1, core.NewEventFromResult(&testSuccessfulResult)); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.insertResult(tx, 1, &testSuccessfulResult); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.insertConditionResults(tx, 1, testSuccessfulResult.ConditionResults); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.updateServiceUptime(tx, 1, &testSuccessfulResult); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getAllServiceKeys(tx); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getServiceStatusByKey(tx, testService.Key(), paging.NewServiceStatusParams().WithResults(1, 20)); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getEventsByServiceID(tx, 1, 1, 50); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getResultsByServiceID(tx, 1, 1, 50); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.deleteOldServiceEvents(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if err := store.deleteOldServiceResults(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, _, err := store.getServiceUptime(tx, 1, time.Now(), time.Now()); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getServiceID(tx, &testService); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getNumberOfEventsByServiceID(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getNumberOfResultsByServiceID(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getAgeOfOldestServiceUptimeEntry(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
	if _, err := store.getLastServiceResultSuccessValue(tx, 1); err == nil {
		t.Error("should've returned an error, because the transaction was already committed")
	}
}

func TestStore_NoRows(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_NoRows.db")
	defer store.Close()
	tx, _ := store.db.Begin()
	defer tx.Rollback()
	if _, err := store.getLastServiceResultSuccessValue(tx, 1); err != errNoRowsReturned {
		t.Errorf("should've %v, got %v", errNoRowsReturned, err)
	}
	if _, err := store.getAgeOfOldestServiceUptimeEntry(tx, 1); err != errNoRowsReturned {
		t.Errorf("should've %v, got %v", errNoRowsReturned, err)
	}
}
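The tests above share the same skeleton: create a throwaway database under t.TempDir(), insert the shared fixtures, and assert against the returned ServiceStatus. A minimal sketch of an additional test in this package reusing those fixtures (the test name is hypothetical):

func TestStore_SketchExample(t *testing.T) {
	store, err := NewStore("sqlite", t.TempDir()+"/TestStore_SketchExample.db")
	if err != nil {
		t.Fatal(err)
	}
	defer store.Close()
	store.Insert(&testService, &testSuccessfulResult)
	ss := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams().WithResults(1, 20))
	if ss == nil || len(ss.Results) != 1 {
		t.Error("expected exactly one result for the inserted service")
	}
}
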
@@ -3,19 +3,21 @@ package store
 import (
 	"github.com/TwinProduction/gatus/core"
 	"github.com/TwinProduction/gatus/storage/store/memory"
+	"github.com/TwinProduction/gatus/storage/store/paging"
+	"github.com/TwinProduction/gatus/storage/store/sqlite"
 )
 
 // Store is the interface that each stores should implement
 type Store interface {
-	// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
+	// GetAllServiceStatuses returns the JSON encoding of all monitored core.ServiceStatus
 	// with a subset of core.Result defined by the page and pageSize parameters
-	GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus
+	GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus
 
 	// GetServiceStatus returns the service status for a given service name in the given group
-	GetServiceStatus(groupName, serviceName string) *core.ServiceStatus
+	GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) *core.ServiceStatus
 
 	// GetServiceStatusByKey returns the service status for a given key
-	GetServiceStatusByKey(key string) *core.ServiceStatus
+	GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus
 
 	// Insert adds the observed result for the specified service into the store
 	Insert(service *core.Service, result *core.Result)
@@ -30,9 +32,16 @@ type Store interface {
 
 	// Save persists the data if and where it needs to be persisted
 	Save() error
+
+	// Close terminates every connections and closes the store, if applicable.
+	// Should only be used before stopping the application.
+	Close()
 }
 
+// TODO: add method to check state of store (by keeping track of silent errors)
+
 var (
 	// Validate interface implementation on compile
 	_ Store = (*memory.Store)(nil)
+	_ Store = (*sqlite.Store)(nil)
 )
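With the interface above, callers can program against store.Store and only pick the concrete backend at startup. A hedged wiring sketch (the database path and service values are arbitrary examples, not taken from the repository):

package main

import (
	"log"
	"time"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store"
	"github.com/TwinProduction/gatus/storage/store/paging"
	"github.com/TwinProduction/gatus/storage/store/sqlite"
)

func main() {
	// Any type satisfying store.Store can be swapped in here; sqlite is used as an example
	var s store.Store
	s, err := sqlite.NewStore("sqlite", "/tmp/gatus.db") // example path
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	svc := &core.Service{Name: "name", Group: "group", URL: "https://example.org"}
	s.Insert(svc, &core.Result{Success: true, Timestamp: time.Now()})
	if ss := s.GetServiceStatus(svc.Group, svc.Name, paging.NewServiceStatusParams().WithResults(1, 20)); ss != nil {
		log.Printf("stored %d result(s) for %s", len(ss.Results), svc.Name)
	}
}
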
Some files were not shown because too many files have changed in this diff.