Compare commits
51 commits

| Author | SHA1 | Date |
|---|---|---|
| | c094c06e56 | |
| | f961bf961e | |
| | 404a3cea64 | |
| | d000460a99 | |
| | 455fae05c1 | |
| | 85dff34350 | |
| | 99fa632021 | |
| | cafcc9d45b | |
| | dc929dac70 | |
| | 42825b62fb | |
| | a89bb392ed | |
| | e7c4d03c22 | |
| | 4e6bf91651 | |
| | de31a7a62e | |
| | 7f0543ebd2 | |
| | 9b893aa4e0 | |
| | 50435f4030 | |
| | 9649d80388 | |
| | 8c3ab1eac2 | |
| | cdb5ba080a | |
| | 0a145da912 | |
| | b603cdb0ea | |
| | 11d1f24ceb | |
| | c74472d332 | |
| | 78a1262e7c | |
| | 7ff8907eda | |
| | 1d21f5889d | |
| | d7d904ae5f | |
| | 7390895514 | |
| | 2873d96b9f | |
| | ea9623f695 | |
| | 9cdef02bdc | |
| | 16229592a2 | |
| | 52ad4ee9e5 | |
| | b47c6dc408 | |
| | 8e2a2c4dbc | |
| | 8698736e7d | |
| | cd1430f043 | |
| | 79bef8d391 | |
| | 9196f57487 | |
| | 1e0d9e184c | |
| | bc42497cb7 | |
| | b0b0ab574d | |
| | e369484e5f | |
| | d18618449f | |
| | 9186049589 | |
| | 4362831f71 | |
| | 99f558d43e | |
| | 4a3d5944b6 | |
| | 688456d7cf | |
| | 431fb3e9f2 | |
.github/workflows/build.yml (vendored) · 6 changes

@@ -24,9 +24,9 @@ jobs:
       run: go build -mod vendor
     - name: Test
       # We're using "sudo" because one of the tests leverages ping, which requires super-user privileges.
-      # As for the "PATH=$PATH", we need it to use the same "go" executable that was configured by the "Set
-      # up Go" step (otherwise, it'd use sudo's "go" executable)
-      run: sudo "PATH=$PATH" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
+      # As for the 'env "PATH=$PATH" "GOROOT=$GOROOT"', we need it to use the same "go" executable that
+      # was configured by the "Set up Go 1.15" step (otherwise, it'd use sudo's "go" executable)
+      run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
     - name: Codecov
       uses: codecov/codecov-action@v1.0.14
       with:
.gitignore (vendored) · 3 changes

@@ -1,3 +1,4 @@
 .idea
 .vscode
 gatus
+db.db
Makefile · 5 changes

@@ -8,4 +8,7 @@ build-frontend:
 	npm --prefix web/app run build

 run-frontend:
 	npm --prefix web/app run serve
+
+test:
+	go test ./alerting/... ./client/... ./config/... ./controller/... ./core/... ./jsonpath/... ./pattern/... ./security/... ./storage/... ./util/... ./watchdog/... -cover
README.md · 51 changes

@@ -7,8 +7,10 @@
 [](https://cloud.docker.com/repository/docker/twinproduction/gatus)
 [](https://github.com/TwinProduction)

-A service health dashboard in Go that is meant to be used as a docker
-image with a custom configuration file.
+Gatus is a health dashboard that gives you the ability to monitor your services using HTTP, ICMP, TCP, and even DNS
+queries as well as evaluate the result of said queries by using a list of conditions on values like the status code,
+the response time, the certificate expiration, the body and many others. The icing on top is that each of these health
+checks can be paired with alerting via Slack, PagerDuty and even Twilio.

 I personally deploy it in my Kubernetes cluster and let it monitor the status of my
 core applications: https://status.twinnation.org/

@@ -100,6 +102,8 @@ Note that you can also add environment variables in the configuration file (i.e.
 |:---------------------------------------- |:----------------------------------------------------------------------------- |:-------------- |
 | `debug` | Whether to enable debug logs | `false` |
 | `metrics` | Whether to expose metrics at /metrics | `false` |
+| `storage` | Storage configuration | `{}` |
+| `storage.file` | File to persist the data in. If not set, storage is in-memory only. | `""` |
 | `services` | List of services to monitor | Required `[]` |
 | `services[].name` | Name of the service. Can be anything. | Required `""` |
 | `services[].group` | Group name. Used to group multiple services together on the dashboard. See [Service groups](#service-groups). | `""` |

@@ -165,7 +169,7 @@ Here are some examples of conditions you can use:
 | `[STATUS] < 300` | Status must be lower than 300 | 200, 201, 299 | 301, 302, ... |
 | `[STATUS] <= 299` | Status must be less than or equal to 299 | 200, 201, 299 | 301, 302, ... |
 | `[STATUS] > 400` | Status must be greater than 400 | 401, 402, 403, 404 | 400, 200, ... |
-| `[STATUS] == any(200, 429)` | Status must be either 200 or 420 | 200, 429 | 201, 400, ... |
+| `[STATUS] == any(200, 429)` | Status must be either 200 or 429 | 200, 429 | 201, 400, ... |
 | `[CONNECTED] == true` | Connection to host must've been successful | true, false | |
 | `[RESPONSE_TIME] < 500` | Response time must be below 500ms | 100ms, 200ms, 300ms | 500ms, 501ms |
 | `[IP] == 127.0.0.1` | Target IP must be 127.0.0.1 | 127.0.0.1 | 0.0.0.0 |

@@ -206,6 +210,12 @@ Here are some examples of conditions you can use:

 ### Alerting

 Gatus supports multiple alerting providers, such as Slack and PagerDuty, and supports different alerts for each
 individual service with configurable descriptions and thresholds.

+Note that if an alerting provider is not configured properly, all alerts configured with the provider's type will be
+ignored.
+
 #### Configuring Slack alerts

@@ -356,14 +366,15 @@ leveraging Gatus, you could have Gatus call that application endpoint when a ser
 would then check if the service that started failing was recently deployed, and if it was, then automatically
 roll it back.

-The values `[ALERT_DESCRIPTION]` and `[SERVICE_NAME]` are automatically substituted for the alert description and the
-service name respectively in the body (`alerting.custom.body`) as well as the url (`alerting.custom.url`).
+The placeholders `[ALERT_DESCRIPTION]` and `[SERVICE_NAME]` are automatically substituted for the alert description and
+the service name. These placeholders can be used in the body (`alerting.custom.body`) and in the url (`alerting.custom.url`).

-If you have `send-on-resolved` set to `true`, you may want to use `[ALERT_TRIGGERED_OR_RESOLVED]` to differentiate
-the notifications. It will be replaced for either `TRIGGERED` or `RESOLVED`, based on the situation.
+If you have an alert using the `custom` provider with `send-on-resolved` set to `true`, you can use the
+`[ALERT_TRIGGERED_OR_RESOLVED]` placeholder to differentiate the notifications.
+The aforementioned placeholder will be replaced by `TRIGGERED` or `RESOLVED` accordingly, though it can be modified
+(details at the end of this section).

 For all intents and purposes, we'll configure the custom alert with a Slack webhook, but you can call anything you want.

 ```yaml
 alerting:
   custom:

@@ -391,6 +402,18 @@ services:
       - "[RESPONSE_TIME] < 300"
 ```

+Note that you can customize the resolved values for the `[ALERT_TRIGGERED_OR_RESOLVED]` placeholder like so:
+```yaml
+alerting:
+  custom:
+    placeholders:
+      ALERT_TRIGGERED_OR_RESOLVED:
+        TRIGGERED: "partial_outage"
+        RESOLVED: "operational"
+```
+As a result, the `[ALERT_TRIGGERED_OR_RESOLVED]` in the body of the first example of this section would be replaced by
+`partial_outage` when an alert is triggered and `operational` when an alert is resolved.
+
 ### Kubernetes (ALPHA)

 > **WARNING**: This feature is in ALPHA. This means that it is very likely to change in the near future, which means that

@@ -491,13 +514,13 @@ By setting `services[].graphql` to true, the body will automatically be wrapped
 For instance, the following configuration:
 ```yaml
 services:
-  - name: filter users by gender
+  - name: filter-users-by-gender
     url: http://localhost:8080/playground
     method: POST
     graphql: true
     body: |
       {
-        user(gender: "female") {
+        users(gender: "female") {
           id
           name
           gender

@@ -508,12 +531,12 @@ services:
       Content-Type: application/json # XXX: as of v1.9.2, this header is automatically added when graphql is set to true
     conditions:
       - "[STATUS] == 200"
-      - "[BODY].data.user[0].gender == female"
+      - "[BODY].data.users[0].gender == female"
 ```

 will send a `POST` request to `http://localhost:8080/playground` with the following body:
 ```json
-{"query":" {\n user(gender: \"female\") {\n id\n name\n gender\n avatar\n }\n }"}
+{"query":" {\n users(gender: \"female\") {\n id\n name\n gender\n avatar\n }\n }"}
 ```

@@ -585,7 +608,7 @@ commonly known as "ping" or "echo":

 ```yaml
 services:
-  - name: ICMP
+  - name: ping-example
     url: "icmp://example.com"
     conditions:
       - "[CONNECTED] == true"

@@ -600,7 +623,7 @@ You can specify a domain prefixed by `icmp://`, or an IP address prefixed by `ic
 Defining a `dns` configuration in a service will automatically mark that service as a service of type DNS:
 ```yaml
 services:
-  - name: example dns query
+  - name: example-dns-query
     url: "8.8.8.8" # Address of the DNS server to use
     interval: 30s
     dns:
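The GraphQL wrapping the README describes can be made concrete with a small sketch. `wrapInGraphQLPayload` is a hypothetical name (the actual function inside Gatus is not part of this diff), but the payload shape matches the README's JSON example above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// wrapInGraphQLPayload is a hypothetical helper illustrating the documented
// behavior: when graphql is true, the raw body is embedded as the "query"
// field of a standard GraphQL JSON payload.
func wrapInGraphQLPayload(body string) ([]byte, error) {
	return json.Marshal(map[string]string{"query": body})
}

func main() {
	payload, _ := wrapInGraphQLPayload("{\n  users(gender: \"female\") {\n    id\n  }\n}")
	fmt.Println(string(payload))
}
```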
alerting/provider/custom/custom.go

@@ -16,11 +16,12 @@ import (
 // AlertProvider is the configuration necessary for sending an alert using a custom HTTP request
 // Technically, all alert providers should be reachable using the custom alert provider
 type AlertProvider struct {
-	URL      string            `yaml:"url"`
-	Method   string            `yaml:"method,omitempty"`
-	Insecure bool              `yaml:"insecure,omitempty"`
-	Body     string            `yaml:"body,omitempty"`
-	Headers  map[string]string `yaml:"headers,omitempty"`
+	URL          string                       `yaml:"url"`
+	Method       string                       `yaml:"method,omitempty"`
+	Insecure     bool                         `yaml:"insecure,omitempty"`
+	Body         string                       `yaml:"body,omitempty"`
+	Headers      map[string]string            `yaml:"headers,omitempty"`
+	Placeholders map[string]map[string]string `yaml:"placeholders,omitempty"`
 }

 // IsValid returns whether the provider's configuration is valid

@@ -33,10 +34,25 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
 	return provider
 }

+// GetAlertStatePlaceholderValue returns the Placeholder value for ALERT_TRIGGERED_OR_RESOLVED if configured
+func (provider *AlertProvider) GetAlertStatePlaceholderValue(resolved bool) string {
+	status := "TRIGGERED"
+	if resolved {
+		status = "RESOLVED"
+	}
+	if _, ok := provider.Placeholders["ALERT_TRIGGERED_OR_RESOLVED"]; ok {
+		if val, ok := provider.Placeholders["ALERT_TRIGGERED_OR_RESOLVED"][status]; ok {
+			return val
+		}
+	}
+	return status
+}
+
 func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription string, resolved bool) *http.Request {
 	body := provider.Body
 	providerURL := provider.URL
 	method := provider.Method

 	if strings.Contains(body, "[ALERT_DESCRIPTION]") {
 		body = strings.ReplaceAll(body, "[ALERT_DESCRIPTION]", alertDescription)
 	}

@@ -45,9 +61,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
 	}
 	if strings.Contains(body, "[ALERT_TRIGGERED_OR_RESOLVED]") {
 		if resolved {
-			body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", "RESOLVED")
+			body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))
 		} else {
-			body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", "TRIGGERED")
+			body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(false))
 		}
 	}
 	if strings.Contains(providerURL, "[ALERT_DESCRIPTION]") {

@@ -58,9 +74,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
 	}
 	if strings.Contains(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]") {
 		if resolved {
-			providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", "RESOLVED")
+			providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))
 		} else {
-			providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", "TRIGGERED")
+			providerURL = strings.ReplaceAll(providerURL, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(false))
 		}
 	}
 	if len(method) == 0 {
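To see the new placeholder lookup in isolation, here is a self-contained sketch with the same semantics as `GetAlertStatePlaceholderValue` above, with the type reduced to the one field the method needs:

```go
package main

import "fmt"

// AlertProvider is reduced here to the Placeholders field
// (map of placeholder name -> state -> replacement value).
type AlertProvider struct {
	Placeholders map[string]map[string]string
}

// GetAlertStatePlaceholderValue falls back on the default
// TRIGGERED/RESOLVED values when no override is configured.
func (provider *AlertProvider) GetAlertStatePlaceholderValue(resolved bool) string {
	status := "TRIGGERED"
	if resolved {
		status = "RESOLVED"
	}
	// Reading from a nil map is safe in Go, so a nil Placeholders
	// map simply falls through to the default value.
	if val, ok := provider.Placeholders["ALERT_TRIGGERED_OR_RESOLVED"][status]; ok {
		return val
	}
	return status
}

func main() {
	provider := &AlertProvider{Placeholders: map[string]map[string]string{
		"ALERT_TRIGGERED_OR_RESOLVED": {"TRIGGERED": "partial_outage", "RESOLVED": "operational"},
	}}
	fmt.Println(provider.GetAlertStatePlaceholderValue(false)) // partial_outage
	fmt.Println(provider.GetAlertStatePlaceholderValue(true))  // operational
}
```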
alerting/provider/custom/custom_test.go

@@ -62,9 +62,49 @@ func TestAlertProvider_ToCustomAlertProvider(t *testing.T) {
 	provider := AlertProvider{URL: "http://example.com"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
-	if customAlertProvider != customAlertProvider {
-		t.Error("customAlertProvider should've been equal to customAlertProvider")
+	if customAlertProvider.URL != "http://example.com" {
+		t.Error("expected URL to be http://example.com, got", customAlertProvider.URL)
 	}
 }

+func TestAlertProvider_buildHTTPRequestWithCustomPlaceholder(t *testing.T) {
+	const (
+		ExpectedURL  = "http://example.com/service-name?event=test&description=alert-description"
+		ExpectedBody = "service-name,alert-description,test"
+	)
+	customAlertProvider := &AlertProvider{
+		URL:     "http://example.com/[SERVICE_NAME]?event=[ALERT_TRIGGERED_OR_RESOLVED]&description=[ALERT_DESCRIPTION]",
+		Body:    "[SERVICE_NAME],[ALERT_DESCRIPTION],[ALERT_TRIGGERED_OR_RESOLVED]",
+		Headers: nil,
+		Placeholders: map[string]map[string]string{
+			"ALERT_TRIGGERED_OR_RESOLVED": {
+				"RESOLVED": "test",
+			},
+		},
+	}
+	request := customAlertProvider.buildHTTPRequest("service-name", "alert-description", true)
+	if request.URL.String() != ExpectedURL {
+		t.Error("expected URL to be", ExpectedURL, "was", request.URL.String())
+	}
+	body, _ := ioutil.ReadAll(request.Body)
+	if string(body) != ExpectedBody {
+		t.Error("expected body to be", ExpectedBody, "was", string(body))
+	}
+}
+
+func TestAlertProvider_GetAlertStatePlaceholderValueDefaults(t *testing.T) {
+	customAlertProvider := &AlertProvider{
+		URL:          "http://example.com/[SERVICE_NAME]?event=[ALERT_TRIGGERED_OR_RESOLVED]&description=[ALERT_DESCRIPTION]",
+		Body:         "[SERVICE_NAME],[ALERT_DESCRIPTION],[ALERT_TRIGGERED_OR_RESOLVED]",
+		Headers:      nil,
+		Placeholders: nil,
+	}
+	if customAlertProvider.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
+		t.Error("expected RESOLVED, got", customAlertProvider.GetAlertStatePlaceholderValue(true))
+	}
+	if customAlertProvider.GetAlertStatePlaceholderValue(false) != "TRIGGERED" {
+		t.Error("expected TRIGGERED, got", customAlertProvider.GetAlertStatePlaceholderValue(false))
+	}
+}
alerting/provider/mattermost/mattermost.go

@@ -38,7 +38,7 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
 		} else {
 			prefix = ":x:"
 		}
-		results += fmt.Sprintf("%s - `%s`\n", prefix, conditionResult.Condition)
+		results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
 	}
 	return &custom.AlertProvider{
 		URL: provider.WebhookURL,
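The `\n` → `\\n` change matters because the webhook body is itself a JSON document: a raw newline inside a JSON string literal is invalid, while the two-character escape `\n` is decoded back into a newline by the receiver. The tests added in this same changeset assert that the body unmarshals as valid JSON. A minimal demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// raw contains an actual newline inside the string value, which the
	// JSON grammar forbids; escaped contains the characters '\' and 'n',
	// which decode back into a newline.
	raw := `{"text":"line1` + "\n" + `line2"}`
	escaped := `{"text":"line1\nline2"}`
	var v map[string]string
	fmt.Println(json.Unmarshal([]byte(raw), &v) != nil)     // true: invalid JSON
	fmt.Println(json.Unmarshal([]byte(escaped), &v) == nil) // true: valid JSON
}
```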
alerting/provider/mattermost/mattermost_test.go

@@ -1,6 +1,8 @@
 package mattermost

 import (
+	"encoding/json"
+	"net/http"
 	"strings"
 	"testing"

@@ -19,23 +21,45 @@ func TestAlertProvider_IsValid(t *testing.T) {
 }

 func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
-	provider := AlertProvider{WebhookURL: "http://example.com"}
+	provider := AlertProvider{WebhookURL: "http://example.org"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "resolved") {
 		t.Error("customAlertProvider.Body should've contained the substring resolved")
 	}
+	if customAlertProvider.URL != "http://example.org" {
+		t.Errorf("expected URL to be %s, got %s", "http://example.org", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }

 func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
-	provider := AlertProvider{WebhookURL: "http://example.com"}
+	provider := AlertProvider{WebhookURL: "http://example.org"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "triggered") {
 		t.Error("customAlertProvider.Body should've contained the substring triggered")
 	}
+	if customAlertProvider.URL != "http://example.org" {
+		t.Errorf("expected URL to be %s, got %s", "http://example.org", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }
alerting/provider/messagebird/messagebird_test.go

@@ -1,6 +1,8 @@
 package messagebird

 import (
+	"encoding/json"
+	"net/http"
 	"strings"
 	"testing"

@@ -30,11 +32,22 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
 	}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
 		t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
 	}
+	if customAlertProvider.URL != "https://rest.messagebird.com/messages" {
+		t.Errorf("expected URL to be %s, got %s", "https://rest.messagebird.com/messages", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }

 func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {

@@ -45,9 +58,20 @@ func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
 	}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
 		t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
 	}
+	if customAlertProvider.URL != "https://rest.messagebird.com/messages" {
+		t.Errorf("expected URL to be %s, got %s", "https://rest.messagebird.com/messages", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }
alerting/provider/pagerduty/pagerduty_test.go

@@ -1,6 +1,8 @@
 package pagerduty

 import (
+	"encoding/json"
+	"net/http"
 	"strings"
 	"testing"

@@ -22,20 +24,42 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
 	provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
 		t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
 	}
+	if customAlertProvider.URL != "https://events.pagerduty.com/v2/enqueue" {
+		t.Errorf("expected URL to be %s, got %s", "https://events.pagerduty.com/v2/enqueue", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }

 func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
 	provider := AlertProvider{IntegrationKey: "00000000000000000000000000000000"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
 		t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
 	}
+	if customAlertProvider.URL != "https://events.pagerduty.com/v2/enqueue" {
+		t.Errorf("expected URL to be %s, got %s", "https://events.pagerduty.com/v2/enqueue", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }
alerting/provider/slack/slack.go

@@ -35,7 +35,7 @@ func (provider *AlertProvider) ToCustomAlertProvider(service *core.Service, aler
 		} else {
 			prefix = ":x:"
 		}
-		results += fmt.Sprintf("%s - `%s`\n", prefix, conditionResult.Condition)
+		results += fmt.Sprintf("%s - `%s`\\n", prefix, conditionResult.Condition)
 	}
 	return &custom.AlertProvider{
 		URL: provider.WebhookURL,
alerting/provider/slack/slack_test.go

@@ -1,6 +1,8 @@
 package slack

 import (
+	"encoding/json"
+	"net/http"
 	"strings"
 	"testing"

@@ -22,20 +24,42 @@ func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
 	provider := AlertProvider{WebhookURL: "http://example.com"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "SUCCESSFUL_CONDITION", Success: true}}}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "resolved") {
 		t.Error("customAlertProvider.Body should've contained the substring resolved")
 	}
+	if customAlertProvider.URL != "http://example.com" {
+		t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }

 func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
 	provider := AlertProvider{WebhookURL: "http://example.com"}
 	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{ConditionResults: []*core.ConditionResult{{Condition: "UNSUCCESSFUL_CONDITION", Success: false}}}, false)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "triggered") {
 		t.Error("customAlertProvider.Body should've contained the substring triggered")
 	}
+	if customAlertProvider.URL != "http://example.com" {
+		t.Errorf("expected URL to be %s, got %s", "http://example.com", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	body := make(map[string]interface{})
+	err := json.Unmarshal([]byte(customAlertProvider.Body), &body)
+	if err != nil {
+		t.Error("expected body to be valid JSON, got error:", err.Error())
+	}
 }
alerting/provider/twilio/twilio_test.go

@@ -1,6 +1,7 @@
 package twilio

 import (
+	"net/http"
 	"strings"
 	"testing"

@@ -26,31 +27,49 @@ func TestTwilioAlertProvider_IsValid(t *testing.T) {
 func TestAlertProvider_ToCustomAlertProviderWithResolvedAlert(t *testing.T) {
 	provider := AlertProvider{
 		SID:   "1",
-		Token: "1",
-		From:  "1",
-		To:    "1",
+		Token: "2",
+		From:  "3",
+		To:    "4",
 	}
-	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, true)
+	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, true)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "RESOLVED") {
 		t.Error("customAlertProvider.Body should've contained the substring RESOLVED")
 	}
+	if customAlertProvider.URL != "https://api.twilio.com/2010-04-01/Accounts/1/Messages.json" {
+		t.Errorf("expected URL to be %s, got %s", "https://api.twilio.com/2010-04-01/Accounts/1/Messages.json", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	if customAlertProvider.Body != "Body=RESOLVED%3A+service-name+-+alert-description&From=3&To=4" {
+		t.Errorf("expected body to be %s, got %s", "Body=RESOLVED%3A+service-name+-+alert-description&From=3&To=4", customAlertProvider.Body)
+	}
 }

 func TestAlertProvider_ToCustomAlertProviderWithTriggeredAlert(t *testing.T) {
 	provider := AlertProvider{
-		SID:   "1",
-		Token: "1",
-		From:  "1",
+		SID:   "4",
+		Token: "3",
+		From:  "2",
 		To:    "1",
 	}
-	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{}, &core.Alert{}, &core.Result{}, false)
+	customAlertProvider := provider.ToCustomAlertProvider(&core.Service{Name: "service-name"}, &core.Alert{Description: "alert-description"}, &core.Result{}, false)
 	if customAlertProvider == nil {
-		t.Error("customAlertProvider shouldn't have been nil")
+		t.Fatal("customAlertProvider shouldn't have been nil")
 	}
 	if !strings.Contains(customAlertProvider.Body, "TRIGGERED") {
 		t.Error("customAlertProvider.Body should've contained the substring TRIGGERED")
 	}
+	if customAlertProvider.URL != "https://api.twilio.com/2010-04-01/Accounts/4/Messages.json" {
+		t.Errorf("expected URL to be %s, got %s", "https://api.twilio.com/2010-04-01/Accounts/4/Messages.json", customAlertProvider.URL)
+	}
+	if customAlertProvider.Method != http.MethodPost {
+		t.Errorf("expected method to be %s, got %s", http.MethodPost, customAlertProvider.Method)
+	}
+	if customAlertProvider.Body != "Body=TRIGGERED%3A+service-name+-+alert-description&From=2&To=1" {
+		t.Errorf("expected body to be %s, got %s", "Body=TRIGGERED%3A+service-name+-+alert-description&From=2&To=1", customAlertProvider.Body)
+	}
 }
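The expected bodies in these tests are standard URL-encoded form payloads (`:` becomes `%3A`, spaces become `+`). A small sketch of how such a body could be produced; this is a hypothetical reconstruction, since twilio.go itself is not part of this diff:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.Values.Encode sorts keys alphabetically (Body, From, To) and
	// percent-encodes the values, matching the expected test bodies above.
	form := url.Values{}
	form.Set("Body", "RESOLVED: service-name - alert-description")
	form.Set("From", "3")
	form.Set("To", "4")
	fmt.Println(form.Encode()) // Body=RESOLVED%3A+service-name+-+alert-description&From=3&To=4
}
```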
client/client.go

@@ -27,6 +27,7 @@ func GetHTTPClient(insecure bool) *http.Client {
 			Transport: &http.Transport{
 				MaxIdleConns:        100,
 				MaxIdleConnsPerHost: 20,
+				Proxy:               http.ProxyFromEnvironment,
 				TLSClientConfig: &tls.Config{
 					InsecureSkipVerify: true,
 				},

@@ -41,6 +42,7 @@ func GetHTTPClient(insecure bool) *http.Client {
 			Transport: &http.Transport{
 				MaxIdleConns:        100,
 				MaxIdleConnsPerHost: 20,
+				Proxy:               http.ProxyFromEnvironment,
 			},
 		}
 	}
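Setting `Proxy: http.ProxyFromEnvironment` makes outgoing requests honor the standard `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables. A minimal sketch of a client configured the same way:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Requests made through this client are routed via the proxy named by
	// HTTP_PROXY/HTTPS_PROXY unless the target host is excluded by NO_PROXY.
	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
	}
	fmt.Println(client.Transport != nil)
}
```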
config/config.go

@@ -11,6 +11,8 @@ import (
 	"github.com/TwinProduction/gatus/core"
 	"github.com/TwinProduction/gatus/k8s"
 	"github.com/TwinProduction/gatus/security"
+	"github.com/TwinProduction/gatus/storage"
+	"github.com/TwinProduction/gatus/util"
 	"gopkg.in/yaml.v2"
 )

@@ -71,8 +73,11 @@ type Config struct {
 	// Kubernetes is the Kubernetes configuration
 	Kubernetes *k8s.Config `yaml:"kubernetes"`

+	// Storage is the configuration for how the data is stored
+	Storage *storage.Config `yaml:"storage"`
+
 	// Web is the configuration for the web listener
-	Web *webConfig `yaml:"web"`
+	Web *WebConfig `yaml:"web"`
 }

 // Get returns the configuration, or panics if the configuration hasn't loaded yet

@@ -144,13 +149,33 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
 		validateServicesConfig(config)
 		validateKubernetesConfig(config)
 		validateWebConfig(config)
+		validateStorageConfig(config)
 	}
 	return
 }

+func validateStorageConfig(config *Config) {
+	if config.Storage == nil {
+		config.Storage = &storage.Config{}
+	}
+	err := storage.Initialize(config.Storage)
+	if err != nil {
+		panic(err)
+	}
+	// Remove all ServiceStatus that represent services which no longer exist in the configuration
+	var keys []string
+	for _, service := range config.Services {
+		keys = append(keys, util.ConvertGroupAndServiceToKey(service.Group, service.Name))
+	}
+	numberOfServiceStatusesDeleted := storage.Get().DeleteAllServiceStatusesNotInKeys(keys)
+	if numberOfServiceStatusesDeleted > 0 {
+		log.Printf("[config][validateStorageConfig] Deleted %d service statuses because their matching services no longer existed", numberOfServiceStatusesDeleted)
+	}
+}
+
 func validateWebConfig(config *Config) {
 	if config.Web == nil {
-		config.Web = &webConfig{Address: DefaultAddress, Port: DefaultPort}
+		config.Web = &WebConfig{Address: DefaultAddress, Port: DefaultPort}
 	} else {
 		config.Web.validateAndSetDefaults()
 	}
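`util.ConvertGroupAndServiceToKey` is referenced but not included in this diff. The controller tests further below query keys like `core_frontend` (group `core`, service `frontend`), which suggests a `<group>_<name>` format; here is a hedged sketch under that assumption:

```go
package main

import "fmt"

// convertGroupAndServiceToKey is an assumed reconstruction of
// util.ConvertGroupAndServiceToKey; the real implementation may also
// sanitize special characters in the group and service names.
func convertGroupAndServiceToKey(group, name string) string {
	return group + "_" + name
}

func main() {
	fmt.Println(convertGroupAndServiceToKey("core", "frontend")) // core_frontend
}
```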
config/config_test.go

@@ -42,7 +42,10 @@ func TestLoadDefaultConfigurationFile(t *testing.T) {
 }

 func TestParseAndValidateConfigBytes(t *testing.T) {
-	config, err := parseAndValidateConfigBytes([]byte(`
+	file := t.TempDir() + "/test.db"
+	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
+storage:
+  file: %s
 services:
   - name: twinnation
     url: https://twinnation.org/health

@@ -54,9 +57,9 @@ services:
     conditions:
       - "[STATUS] != 400"
       - "[STATUS] != 500"
-`))
+`, file)))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -99,7 +102,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -132,7 +135,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -167,7 +170,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -201,7 +204,7 @@ services:
      - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -250,7 +253,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -288,7 +291,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -332,7 +335,6 @@ badconfig:
 func TestParseAndValidateConfigBytesWithAlerting(t *testing.T) {
 	config, err := parseAndValidateConfigBytes([]byte(`
-debug: true
 alerting:
   slack:
     webhook-url: "http://example.com"

@@ -359,7 +361,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -452,7 +454,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -486,7 +488,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -500,11 +502,106 @@ services:
 	if !config.Alerting.Custom.IsValid() {
 		t.Fatal("Custom alerting config should've been valid")
 	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'RESOLVED', got", config.Alerting.Custom.GetAlertStatePlaceholderValue(true))
+	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "TRIGGERED" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'TRIGGERED', got", config.Alerting.Custom.GetAlertStatePlaceholderValue(false))
+	}
+	if config.Alerting.Custom.Insecure {
+		t.Fatal("config.Alerting.Custom.Insecure shouldn't have been true")
+	}
 }

+func TestParseAndValidateConfigBytesWithCustomAlertingConfigAndCustomPlaceholderValues(t *testing.T) {
+	config, err := parseAndValidateConfigBytes([]byte(`
+alerting:
+  custom:
+    placeholders:
+      ALERT_TRIGGERED_OR_RESOLVED:
+        TRIGGERED: "partial_outage"
+        RESOLVED: "operational"
+    url: "https://example.com"
+    insecure: true
+    body: "[ALERT_TRIGGERED_OR_RESOLVED]: [SERVICE_NAME] - [ALERT_DESCRIPTION]"
+services:
+  - name: twinnation
+    url: https://twinnation.org/health
+    alerts:
+      - type: custom
+    conditions:
+      - "[STATUS] == 200"
+`))
+	if err != nil {
+		t.Error("expected no error, got", err.Error())
+	}
+	if config == nil {
+		t.Fatal("Config shouldn't have been nil")
+	}
+	if config.Alerting == nil {
+		t.Fatal("config.Alerting shouldn't have been nil")
+	}
+	if config.Alerting.Custom == nil {
+		t.Fatal("Custom alerting config shouldn't have been nil")
+	}
+	if !config.Alerting.Custom.IsValid() {
+		t.Fatal("Custom alerting config should've been valid")
+	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "operational" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'operational'")
+	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "partial_outage" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'partial_outage'")
+	}
+	if !config.Alerting.Custom.Insecure {
+		t.Fatal("config.Alerting.Custom.Insecure should've been true")
+	}
+}
+
+func TestParseAndValidateConfigBytesWithCustomAlertingConfigAndOneCustomPlaceholderValue(t *testing.T) {
+	config, err := parseAndValidateConfigBytes([]byte(`
+alerting:
+  custom:
+    placeholders:
+      ALERT_TRIGGERED_OR_RESOLVED:
+        TRIGGERED: "partial_outage"
+    url: "https://example.com"
+    insecure: true
+    body: "[ALERT_TRIGGERED_OR_RESOLVED]: [SERVICE_NAME] - [ALERT_DESCRIPTION]"
+services:
+  - name: twinnation
+    url: https://twinnation.org/health
+    alerts:
+      - type: custom
+    conditions:
+      - "[STATUS] == 200"
+`))
+	if err != nil {
+		t.Error("expected no error, got", err.Error())
+	}
+	if config == nil {
+		t.Fatal("Config shouldn't have been nil")
+	}
+	if config.Alerting == nil {
+		t.Fatal("config.Alerting shouldn't have been nil")
+	}
+	if config.Alerting.Custom == nil {
+		t.Fatal("Custom alerting config shouldn't have been nil")
+	}
+	if !config.Alerting.Custom.IsValid() {
+		t.Fatal("Custom alerting config should've been valid")
+	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(true) != "RESOLVED" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for RESOLVED should've been 'RESOLVED'")
+	}
+	if config.Alerting.Custom.GetAlertStatePlaceholderValue(false) != "partial_outage" {
+		t.Fatal("ALERT_TRIGGERED_OR_RESOLVED placeholder value for TRIGGERED should've been 'partial_outage'")
+	}
+	if !config.Alerting.Custom.Insecure {
+		t.Fatal("config.Alerting.Custom.Insecure should've been true")
+	}
+}
+
 func TestParseAndValidateConfigBytesWithCustomAlertingConfigThatHasInsecureSetToTrue(t *testing.T) {
 	config, err := parseAndValidateConfigBytes([]byte(`
 alerting:

@@ -525,7 +622,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -578,7 +675,7 @@ services:
       - "[STATUS] == 200"
 `, expectedUsername, expectedPasswordHash)))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")

@@ -645,7 +742,7 @@ kubernetes:
     target-path: "/health"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
config/web.go

@@ -5,9 +5,9 @@ import (
 	"math"
 )

-// webConfig is the structure which supports the configuration of the endpoint
+// WebConfig is the structure which supports the configuration of the endpoint
 // which provides access to the web frontend
-type webConfig struct {
+type WebConfig struct {
 	// Address to listen on (defaults to 0.0.0.0 specified by DefaultAddress)
 	Address string `yaml:"address"`

@@ -16,7 +16,7 @@ type webConfig struct {
 }

 // validateAndSetDefaults checks and sets the default values for fields that are not set
-func (web *webConfig) validateAndSetDefaults() {
+func (web *WebConfig) validateAndSetDefaults() {
 	// Validate the Address
 	if len(web.Address) == 0 {
 		web.Address = DefaultAddress

@@ -30,6 +30,6 @@ func (web *webConfig) validateAndSetDefaults() {
 }

 // SocketAddress returns the combination of the Address and the Port
-func (web *webConfig) SocketAddress() string {
+func (web *WebConfig) SocketAddress() string {
 	return fmt.Sprintf("%s:%d", web.Address, web.Port)
 }
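Exporting `webConfig` as `WebConfig` makes the type usable from outside the config package (the controller tests added below construct a `config.Config` directly). A minimal, self-contained usage sketch, with the type reduced to the two fields `SocketAddress` uses:

```go
package main

import "fmt"

// WebConfig mirrors the exported type above, reduced to Address and Port.
type WebConfig struct {
	Address string
	Port    int
}

// SocketAddress returns the combination of the Address and the Port.
func (web *WebConfig) SocketAddress() string {
	return fmt.Sprintf("%s:%d", web.Address, web.Port)
}

func main() {
	web := &WebConfig{Address: "0.0.0.0", Port: 8081}
	fmt.Println(web.SocketAddress()) // 0.0.0.0:8081
}
```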
config/web_test.go

@@ -5,7 +5,7 @@ import (
 )

 func TestWebConfig_SocketAddress(t *testing.T) {
-	web := &webConfig{
+	web := &WebConfig{
 		Address: "0.0.0.0",
 		Port:    8081,
 	}
controller/badge.go

@@ -7,7 +7,7 @@ import (
 	"time"

 	"github.com/TwinProduction/gatus/core"
-	"github.com/TwinProduction/gatus/watchdog"
+	"github.com/TwinProduction/gatus/storage"
 	"github.com/gorilla/mux"
 )

@@ -25,18 +25,23 @@ func badgeHandler(writer http.ResponseWriter, request *http.Request) {
 	}
 	identifier := variables["identifier"]
 	key := strings.TrimSuffix(identifier, ".svg")
-	uptime := watchdog.GetUptimeByKey(key)
-	if uptime == nil {
+	serviceStatus := storage.Get().GetServiceStatusByKey(key)
+	if serviceStatus == nil {
 		writer.WriteHeader(http.StatusNotFound)
 		_, _ = writer.Write([]byte("Requested service not found"))
 		return
 	}
+	if serviceStatus.Uptime == nil {
+		writer.WriteHeader(http.StatusInternalServerError)
+		_, _ = writer.Write([]byte("Failed to compute uptime"))
+		return
+	}
 	formattedDate := time.Now().Format(http.TimeFormat)
 	writer.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
 	writer.Header().Set("Date", formattedDate)
 	writer.Header().Set("Expires", formattedDate)
 	writer.Header().Set("Content-Type", "image/svg+xml")
-	_, _ = writer.Write(generateSVG(duration, uptime))
+	_, _ = writer.Write(generateSVG(duration, serviceStatus.Uptime))
 }

 func generateSVG(duration string, uptime *core.Uptime) []byte {
controller/controller.go

@@ -3,6 +3,7 @@ package controller
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"encoding/json"
 	"fmt"
 	"log"

@@ -13,8 +14,9 @@ import (

 	"github.com/TwinProduction/gatus/config"
 	"github.com/TwinProduction/gatus/security"
-	"github.com/TwinProduction/gatus/watchdog"
+	"github.com/TwinProduction/gatus/storage"
 	"github.com/TwinProduction/gocache"
+	"github.com/TwinProduction/health"
 	"github.com/gorilla/mux"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )

@@ -25,6 +27,14 @@ const (

 var (
 	cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.LeastRecentlyUsed)
+
+	// staticFolder is the path to the location of the static folder from the root path of the project
+	// The only reason this is exposed is to allow running tests from a different path than the root path of the project
+	staticFolder = "./web/static"
+
+	// server is the http.Server created by Handle.
+	// The only reason it exists is for testing purposes.
+	server *http.Server
 )

 func init() {

@@ -40,36 +50,49 @@ func Handle() {
 	if os.Getenv("ENVIRONMENT") == "dev" {
 		router = developmentCorsHandler(router)
 	}
-	server := &http.Server{
+	server = &http.Server{
 		Addr:         fmt.Sprintf("%s:%d", cfg.Web.Address, cfg.Web.Port),
 		Handler:      router,
 		ReadTimeout:  15 * time.Second,
 		WriteTimeout: 15 * time.Second,
 		IdleTimeout:  15 * time.Second,
 	}
-	log.Println("[controller][Handle] Listening on" + cfg.Web.SocketAddress())
-	log.Fatal(server.ListenAndServe())
+	log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress())
+	if os.Getenv("ROUTER_TEST") == "true" {
+		return
+	}
+	log.Println("[controller][Handle]", server.ListenAndServe())
 }

+// Shutdown stops the server
+func Shutdown() {
+	if server != nil {
+		_ = server.Shutdown(context.TODO())
+		server = nil
+	}
+}
+
 // CreateRouter creates the router for the http server
 func CreateRouter(cfg *config.Config) *mux.Router {
 	router := mux.NewRouter()
-	router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET") // favicon needs to be always served from the root
-	router.HandleFunc("/services/{service}", spaHandler).Methods("GET")
-	router.HandleFunc("/api/v1/statuses", secureIfNecessary(cfg, serviceStatusesHandler)).Methods("GET")
-	router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(cfg, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
-	router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET")
-	router.HandleFunc("/health", healthHandler).Methods("GET")
-	router.PathPrefix("/").Handler(GzipHandler(http.FileServer(http.Dir("./web/static"))))
 	if cfg.Metrics {
 		router.Handle("/metrics", promhttp.Handler()).Methods("GET")
 	}
+	router.Handle("/health", health.Handler().WithJSON(true)).Methods("GET")
+	router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET")
+	router.HandleFunc("/api/v1/statuses", secureIfNecessary(cfg, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content
+	router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(cfg, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
+	router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET")
+	// SPA
+	router.HandleFunc("/services/{service}", spaHandler).Methods("GET")
+	// Everything else falls back on static content
+	router.PathPrefix("/").Handler(GzipHandler(http.FileServer(http.Dir(staticFolder))))
 	return router
 }

 func secureIfNecessary(cfg *config.Config, handler http.HandlerFunc) http.HandlerFunc {
 	if cfg.Security != nil && cfg.Security.IsValid() {
-		return security.Handler(serviceStatusesHandler, cfg.Security)
+		return security.Handler(handler, cfg.Security)
 	}
 	return handler
 }

@@ -78,21 +101,22 @@ func secureIfNecessary(cfg *config.Config, handler http.HandlerFunc) http.Handle
 // Due to the size of the response, this function leverages a cache.
 // Must not be wrapped by GzipHandler
 func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
+	page, pageSize := extractPageAndPageSizeFromRequest(r)
 	gzipped := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip")
 	var exists bool
 	var value interface{}
 	if gzipped {
 		writer.Header().Set("Content-Encoding", "gzip")
-		value, exists = cache.Get("service-status-gzipped")
+		value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize))
 	} else {
-		value, exists = cache.Get("service-status")
+		value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d", page, pageSize))
 	}
 	var data []byte
 	if !exists {
 		var err error
 		buffer := &bytes.Buffer{}
 		gzipWriter := gzip.NewWriter(buffer)
-		data, err = watchdog.GetServiceStatusesAsJSON()
+		data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize))
 		if err != nil {
 			log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
 			writer.WriteHeader(http.StatusInternalServerError)

@@ -102,8 +126,8 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
 	_, _ = gzipWriter.Write(data)
 	_ = gzipWriter.Close()
 	gzippedData := buffer.Bytes()
-	cache.SetWithTTL("service-status", data, cacheTTL)
-	cache.SetWithTTL("service-status-gzipped", gzippedData, cacheTTL)
+	cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d", page, pageSize), data, cacheTTL)
+	cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize), gzippedData, cacheTTL)
 	if gzipped {
 		data = gzippedData
 	}

@@ -117,8 +141,9 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {

 // serviceStatusHandler retrieves a single ServiceStatus by group name and service name
 func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
+	page, pageSize := extractPageAndPageSizeFromRequest(r)
 	vars := mux.Vars(r)
-	serviceStatus := watchdog.GetServiceStatusByKey(vars["key"])
+	serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"])
 	if serviceStatus == nil {
 		log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
 		writer.WriteHeader(http.StatusNotFound)

@@ -126,36 +151,21 @@ func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
 		return
 	}
 	data := map[string]interface{}{
-		"serviceStatus": serviceStatus,
-		// This is my lazy way of exposing events even though they're not visible from the json annotation
-		// present in ServiceStatus. We do this because creating a separate object for each endpoints
-		// would be wasteful (one with and one without Events)
+		"serviceStatus": serviceStatus.WithResultPagination(page, pageSize),
+		// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
+		// expose only the necessary data on /api/v1/statuses.
+		// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
 		"events": serviceStatus.Events,
 		"uptime": serviceStatus.Uptime,
 	}
 	output, err := json.Marshal(data)
 	if err != nil {
 		log.Printf("[controller][serviceStatusHandler] Unable to marshal object to JSON: %s", err.Error())
 		writer.WriteHeader(http.StatusInternalServerError)
-		_, _ = writer.Write([]byte("Unable to marshal object to JSON"))
+		_, _ = writer.Write([]byte("unable to marshal object to JSON"))
 		return
 	}
 	writer.Header().Add("Content-Type", "application/json")
 	writer.WriteHeader(http.StatusOK)
 	_, _ = writer.Write(output)
 }

-func healthHandler(writer http.ResponseWriter, _ *http.Request) {
-	writer.Header().Add("Content-Type", "application/json")
-	writer.WriteHeader(http.StatusOK)
-	_, _ = writer.Write([]byte("{\"status\":\"UP\"}"))
-}
-
 // favIconHandler handles requests for /favicon.ico
 func favIconHandler(writer http.ResponseWriter, request *http.Request) {
 	http.ServeFile(writer, request, "./web/static/favicon.ico")
 }

 // spaHandler handles requests for /services/{service}
 func spaHandler(writer http.ResponseWriter, request *http.Request) {
 	http.ServeFile(writer, request, "./web/static/index.html")
 }
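`extractPageAndPageSizeFromRequest` is referenced by the handlers above but not included in this diff. The following is a hypothetical sketch assuming it reads `page` and `pageSize` query parameters (the tests below pass `?page=1&pageSize=20`) with sensible defaults; the real helper's defaults and bounds may differ:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// extractPageAndPageSizeFromRequest is an assumed reconstruction: it parses
// the "page" and "pageSize" query parameters, falling back to defaults when
// they are absent or invalid.
func extractPageAndPageSizeFromRequest(r *http.Request) (page, pageSize int) {
	page, pageSize = 1, 20 // assumed defaults
	if n, err := strconv.Atoi(r.URL.Query().Get("page")); err == nil && n > 0 {
		page = n
	}
	if n, err := strconv.Atoi(r.URL.Query().Get("pageSize")); err == nil && n > 0 {
		pageSize = n
	}
	return
}

func main() {
	r, _ := http.NewRequest("GET", "/api/v1/statuses?page=1&pageSize=20", nil)
	page, pageSize := extractPageAndPageSizeFromRequest(r)
	fmt.Println(page, pageSize) // 1 20
}
```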
333
controller/controller_test.go
Normal file
333
controller/controller_test.go
Normal file
@@ -0,0 +1,333 @@
|
||||
package controller

import (
    "math/rand"
    "net/http"
    "net/http/httptest"
    "os"
    "testing"
    "time"

    "github.com/TwinProduction/gatus/config"
    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/storage"
    "github.com/TwinProduction/gatus/watchdog"
)

var (
    firstCondition  = core.Condition("[STATUS] == 200")
    secondCondition = core.Condition("[RESPONSE_TIME] < 500")
    thirdCondition  = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")

    timestamp = time.Now()

    testService = core.Service{
        Name:                    "name",
        Group:                   "group",
        URL:                     "https://example.org/what/ever",
        Method:                  "GET",
        Body:                    "body",
        Interval:                30 * time.Second,
        Conditions:              []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
        Alerts:                  nil,
        Insecure:                false,
        NumberOfFailuresInARow:  0,
        NumberOfSuccessesInARow: 0,
    }
    testSuccessfulResult = core.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Body:                  []byte("body"),
        Errors:                nil,
        Connected:             true,
        Success:               true,
        Timestamp:             timestamp,
        Duration:              150 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*core.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   true,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   true,
            },
        },
    }
    testUnsuccessfulResult = core.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Body:                  []byte("body"),
        Errors:                []string{"error-1", "error-2"},
        Connected:             true,
        Success:               false,
        Timestamp:             timestamp,
        Duration:              750 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*core.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   false,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   false,
            },
        },
    }
)

func TestCreateRouter(t *testing.T) {
    defer storage.Get().Clear()
    defer cache.Clear()
    staticFolder = "../web/static"
    cfg := &config.Config{
        Metrics: true,
        Services: []*core.Service{
            {
                Name:  "frontend",
                Group: "core",
            },
            {
                Name:  "backend",
                Group: "core",
            },
        },
    }
    watchdog.UpdateServiceStatuses(cfg.Services[0], &core.Result{Success: true, Duration: time.Millisecond, Timestamp: time.Now()})
    watchdog.UpdateServiceStatuses(cfg.Services[1], &core.Result{Success: false, Duration: time.Second, Timestamp: time.Now()})
    router := CreateRouter(cfg)
    type Scenario struct {
        Name         string
        Path         string
        ExpectedCode int
        Gzip         bool
    }
    scenarios := []Scenario{
        {
            Name:         "health",
            Path:         "/health",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "metrics",
            Path:         "/metrics",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "badges-1h",
            Path:         "/api/v1/badges/uptime/1h/core_frontend.svg",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "badges-24h",
            Path:         "/api/v1/badges/uptime/24h/core_backend.svg",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "badges-7d",
            Path:         "/api/v1/badges/uptime/7d/core_frontend.svg",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "badges-with-invalid-duration",
            Path:         "/api/v1/badges/uptime/3d/core_backend.svg",
            ExpectedCode: http.StatusBadRequest,
        },
        {
            Name:         "badges-for-invalid-key",
            Path:         "/api/v1/badges/uptime/7d/invalid_key.svg",
            ExpectedCode: http.StatusNotFound,
        },
        {
            Name:         "service-statuses",
            Path:         "/api/v1/statuses",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "service-statuses-gzip",
            Path:         "/api/v1/statuses",
            ExpectedCode: http.StatusOK,
            Gzip:         true,
        },
        {
            Name:         "service-statuses-pagination",
            Path:         "/api/v1/statuses?page=1&pageSize=20",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "service-status",
            Path:         "/api/v1/statuses/core_frontend",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "service-status-gzip",
            Path:         "/api/v1/statuses/core_frontend",
            ExpectedCode: http.StatusOK,
            Gzip:         true,
        },
        {
            Name:         "service-status-for-invalid-key",
            Path:         "/api/v1/statuses/invalid_key",
            ExpectedCode: http.StatusNotFound,
        },
        {
            Name:         "favicon",
            Path:         "/favicon.ico",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "frontend-home",
            Path:         "/",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "frontend-assets",
            Path:         "/js/app.js",
            ExpectedCode: http.StatusOK,
        },
        {
            Name:         "frontend-service",
            Path:         "/services/core_frontend",
            ExpectedCode: http.StatusOK,
        },
    }
    for _, scenario := range scenarios {
        t.Run(scenario.Name, func(t *testing.T) {
            request, _ := http.NewRequest("GET", scenario.Path, nil)
            if scenario.Gzip {
                request.Header.Set("Accept-Encoding", "gzip")
            }
            responseRecorder := httptest.NewRecorder()
            router.ServeHTTP(responseRecorder, request)
            if responseRecorder.Code != scenario.ExpectedCode {
                t.Errorf("%s %s should have returned %d, but returned %d instead", request.Method, request.URL, scenario.ExpectedCode, responseRecorder.Code)
            }
        })
    }
}

func TestHandle(t *testing.T) {
    defer storage.Get().Clear()
    defer cache.Clear()
    cfg := &config.Config{
        Web: &config.WebConfig{
            Address: "0.0.0.0",
            Port:    rand.Intn(65534),
        },
        Services: []*core.Service{
            {
                Name:  "frontend",
                Group: "core",
            },
            {
                Name:  "backend",
                Group: "core",
            },
        },
    }
    config.Set(cfg)
    defer config.Set(nil)
    _ = os.Setenv("ROUTER_TEST", "true")
    _ = os.Setenv("ENVIRONMENT", "dev")
    defer os.Clearenv()
    Handle()
    defer Shutdown()
    request, _ := http.NewRequest("GET", "/health", nil)
    responseRecorder := httptest.NewRecorder()
    server.Handler.ServeHTTP(responseRecorder, request)
    if responseRecorder.Code != http.StatusOK {
        t.Error("expected GET /health to return status code 200")
    }
    if server == nil {
        t.Fatal("server should've been set (but because we set ROUTER_TEST, it shouldn't have been started)")
    }
}

func TestShutdown(t *testing.T) {
    // Pretend that we called controller.Handle(), which initializes the server variable
    server = &http.Server{}
    Shutdown()
    if server != nil {
        t.Error("server should've been shut down")
    }
}

func TestServiceStatusesHandler(t *testing.T) {
    defer storage.Get().Clear()
    defer cache.Clear()
    staticFolder = "../web/static"
    firstResult := &testSuccessfulResult
    secondResult := &testUnsuccessfulResult
    storage.Get().Insert(&testService, firstResult)
    storage.Get().Insert(&testService, secondResult)
    // Can't be bothered dealing with timezone issues on the worker that runs the automated tests
    firstResult.Timestamp = time.Time{}
    secondResult.Timestamp = time.Time{}
    router := CreateRouter(&config.Config{})

    type Scenario struct {
        Name         string
        Path         string
        ExpectedCode int
        ExpectedBody string
    }
    scenarios := []Scenario{
        {
            Name:         "no-pagination",
            Path:         "/api/v1/statuses",
            ExpectedCode: http.StatusOK,
            ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
        },
        {
            Name:         "pagination-first-result",
            Path:         "/api/v1/statuses?page=1&pageSize=1",
            ExpectedCode: http.StatusOK,
            ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
        },
        {
            Name:         "pagination-second-result",
            Path:         "/api/v1/statuses?page=2&pageSize=1",
            ExpectedCode: http.StatusOK,
            ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
        },
        {
            Name:         "pagination-no-results",
            Path:         "/api/v1/statuses?page=5&pageSize=20",
            ExpectedCode: http.StatusOK,
            ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[]}}`,
        },
        {
            Name:         "invalid-pagination-should-fall-back-to-default",
            Path:         "/api/v1/statuses?page=INVALID&pageSize=INVALID",
            ExpectedCode: http.StatusOK,
            ExpectedBody: `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`,
        },
    }

    for _, scenario := range scenarios {
        t.Run(scenario.Name, func(t *testing.T) {
            request, _ := http.NewRequest("GET", scenario.Path, nil)
            responseRecorder := httptest.NewRecorder()
            router.ServeHTTP(responseRecorder, request)
            if responseRecorder.Code != scenario.ExpectedCode {
                t.Errorf("%s %s should have returned %d, but returned %d instead", request.Method, request.URL, scenario.ExpectedCode, responseRecorder.Code)
            }
            output := responseRecorder.Body.String()
            if output != scenario.ExpectedBody {
                t.Errorf("expected:\n %s\n\ngot:\n %s", scenario.ExpectedBody, output)
            }
        })
    }
}
8
controller/favicon.go
Normal file
@@ -0,0 +1,8 @@
package controller

import "net/http"

// favIconHandler handles requests for /favicon.ico
func favIconHandler(writer http.ResponseWriter, request *http.Request) {
    http.ServeFile(writer, request, staticFolder+"/favicon.ico")
}
8
controller/spa.go
Normal file
@@ -0,0 +1,8 @@
package controller

import "net/http"

// spaHandler handles requests for /
func spaHandler(writer http.ResponseWriter, request *http.Request) {
    http.ServeFile(writer, request, staticFolder+"/index.html")
}
46
controller/util.go
Normal file
@@ -0,0 +1,46 @@
package controller

import (
    "net/http"
    "strconv"
)

const (
    // DefaultPage is the default page to use if none is specified or an invalid value is provided
    DefaultPage = 1

    // DefaultPageSize is the default page size to use if none is specified or an invalid value is provided
    DefaultPageSize = 20

    // MaximumPageSize is the maximum page size allowed
    MaximumPageSize = 100
)

func extractPageAndPageSizeFromRequest(r *http.Request) (page int, pageSize int) {
    var err error
    if pageParameter := r.URL.Query().Get("page"); len(pageParameter) == 0 {
        page = DefaultPage
    } else {
        page, err = strconv.Atoi(pageParameter)
        if err != nil {
            page = DefaultPage
        }
        if page < 1 {
            page = DefaultPage
        }
    }
    if pageSizeParameter := r.URL.Query().Get("pageSize"); len(pageSizeParameter) == 0 {
        pageSize = DefaultPageSize
    } else {
        pageSize, err = strconv.Atoi(pageSizeParameter)
        if err != nil {
            pageSize = DefaultPageSize
        }
        if pageSize > MaximumPageSize {
            pageSize = MaximumPageSize
        } else if pageSize < 1 {
            pageSize = DefaultPageSize
        }
    }
    return
}
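For context, a minimal sketch of how this helper resolves query parameters (hypothetical usage from within the controller package; not part of the diff):

```go
r := httptest.NewRequest("GET", "/api/v1/statuses?page=2&pageSize=10", nil)
page, pageSize := extractPageAndPageSizeFromRequest(r)
fmt.Println(page, pageSize) // 2 10

// Out-of-range or non-numeric values fall back to the defaults and the maximum
r = httptest.NewRequest("GET", "/api/v1/statuses?page=-3&pageSize=999999", nil)
page, pageSize = extractPageAndPageSizeFromRequest(r)
fmt.Println(page, pageSize) // 1 100 (DefaultPage, MaximumPageSize)
```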
67
controller/util_test.go
Normal file
@@ -0,0 +1,67 @@
package controller

import (
    "fmt"
    "net/http"
    "testing"
)

func TestExtractPageAndPageSizeFromRequest(t *testing.T) {
    type Scenario struct {
        Name             string
        Page             string
        PageSize         string
        ExpectedPage     int
        ExpectedPageSize int
    }
    scenarios := []Scenario{
        {
            Page:             "1",
            PageSize:         "20",
            ExpectedPage:     1,
            ExpectedPageSize: 20,
        },
        {
            Page:             "2",
            PageSize:         "10",
            ExpectedPage:     2,
            ExpectedPageSize: 10,
        },
        {
            Page:             "2",
            PageSize:         "10",
            ExpectedPage:     2,
            ExpectedPageSize: 10,
        },
        {
            Page:             "1",
            PageSize:         "999999",
            ExpectedPage:     1,
            ExpectedPageSize: MaximumPageSize,
        },
        {
            Page:             "-1",
            PageSize:         "-1",
            ExpectedPage:     DefaultPage,
            ExpectedPageSize: DefaultPageSize,
        },
        {
            Page:             "invalid",
            PageSize:         "invalid",
            ExpectedPage:     DefaultPage,
            ExpectedPageSize: DefaultPageSize,
        },
    }
    for _, scenario := range scenarios {
        t.Run("page-"+scenario.Page+"-pageSize-"+scenario.PageSize, func(t *testing.T) {
            request, _ := http.NewRequest("GET", fmt.Sprintf("/api/v1/statuses?page=%s&pageSize=%s", scenario.Page, scenario.PageSize), nil)
            actualPage, actualPageSize := extractPageAndPageSizeFromRequest(request)
            if actualPage != scenario.ExpectedPage {
                t.Errorf("expected %d, got %d", scenario.ExpectedPage, actualPage)
            }
            if actualPageSize != scenario.ExpectedPageSize {
                t.Errorf("expected %d, got %d", scenario.ExpectedPageSize, actualPageSize)
            }
        })
    }
}
@@ -6,6 +6,14 @@ import (
    "github.com/TwinProduction/gatus/util"
)

const (
    // MaximumNumberOfResults is the maximum number of results that ServiceStatus.Results can have
    MaximumNumberOfResults = 100

    // MaximumNumberOfEvents is the maximum number of events that ServiceStatus.Events can have
    MaximumNumberOfEvents = 50
)

// ServiceStatus contains the evaluation Results of a Service
type ServiceStatus struct {
    // Name of the service
@@ -22,13 +30,17 @@ type ServiceStatus struct {

    // Events is a list of events
    //
    // We don't expose this through JSON, because the main dashboard doesn't need to have these events.
    // We don't expose this through JSON, because the main dashboard doesn't need to have this data.
    // However, the detailed service page does leverage this by including it to a map that will be
    // marshalled alongside the ServiceStatus.
    Events []*Event `json:"-"`

    // Uptime information on the service's uptime
    Uptime *Uptime `json:"uptime"`
    //
    // We don't expose this through JSON, because the main dashboard doesn't need to have this data.
    // However, the detailed service page does leverage this by including it to a map that will be
    // marshalled alongside the ServiceStatus.
    Uptime *Uptime `json:"-"`
}

// NewServiceStatus creates a new ServiceStatus
@@ -46,6 +58,41 @@ func NewServiceStatus(service *Service) *ServiceStatus {
    }
}

// ShallowCopy creates a shallow copy of ServiceStatus
func (ss *ServiceStatus) ShallowCopy() *ServiceStatus {
    return &ServiceStatus{
        Name:    ss.Name,
        Group:   ss.Group,
        Key:     ss.Key,
        Results: ss.Results,
        Events:  ss.Events,
        Uptime:  ss.Uptime,
    }
}

// WithResultPagination makes a shallow copy of the ServiceStatus with only the results
// within the range defined by the page and pageSize parameters
func (ss *ServiceStatus) WithResultPagination(page, pageSize int) *ServiceStatus {
    shallowCopy := ss.ShallowCopy()
    numberOfResults := len(shallowCopy.Results)
    start := numberOfResults - (page * pageSize)
    end := numberOfResults - ((page - 1) * pageSize)
    if start > numberOfResults {
        start = -1
    } else if start < 0 {
        start = 0
    }
    if end > numberOfResults {
        end = numberOfResults
    }
    if start < 0 || end < 0 {
        shallowCopy.Results = []*Result{}
    } else {
        shallowCopy.Results = shallowCopy.Results[start:end]
    }
    return shallowCopy
}
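To make the pagination arithmetic concrete, a short worked example (illustrative only, mirroring the cases in the test file further down): pages count backwards from the newest result.

```go
// With 25 stored results (index 0 = oldest, 24 = newest):
//   page=1, pageSize=10 -> start = 25-10 = 15, end = 25-0  = 25 -> Results[15:25] (10 newest)
//   page=2, pageSize=10 -> start = 25-20 = 5,  end = 25-10 = 15 -> Results[5:15]
//   page=3, pageSize=10 -> start = 25-30 = -5 -> clamped to 0,  end = 25-20 = 5 -> Results[0:5]
//   page=4, pageSize=10 -> start clamped to 0, end = 25-30 = -5 -> negative end -> empty
status := NewServiceStatus(&Service{Name: "name", Group: "group"})
for i := 0; i < 25; i++ {
    status.AddResult(&Result{Timestamp: time.Now()})
}
fmt.Println(len(status.WithResultPagination(2, 10).Results)) // 10
fmt.Println(len(status.WithResultPagination(4, 10).Results)) // 0
```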

// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
// no more than MaximumNumberOfResults results in the Results slice
func (ss *ServiceStatus) AddResult(result *Result) {
@@ -60,14 +107,20 @@ func (ss *ServiceStatus) AddResult(result *Result) {
                event.Type = EventUnhealthy
            }
            ss.Events = append(ss.Events, event)
            if len(ss.Events) > 20 {
                ss.Events = ss.Events[1:]
            if len(ss.Events) > MaximumNumberOfEvents {
                // Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
                // more than one extra element, we can get rid of all of them at once and thus returning the slice to a
                // length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
                ss.Events = ss.Events[len(ss.Events)-MaximumNumberOfEvents:]
            }
        }
    }
    ss.Results = append(ss.Results, result)
    if len(ss.Results) > 20 {
        ss.Results = ss.Results[1:]
    if len(ss.Results) > MaximumNumberOfResults {
        // Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
        // than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
        // MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
        ss.Results = ss.Results[len(ss.Results)-MaximumNumberOfResults:]
    }
    ss.Uptime.ProcessResult(result)
}
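The comments above justify re-slicing from the tail; a tiny illustration (not part of the diff) of why it restores the bound in one step even if the slice somehow overgrew:

```go
events := make([]*Event, MaximumNumberOfEvents+3) // somehow 3 elements over the limit
events = events[len(events)-MaximumNumberOfEvents:]
fmt.Println(len(events)) // 50 (MaximumNumberOfEvents), the 3 oldest entries dropped
```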
@@ -22,10 +22,45 @@ func TestNewServiceStatus(t *testing.T) {
func TestServiceStatus_AddResult(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service)
    for i := 0; i < 50; i++ {
    for i := 0; i < MaximumNumberOfResults+10; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now()})
    }
    if len(serviceStatus.Results) != 20 {
    if len(serviceStatus.Results) != MaximumNumberOfResults {
        t.Errorf("expected serviceStatus.Results to not exceed a length of %d", MaximumNumberOfResults)
    }
}

func TestServiceStatus_WithResultPagination(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service)
    for i := 0; i < 25; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now()})
    }
    if len(serviceStatus.WithResultPagination(1, 1).Results) != 1 {
        t.Errorf("expected to have 1 result")
    }
    if len(serviceStatus.WithResultPagination(5, 0).Results) != 0 {
        t.Errorf("expected to have 0 results")
    }
    if len(serviceStatus.WithResultPagination(-1, 20).Results) != 0 {
        t.Errorf("expected to have 0 results, because the page was invalid")
    }
    if len(serviceStatus.WithResultPagination(1, -1).Results) != 0 {
        t.Errorf("expected to have 0 results, because the page size was invalid")
    }
    if len(serviceStatus.WithResultPagination(1, 10).Results) != 10 {
        t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
    }
    if len(serviceStatus.WithResultPagination(2, 10).Results) != 10 {
        t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
    }
    if len(serviceStatus.WithResultPagination(3, 10).Results) != 5 {
        t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
    }
    if len(serviceStatus.WithResultPagination(4, 10).Results) != 0 {
        t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
    }
    if len(serviceStatus.WithResultPagination(1, 50).Results) != 25 {
        t.Errorf("expected to have 25 results, because there are only 25 results")
    }
}
@@ -213,9 +213,10 @@ func (service *Service) call(result *Result) {
        result.Errors = append(result.Errors, err.Error())
        return
    }
    defer response.Body.Close()
    if response.TLS != nil && len(response.TLS.PeerCertificates) > 0 {
        certificate := response.TLS.PeerCertificates[0]
        result.CertificateExpiration = certificate.NotAfter.Sub(time.Now())
        result.CertificateExpiration = time.Until(certificate.NotAfter)
    }
    result.HTTPStatus = response.StatusCode
    result.Connected = response.StatusCode > 0
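The change above is purely cosmetic: `time.Until` (in the standard library since Go 1.8) is shorthand for the exact same computation.

```go
// These two expressions are equivalent:
expiration := certificate.NotAfter.Sub(time.Now()) // old form, flagged by linters
expiration = time.Until(certificate.NotAfter)      // new form, reads better
```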
@@ -25,15 +25,20 @@ type Uptime struct {
    // LastHour is the uptime percentage over the past hour
    LastHour float64 `json:"1h"`

    successCountPerHour map[string]uint64
    totalCountPerHour   map[string]uint64
    // SuccessCountPerHour is a map containing the number of successes per hour, per timestamp following the
    // custom RFC3339WithoutMinutesAndSeconds format
    SuccessCountPerHour map[string]uint64 `json:"-"`

    // TotalCountPerHour is a map containing the total number of checks per hour, per timestamp following the
    // custom RFC3339WithoutMinutesAndSeconds format
    TotalCountPerHour map[string]uint64 `json:"-"`
}

// NewUptime creates a new Uptime
func NewUptime() *Uptime {
    return &Uptime{
        successCountPerHour: make(map[string]uint64),
        totalCountPerHour:   make(map[string]uint64),
        SuccessCountPerHour: make(map[string]uint64),
        TotalCountPerHour:   make(map[string]uint64),
    }
}

@@ -42,16 +47,16 @@ func NewUptime() *Uptime {
func (uptime *Uptime) ProcessResult(result *Result) {
    timestampDateWithHour := result.Timestamp.Format(RFC3339WithoutMinutesAndSeconds)
    if result.Success {
        uptime.successCountPerHour[timestampDateWithHour]++
        uptime.SuccessCountPerHour[timestampDateWithHour]++
    }
    uptime.totalCountPerHour[timestampDateWithHour]++
    uptime.TotalCountPerHour[timestampDateWithHour]++
    // Clean up only when we're starting to have too many useless keys
    // Note that this is only triggered when there are more entries than there should be after
    // 10 days, despite the fact that we are deleting everything that's older than 7 days.
    // This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
    if len(uptime.totalCountPerHour) > numberOfHoursInTenDays {
    if len(uptime.TotalCountPerHour) > numberOfHoursInTenDays {
        sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour))
        for k := range uptime.totalCountPerHour {
        for k := range uptime.TotalCountPerHour {
            dateWithHour, err := time.Parse(time.RFC3339, k)
            if err != nil {
                // This shouldn't happen, but we'll log it in case it does happen
@@ -59,8 +64,8 @@ func (uptime *Uptime) ProcessResult(result *Result) {
                continue
            }
            if sevenDaysAgo.Unix() > dateWithHour.Unix() {
                delete(uptime.totalCountPerHour, k)
                delete(uptime.successCountPerHour, k)
                delete(uptime.TotalCountPerHour, k)
                delete(uptime.SuccessCountPerHour, k)
            }
        }
    }
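Each result lands in an hourly bucket keyed by its timestamp with minutes and seconds zeroed out. A sketch of that bucketing (the layout value below is an assumption; only the constant's name appears in this diff):

```go
// Assumed layout for RFC3339WithoutMinutesAndSeconds; the literal "00:00" keeps
// minutes and seconds pinned to zero so every result in the same hour shares one key.
const RFC3339WithoutMinutesAndSeconds = "2006-01-02T15:00:00Z07:00"

ts := time.Date(2021, time.January, 2, 15, 47, 33, 0, time.UTC)
fmt.Println(ts.Format(RFC3339WithoutMinutesAndSeconds)) // 2021-01-02T15:00:00Z
// The key also parses back with time.RFC3339, which is what the cleanup loop relies on.
```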
@@ -88,8 +93,8 @@ func (uptime *Uptime) recalculate() {
    timestamp := now.Add(-sevenDays)
    for now.Sub(timestamp) >= 0 {
        timestampDateWithHour := timestamp.Format(RFC3339WithoutMinutesAndSeconds)
        successCountForTimestamp := uptime.successCountPerHour[timestampDateWithHour]
        totalCountForTimestamp := uptime.totalCountPerHour[timestampDateWithHour]
        successCountForTimestamp := uptime.SuccessCountPerHour[timestampDateWithHour]
        totalCountForTimestamp := uptime.TotalCountPerHour[timestampDateWithHour]
        uptimeBrackets["7d_success"] += successCountForTimestamp
        uptimeBrackets["7d_total"] += totalCountForTimestamp
        if now.Sub(timestamp) <= 24*time.Hour {

@@ -51,8 +51,8 @@ func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
    timestamp := now.Add(-12 * 24 * time.Hour)
    for timestamp.Unix() <= now.Unix() {
        serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
        if len(serviceStatus.Uptime.successCountPerHour) > numberOfHoursInTenDays {
            t.Errorf("At no point in time should there be more than %d entries in serviceStatus.successCountPerHour", numberOfHoursInTenDays)
        if len(serviceStatus.Uptime.SuccessCountPerHour) > numberOfHoursInTenDays {
            t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessCountPerHour", numberOfHoursInTenDays)
        }
        //fmt.Printf("timestamp=%s; uptimeDuringLastHour=%f; timeAgo=%s\n", timestamp.Format(time.RFC3339), serviceStatus.UptimeDuringLastHour, time.Since(timestamp))
        if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
@@ -81,11 +81,11 @@ spec:
              protocol: TCP
          resources:
            limits:
              cpu: 200m
              memory: 50M
              cpu: 250m
              memory: 100M
            requests:
              cpu: 50m
              memory: 20M
              memory: 30M
          volumeMounts:
            - mountPath: /config
              name: gatus-config

@@ -59,11 +59,11 @@ spec:
              protocol: TCP
          resources:
            limits:
              cpu: 200m
              memory: 50M
              cpu: 250m
              memory: 100M
            requests:
              cpu: 50m
              memory: 20M
              memory: 30M
          volumeMounts:
            - mountPath: /config
              name: gatus-config
3
go.mod
@@ -4,7 +4,8 @@ go 1.15

require (
    cloud.google.com/go v0.74.0 // indirect
    github.com/TwinProduction/gocache v1.1.0
    github.com/TwinProduction/gocache v1.2.1
    github.com/TwinProduction/health v1.0.0
    github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/gorilla/mux v1.8.0
10
go.sum
@@ -50,8 +50,10 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/TwinProduction/gocache v1.1.0 h1:mibBUyccd8kGHlm5dXhTMDOvWBK4mjNqGyOOkG8mib8=
github.com/TwinProduction/gocache v1.1.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y=
github.com/TwinProduction/gocache v1.2.1 h1:NAdMwO9SQEZFmX69YWx6fzhwb6fHakkLri0451c+V1w=
github.com/TwinProduction/gocache v1.2.1/go.mod h1:6zkBoLjrFLkIISwkZTgLy67qliCGSon1xpORM4Ri5HM=
github.com/TwinProduction/health v1.0.0 h1:TVyYTAORQQZ8LaptX8jCHZRCGCAO6e+oJx19BUIzQYY=
github.com/TwinProduction/health v1.0.0/go.mod h1:ys4mYKUeEfYrWmkm60xLtPjTuLIEDQNBZaTZvenLG1c=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -73,8 +75,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -432,6 +432,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
@@ -16,13 +16,13 @@ import (
    "k8s.io/client-go/tools/clientcmd"
)

// KubernetesClientApi is a minimal interface for interacting with Kubernetes
// KubernetesClientAPI is a minimal interface for interacting with Kubernetes
// Created mostly to make mocking the Kubernetes client easier
type KubernetesClientApi interface {
type KubernetesClientAPI interface {
    GetServices(namespace string) ([]v1.Service, error)
}

// KubernetesClient is a working implementation of KubernetesClientApi
// KubernetesClient is a working implementation of KubernetesClientAPI
type KubernetesClient struct {
    client *kubernetes.Clientset
}
@@ -44,7 +44,7 @@ func NewKubernetesClient(client *kubernetes.Clientset) *KubernetesClient {
}

// NewClient creates a Kubernetes client for the given ClusterMode
func NewClient(clusterMode ClusterMode) (KubernetesClientApi, error) {
func NewClient(clusterMode ClusterMode) (KubernetesClientAPI, error) {
    var kubeConfig *rest.Config
    var err error
    switch clusterMode {

@@ -5,6 +5,6 @@ import (
)

// GetKubernetesServices returns a list of Services from the given namespace
func GetKubernetesServices(client KubernetesClientApi, namespace string) ([]v1.Service, error) {
func GetKubernetesServices(client KubernetesClientAPI, namespace string) ([]v1.Service, error) {
    return client.GetServices(namespace)
}
22
main.go
@@ -1,17 +1,37 @@
package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"

    "github.com/TwinProduction/gatus/config"
    "github.com/TwinProduction/gatus/controller"
    "github.com/TwinProduction/gatus/storage"
    "github.com/TwinProduction/gatus/watchdog"
)

func main() {
    cfg := loadConfiguration()
    go watchdog.Monitor(cfg)
    controller.Handle()
    go controller.Handle()
    // Wait for termination signal
    sig := make(chan os.Signal, 1)
    done := make(chan bool, 1)
    signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
    go func() {
        <-sig
        log.Println("Received termination signal, attempting to gracefully shut down")
        controller.Shutdown()
        err := storage.Get().Save()
        if err != nil {
            log.Println("Failed to save storage provider:", err.Error())
        }
        done <- true
    }()
    <-done
    log.Println("Shutting down")
}

func loadConfiguration() *config.Config {
8
storage/config.go
Normal file
@@ -0,0 +1,8 @@
package storage

// Config is the configuration for the storage provider
type Config struct {
    // File is the path of the file to use for persistence
    // If blank, persistence is disabled.
    File string `yaml:"file"`
}
@@ -1,69 +0,0 @@
package storage

import (
    "encoding/json"
    "sync"

    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/util"
)

// InMemoryStore implements an in-memory store
type InMemoryStore struct {
    serviceStatuses     map[string]*core.ServiceStatus
    serviceResultsMutex sync.RWMutex
}

// NewInMemoryStore returns an in-memory store. Note that the store acts as a singleton, so although new-ing
// up in-memory stores will give you a unique reference to a struct each time, all structs returned
// by this function will act on the same in-memory store.
func NewInMemoryStore() *InMemoryStore {
    return &InMemoryStore{
        serviceStatuses: make(map[string]*core.ServiceStatus),
    }
}

// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
func (ims *InMemoryStore) GetAllAsJSON() ([]byte, error) {
    ims.serviceResultsMutex.RLock()
    serviceStatuses, err := json.Marshal(ims.serviceStatuses)
    ims.serviceResultsMutex.RUnlock()
    return serviceStatuses, err
}

// GetServiceStatus returns the service status for a given service name in the given group
func (ims *InMemoryStore) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
    key := util.ConvertGroupAndServiceToKey(groupName, serviceName)
    ims.serviceResultsMutex.RLock()
    serviceStatus := ims.serviceStatuses[key]
    ims.serviceResultsMutex.RUnlock()
    return serviceStatus
}

// GetServiceStatusByKey returns the service status for a given key
func (ims *InMemoryStore) GetServiceStatusByKey(key string) *core.ServiceStatus {
    ims.serviceResultsMutex.RLock()
    serviceStatus := ims.serviceStatuses[key]
    ims.serviceResultsMutex.RUnlock()
    return serviceStatus
}

// Insert inserts the observed result for the specified service into the in memory store
func (ims *InMemoryStore) Insert(service *core.Service, result *core.Result) {
    key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
    ims.serviceResultsMutex.Lock()
    serviceStatus, exists := ims.serviceStatuses[key]
    if !exists {
        serviceStatus = core.NewServiceStatus(service)
        ims.serviceStatuses[key] = serviceStatus
    }
    serviceStatus.AddResult(result)
    ims.serviceResultsMutex.Unlock()
}

// Clear will empty all the results from the in memory store
func (ims *InMemoryStore) Clear() {
    ims.serviceResultsMutex.Lock()
    ims.serviceStatuses = make(map[string]*core.ServiceStatus)
    ims.serviceResultsMutex.Unlock()
}
63
storage/storage.go
Normal file
@@ -0,0 +1,63 @@
package storage

import (
    "log"
    "time"

    "github.com/TwinProduction/gatus/storage/store"
    "github.com/TwinProduction/gatus/storage/store/memory"
)

var (
    provider store.Store

    // initialized keeps track of whether the storage provider was initialized
    // Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
    // every single time Get is called, we'll just lazily keep track of its existence through this variable
    initialized bool
)

// Get retrieves the storage provider
func Get() store.Store {
    if !initialized {
        log.Println("[storage][Get] Provider requested before it was initialized, automatically initializing")
        err := Initialize(nil)
        if err != nil {
            panic("failed to automatically initialize store: " + err.Error())
        }
    }
    return provider
}

// Initialize instantiates the storage provider based on the given Config
func Initialize(cfg *Config) error {
    initialized = true
    var err error
    if cfg == nil || len(cfg.File) == 0 {
        log.Println("[storage][Initialize] Creating storage provider")
        provider, err = memory.NewStore("")
        if err != nil {
            return err
        }
    } else {
        log.Printf("[storage][Initialize] Creating storage provider with file=%s", cfg.File)
        provider, err = memory.NewStore(cfg.File)
        if err != nil {
            return err
        }
        go autoSave(7 * time.Minute)
    }
    return nil
}

// autoSave automatically calls the Save function of the provider at every interval
func autoSave(interval time.Duration) {
    for {
        time.Sleep(interval)
        log.Printf("[storage][autoSave] Saving")
        err := provider.Save()
        if err != nil {
            log.Println("[storage][autoSave] Save failed:", err.Error())
        }
    }
}
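Putting the new storage package together, a minimal sketch of how a caller would wire it up (the file path is hypothetical; the functions are the ones introduced above):

```go
// Persistence enabled: results are flushed every 7 minutes by autoSave,
// plus one final Save on shutdown (see main.go earlier in this diff).
if err := storage.Initialize(&storage.Config{File: "/data/gatus.db"}); err != nil {
    log.Fatal("failed to initialize storage:", err)
}
storage.Get().Insert(service, result) // record a health-check result
defer storage.Get().Save()            // final flush on the way out
```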
104
storage/store/memory/memory.go
Normal file
@@ -0,0 +1,104 @@
package memory

import (
    "encoding/gob"

    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/util"
    "github.com/TwinProduction/gocache"
)

func init() {
    gob.Register(&core.ServiceStatus{})
    gob.Register(&core.Uptime{})
    gob.Register(&core.Result{})
    gob.Register(&core.Event{})
}

// Store that leverages gocache
type Store struct {
    file  string
    cache *gocache.Cache
}

// NewStore creates a new store
func NewStore(file string) (*Store, error) {
    store := &Store{
        file:  file,
        cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
    }
    if len(file) > 0 {
        _, err := store.cache.ReadFromFile(file)
        if err != nil {
            return nil, err
        }
    }
    return store, nil
}

// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
// with a subset of core.Result defined by the page and pageSize parameters
func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus {
    serviceStatuses := s.cache.GetAll()
    pagedServiceStatuses := make(map[string]*core.ServiceStatus, len(serviceStatuses))
    for k, v := range serviceStatuses {
        pagedServiceStatuses[k] = v.(*core.ServiceStatus).WithResultPagination(page, pageSize)
    }
    return pagedServiceStatuses
}

// GetServiceStatus returns the service status for a given service name in the given group
func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
    return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
}

// GetServiceStatusByKey returns the service status for a given key
func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
    serviceStatus := s.cache.GetValue(key)
    if serviceStatus == nil {
        return nil
    }
    return serviceStatus.(*core.ServiceStatus).ShallowCopy()
}

// Insert adds the observed result for the specified service into the store
func (s *Store) Insert(service *core.Service, result *core.Result) {
    key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
    serviceStatus, exists := s.cache.Get(key)
    if !exists {
        serviceStatus = core.NewServiceStatus(service)
    }
    serviceStatus.(*core.ServiceStatus).AddResult(result)
    s.cache.Set(key, serviceStatus)
}

// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
    var keysToDelete []string
    for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
        shouldDelete := true
        for _, key := range keys {
            if existingKey == key {
                shouldDelete = false
                break
            }
        }
        if shouldDelete {
            keysToDelete = append(keysToDelete, existingKey)
        }
    }
    return s.cache.DeleteAll(keysToDelete)
}
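A hypothetical caller-side sketch of the pruning method above, e.g. dropping persisted statuses for services that were removed from the configuration:

```go
var keys []string
for _, service := range cfg.Services {
    keys = append(keys, util.ConvertGroupAndServiceToKey(service.Group, service.Name))
}
removed := store.DeleteAllServiceStatusesNotInKeys(keys)
log.Printf("pruned %d service statuses that are no longer configured", removed)
```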

// Clear deletes everything from the store
func (s *Store) Clear() {
    s.cache.Clear()
}

// Save persists the cache to the store file
func (s *Store) Save() error {
    if len(s.file) > 0 {
        return s.cache.SaveToFile(s.file)
    }
    return nil
}
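The gob.Register calls in init() are what make this persistence work: gocache serializes the concrete types hiding behind the cached interface values. A minimal round-trip sketch (file path hypothetical):

```go
first, _ := memory.NewStore("/tmp/gatus-test.db")
first.Insert(service, result)
_ = first.Save() // SaveToFile writes the gob-encoded cache to disk

second, _ := memory.NewStore("/tmp/gatus-test.db") // ReadFromFile restores it on creation
status := second.GetServiceStatus(service.Group, service.Name)
fmt.Println(status != nil) // true: the persisted status survived the restart
```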
@@ -1,4 +1,4 @@
package storage
package memory

import (
    "fmt"
@@ -83,17 +83,17 @@ var (
    }
)

func TestInMemoryStore_Insert(t *testing.T) {
    store := NewInMemoryStore()
func TestStore_Insert(t *testing.T) {
    store, _ := NewStore("")
    store.Insert(&testService, &testSuccessfulResult)
    store.Insert(&testService, &testUnsuccessfulResult)

    if len(store.serviceStatuses) != 1 {
        t.Fatalf("expected 1 ServiceStatus, got %d", len(store.serviceStatuses))
    if store.cache.Count() != 1 {
        t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
    }
    key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
    serviceStatus, exists := store.serviceStatuses[key]
    if !exists {
    serviceStatus := store.GetServiceStatusByKey(key)
    if serviceStatus == nil {
        t.Fatalf("Store should've had key '%s', but didn't", key)
    }
    if len(serviceStatus.Results) != 2 {
@@ -140,8 +140,8 @@ func TestInMemoryStore_Insert(t *testing.T) {
    }
}

func TestInMemoryStore_GetServiceStatus(t *testing.T) {
    store := NewInMemoryStore()
func TestStore_GetServiceStatus(t *testing.T) {
    store, _ := NewStore("")
    store.Insert(&testService, &testSuccessfulResult)
    store.Insert(&testService, &testUnsuccessfulResult)

@@ -163,8 +163,8 @@ func TestInMemoryStore_GetServiceStatus(t *testing.T) {
    }
}

func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
    store := NewInMemoryStore()
func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
    store, _ := NewStore("")
    store.Insert(&testService, &testSuccessfulResult)

    serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
@@ -181,8 +181,8 @@ func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T)
    }
}

func TestInMemoryStore_GetServiceStatusByKey(t *testing.T) {
    store := NewInMemoryStore()
func TestStore_GetServiceStatusByKey(t *testing.T) {
    store, _ := NewStore("")
    store.Insert(&testService, &testSuccessfulResult)
    store.Insert(&testService, &testUnsuccessfulResult)

@@ -204,8 +204,8 @@ func TestInMemoryStore_GetServiceStatusByKey(t *testing.T) {
    }
}

func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
    store := NewInMemoryStore()
func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
    store, _ := NewStore("")
    firstResult := &testSuccessfulResult
    secondResult := &testUnsuccessfulResult
    store.Insert(&testService, firstResult)
@@ -213,12 +213,65 @@ func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
    // Can't be bothered dealing with timezone issues on the worker that runs the automated tests
    firstResult.Timestamp = time.Time{}
    secondResult.Timestamp = time.Time{}
    output, err := store.GetAllAsJSON()
    if err != nil {
        t.Fatal("shouldn't have returned an error, got", err.Error())
    serviceStatuses := store.GetAllServiceStatusesWithResultPagination(1, 20)
    if len(serviceStatuses) != 1 {
        t.Fatal("expected 1 service status")
    }
    expectedOutput := `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"uptime":{"7d":0.5,"24h":0.5,"1h":0.5}}}`
    if string(output) != expectedOutput {
        t.Errorf("expected:\n %s\n\ngot:\n %s", expectedOutput, string(output))
    actual, exists := serviceStatuses[util.ConvertGroupAndServiceToKey(testService.Group, testService.Name)]
    if !exists {
        t.Fatal("expected service status to exist")
    }
    if len(actual.Results) != 2 {
        t.Error("expected 2 results, got", len(actual.Results))
    }
    if len(actual.Events) != 2 {
        t.Error("expected 2 events, got", len(actual.Events))
    }
}

func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
    store, _ := NewStore("")
    firstService := core.Service{Name: "service-1", Group: "group"}
    secondService := core.Service{Name: "service-2", Group: "group"}
    result := &testSuccessfulResult
    store.Insert(&firstService, result)
    store.Insert(&secondService, result)
    if store.cache.Count() != 2 {
        t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
    }
    if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
        t.Fatal("firstService should exist")
    }
    if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) == nil {
        t.Fatal("secondService should exist")
    }
    store.DeleteAllServiceStatusesNotInKeys([]string{util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)})
    if store.cache.Count() != 1 {
        t.Fatalf("expected cache to have 1 key, got %d", store.cache.Count())
    }
    if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
        t.Error("firstService should still exist")
    }
    if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) != nil {
        t.Error("secondService should've been deleted")
    }
}

func TestStore_Save(t *testing.T) {
    files := []string{
        "",
        t.TempDir() + "/test.db",
    }
    for _, file := range files {
        t.Run(file, func(t *testing.T) {
            store, err := NewStore(file)
            if err != nil {
                t.Fatal("expected no error, got", err.Error())
            }
            err = store.Save()
            if err != nil {
                t.Fatal("expected no error, got", err.Error())
            }
        })
    }
}
38
storage/store/store.go
Normal file
@@ -0,0 +1,38 @@
package store

import (
    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/storage/store/memory"
)

// Store is the interface that each store should implement
type Store interface {
    // GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
    // with a subset of core.Result defined by the page and pageSize parameters
    GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus

    // GetServiceStatus returns the service status for a given service name in the given group
    GetServiceStatus(groupName, serviceName string) *core.ServiceStatus

    // GetServiceStatusByKey returns the service status for a given key
    GetServiceStatusByKey(key string) *core.ServiceStatus

    // Insert adds the observed result for the specified service into the store
    Insert(service *core.Service, result *core.Result)

    // DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
    //
    // Used to delete services that have been persisted but are no longer part of the configured services
    DeleteAllServiceStatusesNotInKeys(keys []string) int

    // Clear deletes everything from the store
    Clear()

    // Save persists the data if and where it needs to be persisted
    Save() error
}

var (
    // Validate interface implementation on compile
    _ Store = (*memory.Store)(nil)
)
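The var block above is the standard compile-time guard: assigning a typed nil pointer to a Store-typed variable makes the build fail if *memory.Store ever drifts from the interface. Any future backend can reuse the trick (the sql package below is hypothetical):

```go
// Hypothetical future implementation; compilation breaks until sql.Store
// implements every method of the Store interface.
var _ Store = (*sql.Store)(nil)
```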
136
storage/store/store_bench_test.go
Normal file
@@ -0,0 +1,136 @@
package store

import (
    "testing"
    "time"

    "github.com/TwinProduction/gatus/core"
    "github.com/TwinProduction/gatus/storage/store/memory"
)

var (
    firstCondition  = core.Condition("[STATUS] == 200")
    secondCondition = core.Condition("[RESPONSE_TIME] < 500")
    thirdCondition  = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")

    timestamp = time.Now()

    testService = core.Service{
        Name:                    "name",
        Group:                   "group",
        URL:                     "https://example.org/what/ever",
        Method:                  "GET",
        Body:                    "body",
        Interval:                30 * time.Second,
        Conditions:              []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
        Alerts:                  nil,
        Insecure:                false,
        NumberOfFailuresInARow:  0,
        NumberOfSuccessesInARow: 0,
    }
    testSuccessfulResult = core.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Body:                  []byte("body"),
        Errors:                nil,
        Connected:             true,
        Success:               true,
        Timestamp:             timestamp,
        Duration:              150 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*core.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   true,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   true,
            },
        },
    }
    testUnsuccessfulResult = core.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Body:                  []byte("body"),
        Errors:                []string{"error-1", "error-2"},
        Connected:             true,
        Success:               false,
        Timestamp:             timestamp,
        Duration:              750 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*core.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   false,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   false,
            },
        },
    }
)

func BenchmarkStore_GetAllAsJSON(b *testing.B) {
    memoryStore, err := memory.NewStore("")
    if err != nil {
        b.Fatal("failed to create store:", err.Error())
    }
    type Scenario struct {
        Name  string
        Store Store
    }
    scenarios := []Scenario{
        {
            Name:  "memory",
            Store: memoryStore,
        },
    }
    for _, scenario := range scenarios {
        scenario.Store.Insert(&testService, &testSuccessfulResult)
        scenario.Store.Insert(&testService, &testUnsuccessfulResult)
        b.Run(scenario.Name, func(b *testing.B) {
            for n := 0; n < b.N; n++ {
                scenario.Store.GetAllServiceStatusesWithResultPagination(1, 20)
            }
            b.ReportAllocs()
        })
    }
}

func BenchmarkStore_Insert(b *testing.B) {
    memoryStore, err := memory.NewStore("")
    if err != nil {
        b.Fatal("failed to create store:", err.Error())
    }
    type Scenario struct {
        Name  string
        Store Store
    }
    scenarios := []Scenario{
        {
            Name:  "memory",
            Store: memoryStore,
        },
    }
    for _, scenario := range scenarios {
        b.Run(scenario.Name, func(b *testing.B) {
            for n := 0; n < b.N; n++ {
                scenario.Store.Insert(&testService, &testSuccessfulResult)
                scenario.Store.Insert(&testService, &testUnsuccessfulResult)
            }
            b.ReportAllocs()
        })
    }
}
106
vendor/github.com/TwinProduction/gocache/README.md
generated
vendored
@@ -306,9 +306,9 @@ If you do not start the janitor, there will be no passive deletion of expired keys
For the sake of convenience, a ready-to-go cache server is available
|
||||
through the `gocacheserver` package.
|
||||
|
||||
The reason why the server is in a different package is because `gocache` does not use
|
||||
any external dependencies, but rather than re-inventing the wheel, the server
|
||||
implementation uses redcon, which is a Redis server framework for Go.
|
||||
The reason why the server is in a different package is because `gocache` limit its external dependencies to the strict
|
||||
minimum (e.g. boltdb for persistence), however, rather than re-inventing the wheel, the server implementation uses
|
||||
redcon, which is a very good Redis server framework for Go.
|
||||
|
||||
That way, those who desire to use gocache without the server will not add any extra dependencies
|
||||
as long as they don't import the `gocacheserver` package.
|
||||
@@ -323,7 +323,7 @@ import (
|
||||
|
||||
func main() {
|
||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
|
||||
server := gocacheserver.NewServer(cache)
|
||||
server := gocacheserver.NewServer(cache).WithPort(6379)
|
||||
server.Start()
|
||||
}
|
||||
```
|
||||
@@ -382,43 +382,67 @@ but if you're looking into using a library like gocache, odds are, you want more
| mem | 32G DDR4 |

```
BenchmarkMap_Get-8 95936680 26.3 ns/op
BenchmarkMap_SetSmallValue-8 7738132 424 ns/op
BenchmarkMap_SetMediumValue-8 7766346 424 ns/op
BenchmarkMap_SetLargeValue-8 7947063 435 ns/op
BenchmarkCache_Get-8 54549049 45.7 ns/op
BenchmarkCache_SetSmallValue-8 35225013 69.2 ns/op
BenchmarkCache_SetMediumValue-8 5952064 412 ns/op
BenchmarkCache_SetLargeValue-8 5969121 411 ns/op
BenchmarkCache_GetUsingLRU-8 54545949 45.6 ns/op
BenchmarkCache_SetSmallValueUsingLRU-8 5909504 419 ns/op
BenchmarkCache_SetMediumValueUsingLRU-8 5910885 418 ns/op
BenchmarkCache_SetLargeValueUsingLRU-8 5867544 419 ns/op
BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 5477178 462 ns/op
BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 5417595 475 ns/op
BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 5215263 479 ns/op
BenchmarkCache_SetSmallValueWithMaxSize10-8 10115574 236 ns/op
BenchmarkCache_SetMediumValueWithMaxSize10-8 10242792 241 ns/op
BenchmarkCache_SetLargeValueWithMaxSize10-8 10201894 241 ns/op
BenchmarkCache_SetSmallValueWithMaxSize1000-8 9637113 253 ns/op
BenchmarkCache_SetMediumValueWithMaxSize1000-8 9635175 253 ns/op
BenchmarkCache_SetLargeValueWithMaxSize1000-8 9598982 260 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000-8 7642584 337 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000-8 7407571 344 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000-8 7071360 345 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 7544194 332 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 7667004 344 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 7357642 338 ns/op
BenchmarkCache_GetAndSetMultipleConcurrently-8 1442306 1684 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 5117271 477 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 5228412 475 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 5139195 529 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 5251639 511 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 7384626 334 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 7361985 332 ns/op
BenchmarkCache_GetConcurrentlyWithLRU-8 3370784 726 ns/op
BenchmarkCache_GetConcurrentlyWithFIFO-8 3749994 681 ns/op
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 17647344 143 ns/op
// Normal map
BenchmarkMap_Get
BenchmarkMap_Get-8 46087372 26.7 ns/op
BenchmarkMap_Set
BenchmarkMap_Set/small_value-8 3841911 389 ns/op
BenchmarkMap_Set/medium_value-8 3887074 391 ns/op
BenchmarkMap_Set/large_value-8 3921956 393 ns/op
// Gocache
BenchmarkCache_Get
BenchmarkCache_Get/FirstInFirstOut-8 27273036 46.4 ns/op
BenchmarkCache_Get/LeastRecentlyUsed-8 26648248 46.3 ns/op
BenchmarkCache_Set
BenchmarkCache_Set/FirstInFirstOut_small_value-8 2919584 405 ns/op
BenchmarkCache_Set/FirstInFirstOut_medium_value-8 2990841 391 ns/op
BenchmarkCache_Set/FirstInFirstOut_large_value-8 2970513 391 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 2962939 402 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 2962963 390 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 2962928 394 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage
BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2683356 447 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2637578 441 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2672434 443 ns/op
BenchmarkCache_SetWithMaxSize
BenchmarkCache_SetWithMaxSize/100_small_value-8 4782966 252 ns/op
BenchmarkCache_SetWithMaxSize/10000_small_value-8 4067967 296 ns/op
BenchmarkCache_SetWithMaxSize/100000_small_value-8 3762055 328 ns/op
BenchmarkCache_SetWithMaxSize/100_medium_value-8 4760479 252 ns/op
BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4081050 295 ns/op
BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3785050 330 ns/op
BenchmarkCache_SetWithMaxSize/100_large_value-8 4732909 254 ns/op
BenchmarkCache_SetWithMaxSize/10000_large_value-8 4079533 297 ns/op
BenchmarkCache_SetWithMaxSize/100000_large_value-8 3712820 331 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU
BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4761732 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4084474 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3761402 329 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4783075 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103980 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3646023 331 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4779025 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 4096192 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3726823 331 ns/op
BenchmarkCache_GetSetMultipleConcurrent
BenchmarkCache_GetSetMultipleConcurrent-8 707142 1698 ns/op
BenchmarkCache_GetSetConcurrentWithFrequentEviction
BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3616256 334 ns/op
BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3636367 331 ns/op
BenchmarkCache_GetConcurrentWithLRU
BenchmarkCache_GetConcurrentWithLRU/FirstInFirstOut-8 4405557 268 ns/op
BenchmarkCache_GetConcurrentWithLRU/LeastRecentlyUsed-8 4445475 269 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6184591 191 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true-8 6090482 191 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6184629 187 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false-8 6281781 186 ns/op
(Trimmed "BenchmarkCache_" for readability)
WithForceNilInterfaceOnNilPointerWithConcurrency
WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4379564 268 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4379558 265 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4444456 261 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 4493896 262 ns/op
```
2
vendor/github.com/TwinProduction/gocache/go.mod
generated
vendored
@@ -3,9 +3,9 @@ module github.com/TwinProduction/gocache
go 1.15

require (
	github.com/boltdb/bolt v1.3.1
	github.com/go-redis/redis v6.15.9+incompatible
	github.com/onsi/ginkgo v1.14.1 // indirect
	github.com/onsi/gomega v1.10.2 // indirect
	github.com/tidwall/redcon v1.3.2
	go.etcd.io/bbolt v1.3.5
)
5
vendor/github.com/TwinProduction/gocache/go.sum
generated
vendored
@@ -1,5 +1,3 @@
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -30,6 +28,8 @@ github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/tidwall/redcon v1.3.2 h1:8INx/Nm3VSUbDUT16TH1rMgYQsbXNqy9xcX70edHXbo=
github.com/tidwall/redcon v1.3.2/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
@@ -40,6 +40,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
12
vendor/github.com/TwinProduction/gocache/gocache.go
generated
vendored
@@ -170,7 +170,7 @@ func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
//	value, _ := cache.Get("key")
//	// the following returns true, because the interface{} was forcefully set to nil
//	if value == nil {}
//	// the following will panic, because the value has been casted to its type
//	// the following will panic, because the value has been casted to its type (which is nil)
//	if value.(*Struct) == nil {}
//
// If set to false:
@@ -218,7 +218,8 @@ func (cache *Cache) Set(key string, value interface{}) {
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
// provided, the entry will not be created if the key doesn't exist
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
	// An interface is only nil if both its value and its type are nil, however, passing a pointer
	// An interface is only nil if both its value and its type are nil, however, passing a nil pointer as an interface{}
	// means that the interface itself is not nil, because the interface value is nil but not the type.
	if cache.forceNilInterfaceOnNilPointer {
		if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
			value = nil
@@ -334,6 +335,13 @@ func (cache *Cache) Get(key string) (interface{}, bool) {
	return entry.Value, true
}

// GetValue retrieves an entry using the key passed as parameter
// Unlike Get, this function only returns the value
func (cache *Cache) GetValue(key string) interface{} {
	value, _ := cache.Get(key)
	return value
}

// GetByKeys retrieves multiple entries using the keys passed as parameter
// All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
// cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
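The nil-pointer normalization in `SetWithTTL` above guards against a classic Go gotcha. A minimal standalone sketch of the pitfall itself (standard library only, no gocache types assumed):

```go
package main

import "fmt"

type Struct struct{}

func main() {
	var p *Struct // nil pointer
	var value interface{} = p

	// The interface is NOT nil: its type is *Struct even though its value is nil.
	fmt.Println(value == nil) // false

	// Normalizing the nil pointer to a nil interface, as forceNilInterfaceOnNilPointer
	// does, makes the intuitive comparison work again.
	value = nil
	fmt.Println(value == nil) // true
}
```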
2
vendor/github.com/TwinProduction/gocache/persistence.go
generated
vendored
@@ -8,7 +8,7 @@ import (
	"sort"
	"time"

	"github.com/boltdb/bolt"
	bolt "go.etcd.io/bbolt"
)

// SaveToFile stores the content of the cache to a file so that it can be read using
26
vendor/github.com/TwinProduction/gocache/policy.go
generated
vendored
@@ -3,6 +3,30 @@ package gocache
type EvictionPolicy string

var (
	// LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the
	// head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to
	// gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction
	// is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close
	// to the head.
	//
	// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
	// put 3 at the head and 1 at the tail:
	//     3 (head) -> 2 -> 1 (tail)
	// If the cache entry 1 was then accessed, 1 would become the head and 2 the tail:
	//     1 (head) -> 3 -> 2 (tail)
	// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted:
	//     4 (head) -> 1 -> 3 (tail)
	LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"
	FirstInFirstOut EvictionPolicy = "FirstInFirstOut"

	// FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are
	// created.
	//
	// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
	// put 3 at the head and 1 at the tail:
	//     3 (head) -> 2 -> 1 (tail)
	// If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change:
	//     3 (head) -> 2 -> 1 (tail)
	// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted:
	//     4 (head) -> 3 -> 2 (tail)
	FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
)
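A quick sketch of the eviction walkthrough those comments describe, using only calls that appear elsewhere in this diff (`NewCache`, `WithEvictionPolicy`, `WithMaxSize`, `Set`, `Get`); the expected result follows directly from the LeastRecentlyUsed comment above:

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gocache"
)

func main() {
	cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(3)
	cache.Set("1", 1)
	cache.Set("2", 2)
	cache.Set("3", 3) // 3 (head) -> 2 -> 1 (tail)
	cache.Get("1")    // 1 (head) -> 3 -> 2 (tail)
	cache.Set("4", 4) // MaxSize is 3, so the tail (2) is evicted

	_, exists := cache.Get("2")
	fmt.Println(exists) // false: 2 was evicted; 1, 3 and 4 remain
}
```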
1
vendor/github.com/TwinProduction/health/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1 @@
* text=lf
2
vendor/github.com/TwinProduction/health/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
.idea
*.iml
72
vendor/github.com/TwinProduction/health/README.md
generated
vendored
Normal file
@@ -0,0 +1,72 @@
# health

![build](https://github.com/TwinProduction/health/workflows/build/badge.svg?branch=master)
[](https://goreportcard.com/report/github.com/TwinProduction/health)
[](https://codecov.io/gh/TwinProduction/health)
[](https://github.com/TwinProduction)
[](https://pkg.go.dev/github.com/TwinProduction/health)

Health is a library used for creating a very simple health endpoint.

While implementing a health endpoint is very simple, I've grown tired of implementing
it over and over again.

## Installation

```
go get -u github.com/TwinProduction/health
```


## Usage

To retrieve the handler, you must use `health.Handler()` and are expected to pass it to the router like so:
```go
router := http.NewServeMux()
router.Handle("/health", health.Handler())
server := &http.Server{
	Addr:    ":8080",
	Handler: router,
}
```

By default, the handler will return `UP` when the status is up, and `DOWN` when the status is down.
If you prefer using JSON, however, you may initialize the health handler like so:
```go
router.Handle("/health", health.Handler().WithJSON(true))
```
The above will cause the response body to become `{"status":"UP"}` and `{"status":"DOWN"}` for both statuses, respectively.

To change the health of the application, you can use `health.SetStatus(<status>)` where `<status>` is one of `health.Up`
or `health.Down`:

```go
health.SetStatus(health.Up)
health.SetStatus(health.Down)
```
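Tying the two together, a minimal sketch of flipping the reported status from a background dependency check; `checkDependency` is a hypothetical placeholder, not part of the library:

```go
package main

import (
	"errors"
	"time"

	"github.com/TwinProduction/health"
)

// checkDependency is a hypothetical stand-in for a real downstream check (database ping, etc.)
func checkDependency() error {
	return errors.New("dependency unreachable")
}

func main() {
	// Re-evaluate the dependency periodically and reflect the result in the health endpoint.
	go func() {
		for range time.Tick(30 * time.Second) {
			if err := checkDependency(); err != nil {
				health.SetStatus(health.Down)
			} else {
				health.SetStatus(health.Up)
			}
		}
	}()
	select {} // block forever; a real program would start its HTTP server here instead
}
```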
### Complete example

```go
package main

import (
	"net/http"
	"time"

	"github.com/TwinProduction/health"
)

func main() {
	router := http.NewServeMux()
	router.Handle("/health", health.Handler())
	server := &http.Server{
		Addr:         "0.0.0.0:8080",
		Handler:      router,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 15 * time.Second,
		IdleTimeout:  15 * time.Second,
	}
	server.ListenAndServe()
}
```
3
vendor/github.com/TwinProduction/health/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/TwinProduction/health

go 1.15
51
vendor/github.com/TwinProduction/health/health.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
package health

import "net/http"

var (
	handler = &healthHandler{
		useJSON: false,
		status:  Up,
	}
)

type healthHandler struct {
	useJSON bool
	status  Status
}

// WithJSON configures whether the handler should output a response in JSON or in raw text
//
// Defaults to false
func (h *healthHandler) WithJSON(v bool) *healthHandler {
	h.useJSON = v
	return h
}

func (h healthHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	var status int
	var body []byte
	if h.status == Up {
		status = http.StatusOK
	} else {
		status = http.StatusInternalServerError
	}
	if h.useJSON {
		writer.Header().Set("Content-Type", "application/json")
		body = []byte(`{"status":"` + h.status + `"}`)
	} else {
		body = []byte(h.status)
	}
	writer.WriteHeader(status)
	_, _ = writer.Write(body)
}

// Handler retrieves the health handler
func Handler() *healthHandler {
	return handler
}

// SetStatus sets the status to be reflected by the health handler
func SetStatus(status Status) {
	handler.status = status
}
8
vendor/github.com/TwinProduction/health/status.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
package health

type Status string

var (
	Down Status = "DOWN"
	Up   Status = "UP"
)
18
vendor/github.com/boltdb/bolt/Makefile
generated
vendored
@@ -1,18 +0,0 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test
18
vendor/github.com/boltdb/bolt/appveyor.yml
generated
vendored
@@ -1,18 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...
28
vendor/github.com/boltdb/bolt/bolt_arm.go
generated
vendored
@@ -1,28 +0,0 @@
package bolt

import "unsafe"

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool

func init() {
	// Simple check to see whether this arch handles unaligned load/stores
	// correctly.

	// ARM9 and older devices require load/stores to be from/to aligned
	// addresses. If not, the lower 2 bits are cleared and that address is
	// read in a jumbled up order.

	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html

	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	brokenUnaligned = val != 0x11222211
}
252
vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
@@ -1,252 +0,0 @@
package bolt

import (
	"fmt"
	"sort"
	"unsafe"
)

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
	ids     []pgid          // all free and available free page ids.
	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
}

// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
	return &freelist{
		pending: make(map[txid][]pgid),
		cache:   make(map[pgid]bool),
	}
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
	n := f.count()
	if n >= 0xFFFF {
		// The first element will be used to store the count. See freelist.write.
		n++
	}
	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}

// count returns count of pages on the freelist
func (f *freelist) count() int {
	return f.free_count() + f.pending_count()
}

// free_count returns count of free pages
func (f *freelist) free_count() int {
	return len(f.ids)
}

// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
	var count int
	for _, list := range f.pending {
		count += len(list)
	}
	return count
}

// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
	m := make(pgids, 0, f.pending_count())
	for _, list := range f.pending {
		m = append(m, list...)
	}
	sort.Sort(m)
	mergepgids(dst, f.ids, m)
}

// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
	if len(f.ids) == 0 {
		return 0
	}

	var initial, previd pgid
	for i, id := range f.ids {
		if id <= 1 {
			panic(fmt.Sprintf("invalid page allocation: %d", id))
		}

		// Reset initial page if this is not contiguous.
		if previd == 0 || id-previd != 1 {
			initial = id
		}

		// If we found a contiguous block then remove it and return it.
		if (id-initial)+1 == pgid(n) {
			// If we're allocating off the beginning then take the fast path
			// and just adjust the existing slice. This will use extra memory
			// temporarily but the append() in free() will realloc the slice
			// as is necessary.
			if (i + 1) == n {
				f.ids = f.ids[i+1:]
			} else {
				copy(f.ids[i-n+1:], f.ids[i+1:])
				f.ids = f.ids[:len(f.ids)-n]
			}

			// Remove from the free cache.
			for i := pgid(0); i < pgid(n); i++ {
				delete(f.cache, initial+i)
			}

			return initial
		}

		previd = id
	}
	return 0
}

// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
	if p.id <= 1 {
		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
	}

	// Free page and all its overflow pages.
	var ids = f.pending[txid]
	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
		// Verify that page is not already free.
		if f.cache[id] {
			panic(fmt.Sprintf("page %d already freed", id))
		}

		// Add to the freelist and cache.
		ids = append(ids, id)
		f.cache[id] = true
	}
	f.pending[txid] = ids
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
	m := make(pgids, 0)
	for tid, ids := range f.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, ids...)
			delete(f.pending, tid)
		}
	}
	sort.Sort(m)
	f.ids = pgids(f.ids).merge(m)
}

// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
	// Remove page ids from cache.
	for _, id := range f.pending[txid] {
		delete(f.cache, id)
	}

	// Remove pages from pending list.
	delete(f.pending, txid)
}

// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
	return f.cache[pgid]
}

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	idx, count := 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
	}

	// Copy the list of page ids from the freelist.
	if count == 0 {
		f.ids = nil
	} else {
		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
		f.ids = make([]pgid, len(ids))
		copy(f.ids, ids)

		// Make sure they're sorted.
		sort.Sort(pgids(f.ids))
	}

	// Rebuild the page cache.
	f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
	// Combine the old free pgids and pgids waiting on an open transaction.

	// Update the header flag.
	p.flags |= freelistPageFlag

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	lenids := f.count()
	if lenids == 0 {
		p.count = uint16(lenids)
	} else if lenids < 0xFFFF {
		p.count = uint16(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
	} else {
		p.count = 0xFFFF
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
	}

	return nil
}

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
	f.read(p)

	// Build a cache of only pending pages.
	pcache := make(map[pgid]bool)
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	var a []pgid
	for _, id := range f.ids {
		if !pcache[id] {
			a = append(a, id)
		}
	}
	f.ids = a

	// Once the available list is rebuilt then rebuild the free cache so that
	// it includes the available and pending free pages.
	f.reindex()
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
	f.cache = make(map[pgid]bool, len(f.ids))
	for _, id := range f.ids {
		f.cache[id] = true
	}
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			f.cache[pendingID] = true
		}
	}
}
1
vendor/github.com/boltdb/bolt/.gitignore → vendor/go.etcd.io/bbolt/.gitignore
generated
vendored
@@ -2,3 +2,4 @@
*.test
*.swp
/bin/
cover.out
17
vendor/go.etcd.io/bbolt/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
language: go
go_import_path: go.etcd.io/bbolt

sudo: false

go:
- 1.12

before_install:
- go get -v honnef.co/go/tools/...
- go get -v github.com/kisielk/errcheck

script:
- make fmt
- make test
- make race
# - make errcheck
0
vendor/github.com/boltdb/bolt/LICENSE → vendor/go.etcd.io/bbolt/LICENSE
generated
vendored
38
vendor/go.etcd.io/bbolt/Makefile
generated
vendored
Normal file
@@ -0,0 +1,38 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
	@echo "array freelist test"
	@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"

fmt:
	!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')

# go get honnef.co/go/tools/simple
gosimple:
	gosimple ./...

# go get honnef.co/go/tools/unused
unused:
	unused ./...

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt

test:
	TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
	# Note: gets "program not an importable package" in out of path builds
	TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt

	@echo "array freelist test"

	@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
	# Note: gets "program not an importable package" in out of path builds
	@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt

.PHONY: race fmt errcheck test gosimple unused
205
vendor/github.com/boltdb/bolt/README.md → vendor/go.etcd.io/bbolt/README.md
generated
vendored
@@ -1,5 +1,18 @@
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
====
bbolt
=====

[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
[](https://codecov.io/gh/etcd-io/bbolt)
[](https://travis-ci.com/etcd-io/bbolt)
[](https://godoc.org/github.com/etcd-io/bbolt)
[](https://github.com/etcd-io/bbolt/releases)
[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)

bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
store. The purpose of this fork is to provide the Go community with an active
maintenance and development target for Bolt; the goal is improved reliability
and stability. bbolt includes bug fixes, performance enhancements, and features
not found in Bolt while preserving backwards compatibility with the Bolt API.

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
@@ -10,6 +23,8 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
@@ -21,36 +36,42 @@ consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## Project versioning

bbolt uses [semantic versioning](http://semver.org).
API should not change between patch and minor releases.
New minor versions may add additional features to the API.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started
@@ -59,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
$ go get go.etcd.io/bbolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.


### Importing bbolt

To use bbolt as an embedded key-value store, import as:

```go
import bolt "go.etcd.io/bbolt"

db, err := bolt.Open(path, 0666, nil)
if err != nil {
	return err
}
defer db.Close()
```


### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
@@ -79,7 +115,7 @@ package main

import (
	"log"

	"github.com/boltdb/bolt"
	bolt "go.etcd.io/bbolt"
)

func main() {
@@ -116,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

Transactions should not depend on one another and generally shouldn't be opened
simultaneously in the same goroutine. This can cause a deadlock as the read-write
transaction needs to periodically re-map the data file but it cannot do so while
any read-only transaction is open. Even a nested read-only transaction can cause
a deadlock, as the child transaction can block the parent transaction from releasing
its resources.
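As a concrete illustration of the one-transaction-per-goroutine rule the hunk above describes, a minimal sketch using the standard bbolt API (bucket and key names are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: runs on its own, so it can re-map the data file safely.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("services"))
		if err != nil {
			return err
		}
		return b.Put([]byte("twinnation"), []byte("up"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: opened separately, never nested inside the Update above.
	err = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("%s\n", tx.Bucket([]byte("services")).Get([]byte("twinnation")))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```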
#### Read-write transactions
@@ -239,7 +276,7 @@ should be writable.
### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
function:

```go
@@ -522,7 +559,7 @@ this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
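The hunk above is cut off by the diff view, but the backup-over-HTTP idea it references is straightforward. A minimal sketch using `Tx.WriteTo` from the standard bbolt API, assuming `db` is an open `*bolt.DB` and the usual `net/http` and `strconv` imports are in place (handler path and filename are arbitrary):

```go
// BackupHandleFunc streams a consistent snapshot of the database over HTTP.
// Fetching it with `curl http://localhost:8080/backup > my.db` yields a usable copy.
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w) // hot backup: does not block other reads/writes
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```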
@@ -811,7 +848,7 @@ Here are a few things to note when evaluating and using Bolt:

## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
@@ -863,54 +900,58 @@ them via pull request.

Below is a list of public, open source projects that use Bolt:

* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using the high-performance HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using the high-performance HTTPRouter.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.

If you are using Bolt in a project please send a pull request to add it to the list.
5
vendor/github.com/boltdb/bolt/bolt_386.go → vendor/go.etcd.io/bbolt/bolt_386.go
generated
vendored
@@ -1,10 +1,7 @@
package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
7
vendor/go.etcd.io/bbolt/bolt_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
7
vendor/go.etcd.io/bbolt/bolt_arm.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
5
vendor/github.com/boltdb/bolt/bolt_arm64.go → vendor/go.etcd.io/bbolt/bolt_arm64.go
generated
vendored
@@ -1,12 +1,9 @@
// +build arm64

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
2
vendor/github.com/boltdb/bolt/bolt_linux.go → vendor/go.etcd.io/bbolt/bolt_linux.go
generated
vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"syscall"
9
vendor/go.etcd.io/bbolt/bolt_mips64x.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build mips64 mips64le

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
9
vendor/go.etcd.io/bbolt/bolt_mipsx.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// +build mips mipsle

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x40000000 // 1GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"syscall"
2
vendor/github.com/boltdb/bolt/bolt_ppc.go → vendor/go.etcd.io/bbolt/bolt_ppc.go
generated
vendored
@@ -1,6 +1,6 @@
// +build ppc

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
5
vendor/github.com/boltdb/bolt/bolt_ppc64.go → vendor/go.etcd.io/bbolt/bolt_ppc64.go
generated
vendored
@@ -1,12 +1,9 @@
// +build ppc64

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@@ -1,12 +1,9 @@
// +build ppc64le

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@@ -1,10 +1,9 @@
package bolt
// +build riscv64

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
5 vendor/github.com/boltdb/bolt/bolt_s390x.go → vendor/go.etcd.io/bbolt/bolt_s390x.go generated vendored
@@ -1,12 +1,9 @@
// +build s390x

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
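The per-architecture files above differ only in their mmap and allocation limits; Go build tags ensure exactly one set of constants is compiled per platform. A small illustrative program (not part of the diff; the table merely restates a few constants from the files above):

```go
package main

import (
    "fmt"
    "runtime"
)

func main() {
    // Restates a few of the per-arch constants above; illustrative only,
    // since the real values are selected at compile time by build tags.
    limits := map[string]int64{
        "amd64": 0xFFFFFFFFFFFF, // 256TB
        "arm":   0x7FFFFFFF,     // 2GB
        "mips":  0x40000000,     // 1GB
    }
    if limit, ok := limits[runtime.GOARCH]; ok {
        fmt.Printf("GOARCH=%s maxMapSize=%d\n", runtime.GOARCH, limit)
    } else {
        fmt.Printf("GOARCH=%s not in this illustrative table\n", runtime.GOARCH)
    }
}
```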
44 vendor/github.com/boltdb/bolt/bolt_unix.go → vendor/go.etcd.io/bbolt/bolt_unix.go generated vendored
@@ -1,41 +1,43 @@
// +build !windows,!plan9,!solaris
// +build !windows,!plan9,!solaris,!aix

package bolt
package bbolt

import (
    "fmt"
    "os"
    "syscall"
    "time"
    "unsafe"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
func flock(db *DB, exclusive bool, timeout time.Duration) error {
    var t time.Time
    if timeout != 0 {
        t = time.Now()
    }
    fd := db.file.Fd()
    flag := syscall.LOCK_NB
    if exclusive {
        flag |= syscall.LOCK_EX
    } else {
        flag |= syscall.LOCK_SH
    }
    for {
        // If we're beyond our timeout then return an error.
        // This can only occur after we've attempted a flock once.
        if t.IsZero() {
            t = time.Now()
        } else if timeout > 0 && time.Since(t) > timeout {
            return ErrTimeout
        }
        flag := syscall.LOCK_SH
        if exclusive {
            flag = syscall.LOCK_EX
        }

        // Otherwise attempt to obtain an exclusive lock.
        err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
        // Attempt to obtain an exclusive lock.
        err := syscall.Flock(int(fd), flag)
        if err == nil {
            return nil
        } else if err != syscall.EWOULDBLOCK {
            return err
        }

        // If we timed out then return an error.
        if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
            return ErrTimeout
        }

        // Wait for a bit and try again.
        time.Sleep(50 * time.Millisecond)
        time.Sleep(flockRetryTimeout)
    }
}

@@ -53,7 +55,9 @@ func mmap(db *DB, sz int) error {
    }

    // Advise the kernel that the mmap is accessed randomly.
    if err := madvise(b, syscall.MADV_RANDOM); err != nil {
    err = madvise(b, syscall.MADV_RANDOM)
    if err != nil && err != syscall.ENOSYS {
        // Ignore not implemented error in kernel because it still works.
        return fmt.Errorf("madvise: %s", err)
    }
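For context, the retry loop above is driven by Options.Timeout. A minimal usage sketch (not part of the diff; the database path is illustrative):

```go
package main

import (
    "fmt"
    "log"
    "time"

    bolt "go.etcd.io/bbolt"
)

func main() {
    // Timeout bounds how long the flock retry loop above waits for the
    // file lock before returning ErrTimeout. "example.db" is illustrative.
    db, err := bolt.Open("example.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
    if err != nil {
        log.Fatalf("open: %v", err) // ErrTimeout if another process holds the lock
    }
    defer db.Close()
    fmt.Println("lock acquired")
}
```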
90 vendor/go.etcd.io/bbolt/bolt_unix_aix.go generated vendored Normal file
@@ -0,0 +1,90 @@
// +build aix

package bbolt

import (
    "fmt"
    "syscall"
    "time"
    "unsafe"

    "golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
    var t time.Time
    if timeout != 0 {
        t = time.Now()
    }
    fd := db.file.Fd()
    var lockType int16
    if exclusive {
        lockType = syscall.F_WRLCK
    } else {
        lockType = syscall.F_RDLCK
    }
    for {
        // Attempt to obtain an exclusive lock.
        lock := syscall.Flock_t{Type: lockType}
        err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
        if err == nil {
            return nil
        } else if err != syscall.EAGAIN {
            return err
        }

        // If we timed out then return an error.
        if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
            return ErrTimeout
        }

        // Wait for a bit and try again.
        time.Sleep(flockRetryTimeout)
    }
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
    var lock syscall.Flock_t
    lock.Start = 0
    lock.Len = 0
    lock.Type = syscall.F_UNLCK
    lock.Whence = 0
    return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
    // Map the data file to memory.
    b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
    if err != nil {
        return err
    }

    // Advise the kernel that the mmap is accessed randomly.
    if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
        return fmt.Errorf("madvise: %s", err)
    }

    // Save the original byte slice and convert to a byte array pointer.
    db.dataref = b
    db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
    db.datasz = sz
    return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
    // Ignore the unmap if we have no mapped data.
    if db.dataref == nil {
        return nil
    }

    // Unmap using the original byte slice.
    err := unix.Munmap(db.dataref)
    db.dataref = nil
    db.data = nil
    db.datasz = 0
    return err
}
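The mmap call above uses golang.org/x/sys/unix directly. A minimal standalone sketch of the same read-only mapping pattern, assuming a Unix-like OS and an existing, non-empty file named example.db (both assumptions):

```go
package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    f, err := os.Open("example.db")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }

    // Map the whole file read-only; writes through the mapping would SEGV,
    // which is exactly the property bbolt relies on for its data pages.
    b, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), unix.PROT_READ, unix.MAP_SHARED)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Munmap(b)

    if len(b) > 0 {
        fmt.Printf("mapped %d bytes; first byte: %#x\n", len(b), b[0])
    }
}
```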
@@ -1,8 +1,7 @@
package bolt
package bbolt

import (
    "fmt"
    "os"
    "syscall"
    "time"
    "unsafe"
@@ -11,36 +10,35 @@ import (
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
func flock(db *DB, exclusive bool, timeout time.Duration) error {
    var t time.Time
    if timeout != 0 {
        t = time.Now()
    }
    fd := db.file.Fd()
    var lockType int16
    if exclusive {
        lockType = syscall.F_WRLCK
    } else {
        lockType = syscall.F_RDLCK
    }
    for {
        // If we're beyond our timeout then return an error.
        // This can only occur after we've attempted a flock once.
        if t.IsZero() {
            t = time.Now()
        } else if timeout > 0 && time.Since(t) > timeout {
            return ErrTimeout
        }
        var lock syscall.Flock_t
        lock.Start = 0
        lock.Len = 0
        lock.Pid = 0
        lock.Whence = 0
        lock.Pid = 0
        if exclusive {
            lock.Type = syscall.F_WRLCK
        } else {
            lock.Type = syscall.F_RDLCK
        }
        err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
        // Attempt to obtain an exclusive lock.
        lock := syscall.Flock_t{Type: lockType}
        err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
        if err == nil {
            return nil
        } else if err != syscall.EAGAIN {
            return err
        }

        // If we timed out then return an error.
        if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
            return ErrTimeout
        }

        // Wait for a bit and try again.
        time.Sleep(50 * time.Millisecond)
        time.Sleep(flockRetryTimeout)
    }
}
57 vendor/github.com/boltdb/bolt/bolt_windows.go → vendor/go.etcd.io/bbolt/bolt_windows.go generated vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
    "fmt"
@@ -16,8 +16,6 @@ var (
)

const (
    lockExt = ".lock"

    // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
    flagLockExclusive       = 2
    flagLockFailImmediately = 1
@@ -48,48 +46,47 @@ func fdatasync(db *DB) error {
}

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
    // Create a separate lock file on windows because a process
    // cannot share an exclusive lock on the same file. This is
    // needed during Tx.WriteTo().
    f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
    if err != nil {
        return err
    }
    db.lockfile = f

func flock(db *DB, exclusive bool, timeout time.Duration) error {
    var t time.Time
    if timeout != 0 {
        t = time.Now()
    }
    var flag uint32 = flagLockFailImmediately
    if exclusive {
        flag |= flagLockExclusive
    }
    for {
        // If we're beyond our timeout then return an error.
        // This can only occur after we've attempted a flock once.
        if t.IsZero() {
            t = time.Now()
        } else if timeout > 0 && time.Since(t) > timeout {
            return ErrTimeout
        }
        // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
        // -1..0 as the lock on the database file.
        var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
        err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
            Offset:     m1,
            OffsetHigh: m1,
        })

        var flag uint32 = flagLockFailImmediately
        if exclusive {
            flag |= flagLockExclusive
        }

        err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
        if err == nil {
            return nil
        } else if err != errLockViolation {
            return err
        }

        // If we timed out then return an error.
        if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
            return ErrTimeout
        }

        // Wait for a bit and try again.
        time.Sleep(50 * time.Millisecond)
        time.Sleep(flockRetryTimeout)
    }
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
    err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
    db.lockfile.Close()
    os.Remove(db.path + lockExt)
    var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
    err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
        Offset:     m1,
        OffsetHigh: m1,
    })
    return err
}
@@ -1,6 +1,6 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt
package bbolt

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
50 vendor/github.com/boltdb/bolt/bucket.go → vendor/go.etcd.io/bbolt/bucket.go generated vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
    "bytes"
@@ -14,13 +14,6 @@ const (
    MaxValueSize = (1 << 31) - 2
)

const (
    maxUint = ^uint(0)
    minUint = 0
    maxInt  = int(^uint(0) >> 1)
    minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
@@ -130,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
    var child = newBucket(b.tx)

    // If unaligned load/stores are broken on this arch and value is
    // unaligned simply clone to an aligned byte array.
    unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0

    // Unaligned access requires a copy to be made.
    const unalignedMask = unsafe.Alignof(struct {
        bucket
        page
    }{}) - 1
    unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
    if unaligned {
        value = cloneBytes(value)
    }
@@ -213,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
}

// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
    if b.tx.db == nil {
        return ErrTxClosed
@@ -235,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
    // Recursively delete all child buckets.
    child := b.Bucket(key)
    err := child.ForEach(func(k, v []byte) error {
        if v == nil {
        if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
            if err := child.DeleteBucket(k); err != nil {
                return fmt.Errorf("delete bucket: %s", err)
            }
@@ -323,7 +318,12 @@ func (b *Bucket) Delete(key []byte) error {

    // Move cursor to correct position.
    c := b.Cursor()
    _, _, flags := c.seek(key)
    k, _, flags := c.seek(key)

    // Return nil if the key doesn't exist.
    if !bytes.Equal(key, k) {
        return nil
    }

    // Return an error if there is already existing bucket value.
    if (flags & bucketLeafFlag) != 0 {
@@ -411,7 +411,7 @@ func (b *Bucket) Stats() BucketStats {

    if p.count != 0 {
        // If page has any elements, add all element headers.
        used += leafPageElementSize * int(p.count-1)
        used += leafPageElementSize * uintptr(p.count-1)

        // Add all element key, value sizes.
        // The computation takes advantage of the fact that the position
@@ -419,16 +419,16 @@ func (b *Bucket) Stats() BucketStats {
        // of all previous elements' keys and values.
        // It also includes the last element's header.
        lastElement := p.leafPageElement(p.count - 1)
        used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
        used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
    }

    if b.root == 0 {
        // For inlined bucket just update the inline stats
        s.InlineBucketInuse += used
        s.InlineBucketInuse += int(used)
    } else {
        // For non-inlined bucket update all the leaf stats
        s.LeafPageN++
        s.LeafInuse += used
        s.LeafInuse += int(used)
        s.LeafOverflowN += int(p.overflow)

        // Collect stats from sub-buckets.
@@ -449,13 +449,13 @@ func (b *Bucket) Stats() BucketStats {

    // used totals the used bytes for the page
    // Add header and all element headers.
    used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
    used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))

    // Add size of all keys and values.
    // Again, use the fact that last element's position equals to
    // the total of key, value sizes of all previous elements.
    used += int(lastElement.pos + lastElement.ksize)
    s.BranchInuse += used
    used += uintptr(lastElement.pos + lastElement.ksize)
    s.BranchInuse += int(used)
    s.BranchOverflowN += int(p.overflow)
}

@@ -595,7 +595,7 @@ func (b *Bucket) inlineable() bool {
    // our threshold for inline bucket size.
    var size = pageHeaderSize
    for _, inode := range n.inodes {
        size += leafPageElementSize + len(inode.key) + len(inode.value)
        size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))

        if inode.flags&bucketLeafFlag != 0 {
            return false
@@ -608,8 +608,8 @@ func (b *Bucket) inlineable() bool {
}

// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
    return b.tx.db.pageSize / 4
func (b *Bucket) maxInlineBucketSize() uintptr {
    return uintptr(b.tx.db.pageSize / 4)
}

// write allocates and writes a bucket to a byte slice.
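For context, a minimal sketch of the public Bucket API whose internals change above (database path, bucket, and key names are illustrative, not from the diff):

```go
package main

import (
    "fmt"
    "log"

    bolt "go.etcd.io/bbolt"
)

func main() {
    db, err := bolt.Open("example.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Create a bucket and store a key inside a read-write transaction.
    err = db.Update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("services"))
        if err != nil {
            return err
        }
        return b.Put([]byte("gatus"), []byte("healthy"))
    })
    if err != nil {
        log.Fatal(err)
    }

    // Read it back in a read-only transaction.
    err = db.View(func(tx *bolt.Tx) error {
        v := tx.Bucket([]byte("services")).Get([]byte("gatus"))
        fmt.Printf("gatus = %s\n", v)
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}
```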
12 vendor/github.com/boltdb/bolt/cursor.go → vendor/go.etcd.io/bbolt/cursor.go generated vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
    "bytes"
@@ -157,12 +157,6 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
    // Start from root page/node and traverse to correct page.
    c.stack = c.stack[:0]
    c.search(seek, c.bucket.root)
    ref := &c.stack[len(c.stack)-1]

    // If the cursor is pointing to the end of page/node then return nil.
    if ref.index >= ref.count() {
        return nil, nil, 0
    }

    // If this is a bucket then return a nil value.
    return c.keyValue()
@@ -339,6 +333,8 @@ func (c *Cursor) nsearch(key []byte) {
// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
    ref := &c.stack[len(c.stack)-1]

    // If the cursor is pointing to the end of page/node then return nil.
    if ref.count() == 0 || ref.index >= ref.count() {
        return nil, nil, 0
    }
@@ -370,7 +366,7 @@ func (c *Cursor) node() *node {
    }
    for _, ref := range c.stack[:len(c.stack)-1] {
        _assert(!n.isLeaf, "expected branch node")
        n = n.childAt(int(ref.index))
        n = n.childAt(ref.index)
    }
    _assert(n.isLeaf, "expected leaf node")
    return n
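The seek/keyValue changes above back the public Cursor API. A small sketch of typical cursor iteration, assuming imports of fmt and bolt "go.etcd.io/bbolt" and an existing bucket (names are illustrative):

```go
// printAll iterates every key in the given bucket using a Cursor, the
// public counterpart of the internal seek/keyValue logic above.
func printAll(db *bolt.DB, bucket []byte) error {
    return db.View(func(tx *bolt.Tx) error {
        c := tx.Bucket(bucket).Cursor()
        for k, v := c.First(); k != nil; k, v = c.Next() {
            fmt.Printf("%s = %s\n", k, v)
        }
        return nil
    })
}
```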
265 vendor/github.com/boltdb/bolt/db.go → vendor/go.etcd.io/bbolt/db.go generated vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
    "errors"
@@ -7,8 +7,7 @@ import (
    "log"
    "os"
    "runtime"
    "runtime/debug"
    "strings"
    "sort"
    "sync"
    "time"
    "unsafe"
@@ -23,6 +22,8 @@ const version = 2
// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED

const pgidNoFreelist pgid = 0xffffffffffffffff

// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@@ -39,6 +40,19 @@ const (
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()

// The time elapsed between consecutive file locking attempts.
const flockRetryTimeout = 50 * time.Millisecond

// FreelistType is the type of the freelist backend
type FreelistType string

const (
    // FreelistArrayType indicates backend freelist type is array
    FreelistArrayType = FreelistType("array")
    // FreelistMapType indicates backend freelist type is hashmap
    FreelistMapType = FreelistType("hashmap")
)

// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
@@ -61,6 +75,18 @@ type DB struct {
    // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
    NoSync bool

    // When true, skips syncing freelist to disk. This improves the database
    // write performance under normal operation, but requires a full database
    // re-sync during recovery.
    NoFreelistSync bool

    // FreelistType sets the backend freelist type. There are two options. Array, which is simple but endures
    // dramatic performance degradation if the database is large and fragmentation in the freelist is common.
    // The alternative one is using hashmap, it is faster in almost all circumstances
    // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
    // The default type is array
    FreelistType FreelistType

    // When true, skips the truncate call when growing the database.
    // Setting this to true is only safe on non-ext3/ext4 systems.
    // Skipping truncation avoids preallocation of hard drive space and
@@ -95,9 +121,9 @@ type DB struct {
    AllocSize int

    path     string
    openFile func(string, int, os.FileMode) (*os.File, error)
    file     *os.File
    lockfile *os.File // windows only
    dataref []byte // mmap'ed readonly, write throws SEGV
    dataref  []byte // mmap'ed readonly, write throws SEGV
    data     *[maxMapSize]byte
    datasz   int
    filesz   int // current on disk file size
@@ -107,9 +133,11 @@ type DB struct {
    opened bool
    rwtx   *Tx
    txs    []*Tx
    freelist *freelist
    stats  Stats

    freelist     *freelist
    freelistLoad sync.Once

    pagePool sync.Pool

    batchMu sync.Mutex
@@ -148,14 +176,18 @@ func (db *DB) String() string {
// If the file does not exist then it will be created automatically.
// Passing in nil options will cause Bolt to open the database with the default options.
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
    var db = &DB{opened: true}

    db := &DB{
        opened: true,
    }
    // Set default options if no options are provided.
    if options == nil {
        options = DefaultOptions
    }
    db.NoSync = options.NoSync
    db.NoGrowSync = options.NoGrowSync
    db.MmapFlags = options.MmapFlags
    db.NoFreelistSync = options.NoFreelistSync
    db.FreelistType = options.FreelistType

    // Set default values for later DB operations.
    db.MaxBatchSize = DefaultMaxBatchSize
@@ -168,13 +200,18 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
        db.readOnly = true
    }

    db.openFile = options.OpenFile
    if db.openFile == nil {
        db.openFile = os.OpenFile
    }

    // Open data file and separate sync handler for metadata writes.
    db.path = path
    var err error
    if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
    if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
        _ = db.close()
        return nil, err
    }
    db.path = db.file.Name()

    // Lock file so that other processes using Bolt in read-write mode cannot
    // use the database at the same time. This would cause corruption since
@@ -183,7 +220,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
    // if !options.ReadOnly.
    // The database file is locked using the shared lock (more than one process may
    // hold a lock at the same time) otherwise (options.ReadOnly is set).
    if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
    if err := flock(db, !db.readOnly, options.Timeout); err != nil {
        _ = db.close()
        return nil, err
    }
@@ -191,31 +228,41 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
    // Default values for test hooks
    db.ops.writeAt = db.file.WriteAt

    if db.pageSize = options.PageSize; db.pageSize == 0 {
        // Set the default page size to the OS page size.
        db.pageSize = defaultPageSize
    }

    // Initialize the database if it doesn't exist.
    if info, err := db.file.Stat(); err != nil {
        _ = db.close()
        return nil, err
    } else if info.Size() == 0 {
        // Initialize new files with meta pages.
        if err := db.init(); err != nil {
            // clean up file descriptor on initialization fail
            _ = db.close()
            return nil, err
        }
    } else {
        // Read the first meta page to determine the page size.
        var buf [0x1000]byte
        if _, err := db.file.ReadAt(buf[:], 0); err == nil {
            m := db.pageInBuffer(buf[:], 0).meta()
            if err := m.validate(); err != nil {
                // If we can't read the page size, we can assume it's the same
                // as the OS -- since that's how the page size was chosen in the
                // first place.
                //
                // If the first page is invalid and this OS uses a different
                // page size than what the database was created with then we
                // are out of luck and cannot access the database.
                db.pageSize = os.Getpagesize()
            } else {
        // If we can't read the page size, but can read a page, assume
        // it's the same as the OS or one given -- since that's how the
        // page size was chosen in the first place.
        //
        // If the first page is invalid and this OS uses a different
        // page size than what the database was created with then we
        // are out of luck and cannot access the database.
        //
        // TODO: scan for next page
        if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
            if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
                db.pageSize = int(m.pageSize)
            }
        } else {
            _ = db.close()
            return nil, ErrInvalid
        }
    }

@@ -232,14 +279,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
        return nil, err
    }

    // Read in the freelist.
    db.freelist = newFreelist()
    db.freelist.read(db.page(db.meta().freelist))
    if db.readOnly {
        return db, nil
    }

    db.loadFreelist()

    // Flush freelist when transitioning from no sync to sync so
    // NoFreelistSync unaware boltdb can open the db later.
    if !db.NoFreelistSync && !db.hasSyncedFreelist() {
        tx, err := db.Begin(true)
        if tx != nil {
            err = tx.Commit()
        }
        if err != nil {
            _ = db.close()
            return nil, err
        }
    }

    // Mark the database as opened and return.
    return db, nil
}

// loadFreelist reads the freelist if it is synced, or reconstructs it
// by scanning the DB if it is not synced. It assumes there are no
// concurrent accesses being made to the freelist.
func (db *DB) loadFreelist() {
    db.freelistLoad.Do(func() {
        db.freelist = newFreelist(db.FreelistType)
        if !db.hasSyncedFreelist() {
            // Reconstruct free list by scanning the DB.
            db.freelist.readIDs(db.freepages())
        } else {
            // Read free list from freelist page.
            db.freelist.read(db.page(db.meta().freelist))
        }
        db.stats.FreePageN = db.freelist.free_count()
    })
}

func (db *DB) hasSyncedFreelist() bool {
    return db.meta().freelist != pgidNoFreelist
}

// mmap opens the underlying memory-mapped file and initializes the meta references.
// minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
@@ -341,9 +424,6 @@ func (db *DB) mmapSize(size int) (int, error) {

// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
    // Set the page size to the OS page size.
    db.pageSize = os.Getpagesize()

    // Create two meta pages on a buffer.
    buf := make([]byte, db.pageSize*4)
    for i := 0; i < 2; i++ {
@@ -387,7 +467,8 @@ func (db *DB) init() error {
}

// Close releases all database resources.
// All transactions must be closed before closing the database.
// It will block waiting for any open transactions to finish
// before closing the database and returning.
func (db *DB) Close() error {
    db.rwlock.Lock()
    defer db.rwlock.Unlock()
@@ -395,8 +476,8 @@ func (db *DB) Close() error {
    db.metalock.Lock()
    defer db.metalock.Unlock()

    db.mmaplock.RLock()
    defer db.mmaplock.RUnlock()
    db.mmaplock.Lock()
    defer db.mmaplock.Unlock()

    return db.close()
}
@@ -526,21 +607,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
    t := &Tx{writable: true}
    t.init(db)
    db.rwtx = t
    db.freePages()
    return t, nil
}

    // Free any pages associated with closed read-only transactions.
    var minid txid = 0xFFFFFFFFFFFFFFFF
    for _, t := range db.txs {
        if t.meta.txid < minid {
            minid = t.meta.txid
        }
// freePages releases any pages associated with closed read-only transactions.
func (db *DB) freePages() {
    // Free all pending pages prior to earliest open transaction.
    sort.Sort(txsById(db.txs))
    minid := txid(0xFFFFFFFFFFFFFFFF)
    if len(db.txs) > 0 {
        minid = db.txs[0].meta.txid
    }
    if minid > 0 {
        db.freelist.release(minid - 1)
    }

    return t, nil
    // Release unused txid extents.
    for _, t := range db.txs {
        db.freelist.releaseRange(minid, t.meta.txid-1)
        minid = t.meta.txid + 1
    }
    db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
    // Any page both allocated and freed in an extent is safe to release.
}

type txsById []*Tx

func (t txsById) Len() int           { return len(t) }
func (t txsById) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }

// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
    // Release the read lock on the mmap.
@@ -633,11 +729,7 @@ func (db *DB) View(fn func(*Tx) error) error {
        return err
    }

    if err := t.Rollback(); err != nil {
        return err
    }

    return nil
    return t.Rollback()
}

// Batch calls fn as part of a batch. It behaves similar to Update,
@@ -737,9 +829,7 @@ retry:

        // pass success, or bolt internal errors, to all callers
        for _, c := range b.calls {
            if c.err != nil {
                c.err <- err
            }
            c.err <- err
        }
        break retry
    }
@@ -826,7 +916,7 @@ func (db *DB) meta() *meta {
}

// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
func (db *DB) allocate(txid txid, count int) (*page, error) {
    // Allocate a temporary buffer for the page.
    var buf []byte
    if count == 1 {
@@ -838,7 +928,7 @@ func (db *DB) allocate(count int) (*page, error) {
    p.overflow = uint32(count - 1)

    // Use pages from the freelist if they are available.
    if p.id = db.freelist.allocate(count); p.id != 0 {
    if p.id = db.freelist.allocate(txid, count); p.id != 0 {
        return p, nil
    }

@@ -893,6 +983,38 @@ func (db *DB) IsReadOnly() bool {
    return db.readOnly
}

func (db *DB) freepages() []pgid {
    tx, err := db.beginTx()
    defer func() {
        err = tx.Rollback()
        if err != nil {
            panic("freepages: failed to rollback tx")
        }
    }()
    if err != nil {
        panic("freepages: failed to open read only tx")
    }

    reachable := make(map[pgid]*page)
    nofreed := make(map[pgid]bool)
    ech := make(chan error)
    go func() {
        for e := range ech {
            panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
        }
    }()
    tx.checkBucket(&tx.root, reachable, nofreed, ech)
    close(ech)

    var fids []pgid
    for i := pgid(2); i < db.meta().pgid; i++ {
        if _, ok := reachable[i]; !ok {
            fids = append(fids, i)
        }
    }
    return fids
}

// Options represents the options that can be set when opening a database.
type Options struct {
    // Timeout is the amount of time to wait to obtain a file lock.
@@ -903,6 +1025,17 @@ type Options struct {
    // Sets the DB.NoGrowSync flag before memory mapping the file.
    NoGrowSync bool

    // Do not sync freelist to disk. This improves the database write performance
    // under normal operation, but requires a full database re-sync during recovery.
    NoFreelistSync bool

    // FreelistType sets the backend freelist type. There are two options. Array, which is simple but endures
    // dramatic performance degradation if the database is large and fragmentation in the freelist is common.
    // The alternative one is using hashmap, it is faster in almost all circumstances
    // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
    // The default type is array
    FreelistType FreelistType

    // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
    // grab a shared lock (UNIX).
    ReadOnly bool
@@ -919,13 +1052,26 @@ type Options struct {
    // If initialMmapSize is smaller than the previous database size,
    // it takes no effect.
    InitialMmapSize int

    // PageSize overrides the default OS page size.
    PageSize int

    // NoSync sets the initial value of DB.NoSync. Normally this can just be
    // set directly on the DB itself when returned from Open(), but this option
    // is useful in APIs which expose Options but not the underlying DB.
    NoSync bool

    // OpenFile is used to open files. It defaults to os.OpenFile. This option
    // is useful for writing hermetic tests.
    OpenFile func(string, int, os.FileMode) (*os.File, error)
}

// DefaultOptions represent the options used if nil options are passed into Open().
// No timeout is used which will cause Bolt to wait indefinitely for a lock.
var DefaultOptions = &Options{
    Timeout:    0,
    NoGrowSync: false,
    Timeout:      0,
    NoGrowSync:   false,
    FreelistType: FreelistArrayType,
}

// Stats represents statistics about the database.
@@ -960,10 +1106,6 @@ func (s *Stats) Sub(other *Stats) Stats {
    return diff
}

func (s *Stats) add(other *Stats) {
    s.TxStats.add(&other.TxStats)
}

type Info struct {
    Data     uintptr
    PageSize int
@@ -1002,7 +1144,8 @@ func (m *meta) copy(dest *meta) {
func (m *meta) write(p *page) {
    if m.root.root >= m.pgid {
        panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
    } else if m.freelist >= m.pgid {
    } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
        // TODO: reject pgidNoFreeList if !NoFreelistSync
        panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
    }

@@ -1029,11 +1172,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
        panic(fmt.Sprintf("assertion failed: "+msg, v...))
    }
}

func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }

func printstack() {
    stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
    fmt.Fprintln(os.Stderr, stack)
}
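The new Options fields introduced above can be exercised like this; a minimal sketch with illustrative values (not part of the diff):

```go
package main

import (
    "log"
    "time"

    bolt "go.etcd.io/bbolt"
)

func main() {
    // The hashmap freelist and NoFreelistSync trade crash-recovery time
    // for write throughput, as described in the field comments above.
    db, err := bolt.Open("example.db", 0600, &bolt.Options{
        Timeout:        1 * time.Second,      // bound the flock retry loop
        FreelistType:   bolt.FreelistMapType, // hashmap freelist backend
        NoFreelistSync: true,                 // rebuild the freelist on open instead
    })
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}
```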
4 vendor/github.com/boltdb/bolt/doc.go → vendor/go.etcd.io/bbolt/doc.go generated vendored
@@ -1,5 +1,5 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
package bbolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
@@ -41,4 +41,4 @@ point to different data or can point to invalid memory which will cause a panic.

*/
package bolt
package bbolt
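The package comment describes lock-free MVCC with multiple readers and a single writer. A short fragment illustrating that model, assuming a *bolt.DB named db from the earlier sketches and imports of sync, log, and bolt "go.etcd.io/bbolt":

```go
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
    wg.Add(1)
    go func() {
        defer wg.Done()
        // Read-only transactions may run in parallel, each against a
        // consistent snapshot of the database.
        _ = db.View(func(tx *bolt.Tx) error { return nil })
    }()
}
// Only one writable transaction is active at a time; Update serializes them.
err := db.Update(func(tx *bolt.Tx) error { return nil })
wg.Wait()
if err != nil {
    log.Fatal(err)
}
```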
2 vendor/github.com/boltdb/bolt/errors.go → vendor/go.etcd.io/bbolt/errors.go generated vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import "errors"
404 vendor/go.etcd.io/bbolt/freelist.go generated vendored Normal file
@@ -0,0 +1,404 @@
package bbolt

import (
    "fmt"
    "sort"
    "unsafe"
)

// txPending holds a list of pgids and corresponding allocation txns
// that are pending to be freed.
type txPending struct {
    ids              []pgid
    alloctx          []txid // txids allocating the ids
    lastReleaseBegin txid   // beginning txid of last matching releaseRange
}

// pidSet holds the set of starting pgids which have the same span size
type pidSet map[pgid]struct{}

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
    freelistType   FreelistType                // freelist type
    ids            []pgid                      // all free and available free page ids.
    allocs         map[pgid]txid               // mapping of txid that allocated a pgid.
    pending        map[txid]*txPending         // mapping of soon-to-be free page ids by tx.
    cache          map[pgid]bool               // fast lookup of all free and pending page ids.
    freemaps       map[uint64]pidSet           // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
    forwardMap     map[pgid]uint64             // key is start pgid, value is its span size
    backwardMap    map[pgid]uint64             // key is end pgid, value is its span size
    allocate       func(txid txid, n int) pgid // the freelist allocate func
    free_count     func() int                  // the function which gives you the free page count
    mergeSpans     func(ids pgids)             // the mergeSpan func
    getFreePageIDs func() []pgid               // get free pgids func
    readIDs        func(pgids []pgid)          // readIDs reads a list of pages and initializes the freelist
}

// newFreelist returns an empty, initialized freelist.
func newFreelist(freelistType FreelistType) *freelist {
    f := &freelist{
        freelistType: freelistType,
        allocs:       make(map[pgid]txid),
        pending:      make(map[txid]*txPending),
        cache:        make(map[pgid]bool),
        freemaps:     make(map[uint64]pidSet),
        forwardMap:   make(map[pgid]uint64),
        backwardMap:  make(map[pgid]uint64),
    }

    if freelistType == FreelistMapType {
        f.allocate = f.hashmapAllocate
        f.free_count = f.hashmapFreeCount
        f.mergeSpans = f.hashmapMergeSpans
        f.getFreePageIDs = f.hashmapGetFreePageIDs
        f.readIDs = f.hashmapReadIDs
    } else {
        f.allocate = f.arrayAllocate
        f.free_count = f.arrayFreeCount
        f.mergeSpans = f.arrayMergeSpans
        f.getFreePageIDs = f.arrayGetFreePageIDs
        f.readIDs = f.arrayReadIDs
    }

    return f
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
    n := f.count()
    if n >= 0xFFFF {
        // The first element will be used to store the count. See freelist.write.
        n++
    }
    return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
}

// count returns count of pages on the freelist
func (f *freelist) count() int {
    return f.free_count() + f.pending_count()
}

// arrayFreeCount returns count of free pages(array version)
func (f *freelist) arrayFreeCount() int {
    return len(f.ids)
}

// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
    var count int
    for _, txp := range f.pending {
        count += len(txp.ids)
    }
    return count
}

// copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
    m := make(pgids, 0, f.pending_count())
    for _, txp := range f.pending {
        m = append(m, txp.ids...)
    }
    sort.Sort(m)
    mergepgids(dst, f.getFreePageIDs(), m)
}

// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) arrayAllocate(txid txid, n int) pgid {
    if len(f.ids) == 0 {
        return 0
    }

    var initial, previd pgid
    for i, id := range f.ids {
        if id <= 1 {
            panic(fmt.Sprintf("invalid page allocation: %d", id))
        }

        // Reset initial page if this is not contiguous.
        if previd == 0 || id-previd != 1 {
            initial = id
        }

        // If we found a contiguous block then remove it and return it.
        if (id-initial)+1 == pgid(n) {
            // If we're allocating off the beginning then take the fast path
            // and just adjust the existing slice. This will use extra memory
            // temporarily but the append() in free() will realloc the slice
            // as is necessary.
            if (i + 1) == n {
                f.ids = f.ids[i+1:]
            } else {
                copy(f.ids[i-n+1:], f.ids[i+1:])
                f.ids = f.ids[:len(f.ids)-n]
            }

            // Remove from the free cache.
            for i := pgid(0); i < pgid(n); i++ {
                delete(f.cache, initial+i)
            }
            f.allocs[initial] = txid
            return initial
        }

        previd = id
    }
    return 0
}

// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
    if p.id <= 1 {
        panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
    }

    // Free page and all its overflow pages.
    txp := f.pending[txid]
    if txp == nil {
        txp = &txPending{}
        f.pending[txid] = txp
    }
    allocTxid, ok := f.allocs[p.id]
    if ok {
        delete(f.allocs, p.id)
    } else if (p.flags & freelistPageFlag) != 0 {
        // Freelist is always allocated by prior tx.
        allocTxid = txid - 1
    }

    for id := p.id; id <= p.id+pgid(p.overflow); id++ {
        // Verify that page is not already free.
        if f.cache[id] {
            panic(fmt.Sprintf("page %d already freed", id))
        }
        // Add to the freelist and cache.
        txp.ids = append(txp.ids, id)
        txp.alloctx = append(txp.alloctx, allocTxid)
        f.cache[id] = true
    }
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
    m := make(pgids, 0)
    for tid, txp := range f.pending {
        if tid <= txid {
            // Move transaction's pending pages to the available freelist.
            // Don't remove from the cache since the page is still free.
            m = append(m, txp.ids...)
            delete(f.pending, tid)
        }
    }
    f.mergeSpans(m)
}

// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
func (f *freelist) releaseRange(begin, end txid) {
    if begin > end {
        return
    }
    var m pgids
    for tid, txp := range f.pending {
        if tid < begin || tid > end {
            continue
        }
        // Don't recompute freed pages if ranges haven't updated.
        if txp.lastReleaseBegin == begin {
            continue
        }
        for i := 0; i < len(txp.ids); i++ {
            if atx := txp.alloctx[i]; atx < begin || atx > end {
                continue
            }
            m = append(m, txp.ids[i])
            txp.ids[i] = txp.ids[len(txp.ids)-1]
            txp.ids = txp.ids[:len(txp.ids)-1]
            txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
            txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
            i--
        }
        txp.lastReleaseBegin = begin
        if len(txp.ids) == 0 {
            delete(f.pending, tid)
        }
    }
    f.mergeSpans(m)
}

// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
    // Remove page ids from cache.
    txp := f.pending[txid]
    if txp == nil {
        return
    }
    var m pgids
    for i, pgid := range txp.ids {
        delete(f.cache, pgid)
        tx := txp.alloctx[i]
        if tx == 0 {
            continue
        }
        if tx != txid {
            // Pending free aborted; restore page back to alloc list.
            f.allocs[pgid] = tx
        } else {
            // Freed page was allocated by this txn; OK to throw away.
            m = append(m, pgid)
        }
    }
    // Remove pages from pending list and mark as free if allocated by txid.
    delete(f.pending, txid)
    f.mergeSpans(m)
}

// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
    return f.cache[pgid]
}

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
    if (p.flags & freelistPageFlag) == 0 {
        panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
    }
    // If the page.count is at the max uint16 value (64k) then it's considered
    // an overflow and the size of the freelist is stored as the first element.
    var idx, count = 0, int(p.count)
    if count == 0xFFFF {
        idx = 1
        c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
        count = int(c)
        if count < 0 {
            panic(fmt.Sprintf("leading element count %d overflows int", c))
        }
    }

    // Copy the list of page ids from the freelist.
    if count == 0 {
        f.ids = nil
    } else {
        var ids []pgid
        data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
        unsafeSlice(unsafe.Pointer(&ids), data, count)

        // copy the ids, so we don't modify on the freelist page directly
        idsCopy := make([]pgid, count)
        copy(idsCopy, ids)
        // Make sure they're sorted.
        sort.Sort(pgids(idsCopy))

        f.readIDs(idsCopy)
    }
}

// arrayReadIDs initializes the freelist from a given list of ids.
func (f *freelist) arrayReadIDs(ids []pgid) {
    f.ids = ids
    f.reindex()
}

func (f *freelist) arrayGetFreePageIDs() []pgid {
    return f.ids
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
    // Combine the old free pgids and pgids waiting on an open transaction.

    // Update the header flag.
    p.flags |= freelistPageFlag

    // The page.count can only hold up to 64k elements so if we overflow that
    // number then we handle it by putting the size in the first element.
    l := f.count()
    if l == 0 {
        p.count = uint16(l)
    } else if l < 0xFFFF {
        p.count = uint16(l)
        var ids []pgid
        data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
        unsafeSlice(unsafe.Pointer(&ids), data, l)
        f.copyall(ids)
    } else {
        p.count = 0xFFFF
        var ids []pgid
        data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
        unsafeSlice(unsafe.Pointer(&ids), data, l+1)
        ids[0] = pgid(l)
        f.copyall(ids[1:])
    }

    return nil
}

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
    f.read(p)

    // Build a cache of only pending pages.
    pcache := make(map[pgid]bool)
    for _, txp := range f.pending {
        for _, pendingID := range txp.ids {
            pcache[pendingID] = true
        }
    }

    // Check each page in the freelist and build a new available freelist
    // with any pages not in the pending lists.
    var a []pgid
    for _, id := range f.getFreePageIDs() {
        if !pcache[id] {
            a = append(a, id)
        }
    }

    f.readIDs(a)
}

// noSyncReload reads the freelist from pgids and filters out pending items.
func (f *freelist) noSyncReload(pgids []pgid) {
    // Build a cache of only pending pages.
    pcache := make(map[pgid]bool)
    for _, txp := range f.pending {
        for _, pendingID := range txp.ids {
            pcache[pendingID] = true
        }
    }

    // Check each page in the freelist and build a new available freelist
    // with any pages not in the pending lists.
    var a []pgid
    for _, id := range pgids {
        if !pcache[id] {
            a = append(a, id)
        }
    }

    f.readIDs(a)
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
    ids := f.getFreePageIDs()
    f.cache = make(map[pgid]bool, len(ids))
    for _, id := range ids {
        f.cache[id] = true
    }
    for _, txp := range f.pending {
        for _, pendingID := range txp.ids {
            f.cache[pendingID] = true
        }
    }
}

// arrayMergeSpans tries to merge the given list of pages (represented by pgids) with existing spans, using the array backend.
func (f *freelist) arrayMergeSpans(ids pgids) {
    sort.Sort(ids)
    f.ids = pgids(f.ids).merge(ids)
}
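arrayAllocate above scans a sorted id list for a contiguous run. A self-contained sketch of that scan (simplified: plain uint64 ids, no cache or allocation bookkeeping):

```go
package main

import "fmt"

// allocateRun walks a sorted slice of free page ids and returns the starting
// id of the first run of n consecutive ids, or 0 when none exists (page ids
// 0 and 1 are reserved for meta pages, so 0 is a safe "not found" value).
func allocateRun(ids []uint64, n int) uint64 {
    var initial, previd uint64
    for _, id := range ids {
        // Reset the candidate start whenever the sequence breaks.
        if previd == 0 || id-previd != 1 {
            initial = id
        }
        // Found n consecutive ids ending at id.
        if id-initial+1 == uint64(n) {
            return initial
        }
        previd = id
    }
    return 0
}

func main() {
    free := []uint64{3, 4, 7, 8, 9, 12}
    fmt.Println(allocateRun(free, 3)) // 7 (the run 7,8,9)
    fmt.Println(allocateRun(free, 4)) // 0 (no run of 4)
}
```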
178 vendor/go.etcd.io/bbolt/freelist_hmap.go generated vendored Normal file
@@ -0,0 +1,178 @@
package bbolt

import "sort"

// hashmapFreeCount returns count of free pages(hashmap version)
func (f *freelist) hashmapFreeCount() int {
    // use the forwardMap to get the total count
    count := 0
    for _, size := range f.forwardMap {
        count += int(size)
    }
    return count
}

// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
    if n == 0 {
        return 0
    }

    // if we have an exact size match, take the short path and return it
    if bm, ok := f.freemaps[uint64(n)]; ok {
        for pid := range bm {
            // remove the span
            f.delSpan(pid, uint64(n))

            f.allocs[pid] = txid

            for i := pgid(0); i < pgid(n); i++ {
                delete(f.cache, pid+i)
            }
            return pid
        }
    }

    // look up the map to find a larger span
    for size, bm := range f.freemaps {
        if size < uint64(n) {
            continue
        }

        for pid := range bm {
            // remove the initial span
            f.delSpan(pid, uint64(size))

            f.allocs[pid] = txid

            remain := size - uint64(n)

            // add the remaining span
            f.addSpan(pid+pgid(n), remain)

            for i := pgid(0); i < pgid(n); i++ {
                delete(f.cache, pid+pgid(i))
            }
            return pid
        }
    }

    return 0
}

// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
func (f *freelist) hashmapReadIDs(pgids []pgid) {
    f.init(pgids)

    // Rebuild the page cache.
    f.reindex()
}

// hashmapGetFreePageIDs returns the sorted free page ids
func (f *freelist) hashmapGetFreePageIDs() []pgid {
    count := f.free_count()
    if count == 0 {
        return nil
    }

    m := make([]pgid, 0, count)
    for start, size := range f.forwardMap {
        for i := 0; i < int(size); i++ {
            m = append(m, start+pgid(i))
        }
    }
    sort.Sort(pgids(m))

    return m
}

// hashmapMergeSpans tries to merge the given list of pages (represented by pgids) with existing spans
func (f *freelist) hashmapMergeSpans(ids pgids) {
    for _, id := range ids {
        // try to see if we can merge and update
        f.mergeWithExistingSpan(id)
    }
}

// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
func (f *freelist) mergeWithExistingSpan(pid pgid) {
    prev := pid - 1
    next := pid + 1

    preSize, mergeWithPrev := f.backwardMap[prev]
    nextSize, mergeWithNext := f.forwardMap[next]
    newStart := pid
    newSize := uint64(1)

    if mergeWithPrev {
        // merge with previous span
        start := prev + 1 - pgid(preSize)
        f.delSpan(start, preSize)

        newStart -= pgid(preSize)
        newSize += preSize
    }

    if mergeWithNext {
        // merge with next span
        f.delSpan(next, nextSize)
        newSize += nextSize
    }

    f.addSpan(newStart, newSize)
}

func (f *freelist) addSpan(start pgid, size uint64) {
    f.backwardMap[start-1+pgid(size)] = size
    f.forwardMap[start] = size
    if _, ok := f.freemaps[size]; !ok {
        f.freemaps[size] = make(map[pgid]struct{})
    }

    f.freemaps[size][start] = struct{}{}
}

func (f *freelist) delSpan(start pgid, size uint64) {
    delete(f.forwardMap, start)
    delete(f.backwardMap, start+pgid(size-1))
    delete(f.freemaps[size], start)
    if len(f.freemaps[size]) == 0 {
        delete(f.freemaps, size)
    }
}

// init initializes the freelist from pgids (hashmap version).
// pgids must be sorted.
func (f *freelist) init(pgids []pgid) {
    if len(pgids) == 0 {
        return
    }

    size := uint64(1)
    start := pgids[0]

    if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
        panic("pgids not sorted")
    }

    f.freemaps = make(map[uint64]pidSet)
    f.forwardMap = make(map[pgid]uint64)
    f.backwardMap = make(map[pgid]uint64)

    for i := 1; i < len(pgids); i++ {
        // continuous page
        if pgids[i] == pgids[i-1]+1 {
            size++
        } else {
            f.addSpan(start, size)

            size = 1
            start = pgids[i]
        }
    }

    // init the tail
    if size != 0 && start != 0 {
        f.addSpan(start, size)
    }
}
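The hashmap freelist tracks spans in forward (start → size) and backward (end → size) maps so a freed page can coalesce with its neighbours in constant time. A self-contained sketch of that bookkeeping (simplified types, illustrative values):

```go
package main

import "fmt"

// spans mirrors the forwardMap/backwardMap bookkeeping used above.
type spans struct {
    forward  map[uint64]uint64 // span start -> size
    backward map[uint64]uint64 // span end -> size
}

func newSpans() *spans {
    return &spans{forward: map[uint64]uint64{}, backward: map[uint64]uint64{}}
}

func (s *spans) add(start, size uint64) {
    s.forward[start] = size
    s.backward[start+size-1] = size
}

func (s *spans) del(start, size uint64) {
    delete(s.forward, start)
    delete(s.backward, start+size-1)
}

// free merges page id into adjacent spans, mirroring mergeWithExistingSpan.
func (s *spans) free(id uint64) {
    start, size := id, uint64(1)
    if prevSize, ok := s.backward[id-1]; ok {
        s.del(id-prevSize, prevSize) // absorb the span ending just before id
        start -= prevSize
        size += prevSize
    }
    if nextSize, ok := s.forward[id+1]; ok {
        s.del(id+1, nextSize) // absorb the span starting just after id
        size += nextSize
    }
    s.add(start, size)
}

func main() {
    s := newSpans()
    s.free(3)
    s.free(5)
    s.free(4)              // bridges the gap: {3} and {5} coalesce into {3,4,5}
    fmt.Println(s.forward) // map[3:3]
}
```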
5
vendor/go.etcd.io/bbolt/go.mod
generated
vendored
Normal file
@@ -0,0 +1,5 @@
module go.etcd.io/bbolt

go 1.12

require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
2
vendor/go.etcd.io/bbolt/go.sum
generated
vendored
Normal file
@@ -0,0 +1,2 @@
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
60
vendor/github.com/boltdb/bolt/node.go → vendor/go.etcd.io/bbolt/node.go
generated
vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"bytes"
@@ -41,19 +41,19 @@ func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
	}
	return sz
	return int(sz)
}

// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
func (n *node) sizeLessThan(v uintptr) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
		if sz >= v {
			return false
		}
@@ -62,7 +62,7 @@ func (n *node) sizeLessThan(v int) bool {
}

// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
func (n *node) pageElementSize() uintptr {
	if n.isLeaf {
		return leafPageElementSize
	}
@@ -207,10 +207,17 @@ func (n *node) write(p *page) {
	}

	// Loop over each item and write it to the page.
	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.key) + len(item.value)
		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
@@ -226,20 +233,9 @@ func (n *node) write(p *page) {
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// If the length of key+value is larger than the max allocation size
		// then we need to reallocate the byte array pointer.
		//
		// See: https://github.com/boltdb/bolt/pull/335
		klen, vlen := len(item.key), len(item.value)
		if len(b) < klen+vlen {
			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
		}

		// Write data for the element to the end of the page.
		copy(b[0:], item.key)
		b = b[klen:]
		copy(b[0:], item.value)
		b = b[vlen:]
		l := copy(b, item.key)
		copy(b[l:], item.value)
	}

	// DEBUG ONLY: n.dump()
@@ -247,7 +243,7 @@ func (n *node) write(p *page) {

// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
func (n *node) split(pageSize uintptr) []*node {
	var nodes []*node

	node := n
@@ -270,7 +266,7 @@ func (n *node) split(pageSize int) []*node {

// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
@@ -312,18 +308,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This should only be called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
func (n *node) splitIndex(threshold int) (index, sz uintptr) {
	sz = pageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
		index = i
		index = uintptr(i)
		inode := n.inodes[i]
		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
		elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if i >= minKeysPerPage && sz+elsize > threshold {
		if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
			break
		}

@@ -356,7 +352,7 @@ func (n *node) spill() error {
	n.children = nil

	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(tx.db.pageSize)
	var nodes = n.split(uintptr(tx.db.pageSize))
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
@@ -365,7 +361,7 @@ func (n *node) spill() error {
		}

		// Allocate contiguous space for the node.
		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
		p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
		if err != nil {
			return err
		}
@@ -587,9 +583,11 @@ func (n *node) dump() {

type nodes []*node

func (s nodes) Len() int           { return len(s) }
func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
func (s nodes) Len() int      { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool {
	return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
}

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
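Worth calling out in the spill() hunk above: the old page count, `(node.size() / tx.db.pageSize) + 1`, allocates one page too many whenever the node size is an exact multiple of the page size, whereas the new expression is an ordinary ceiling division. A small standalone check (hypothetical sizes, purely for illustration):

```go
package main

import "fmt"

func main() {
	const pageSize = 4096
	for _, size := range []int{4095, 4096, 4097} {
		before := (size / pageSize) + 1           // old: overshoots exact multiples
		after := (size + pageSize - 1) / pageSize // new: ceiling division
		fmt.Printf("size=%5d old=%d new=%d\n", size, before, after)
	}
}
```

For size=4096 the old formula allocates 2 pages where 1 suffices; both agree everywhere else.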
43
vendor/github.com/boltdb/bolt/page.go → vendor/go.etcd.io/bbolt/page.go
generated
vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"fmt"
@@ -7,12 +7,12 @@ import (
	"unsafe"
)

const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
const pageHeaderSize = unsafe.Sizeof(page{})

const minKeysPerPage = 2

const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
const leafPageElementSize = unsafe.Sizeof(leafPageElement{})

const (
	branchPageFlag = 0x01
@@ -32,7 +32,6 @@ type page struct {
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

// typ returns a human readable page type string used for debugging.
@@ -51,13 +50,13 @@ func (p *page) typ() string {

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(&p.ptr))
	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}

// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return n
	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		leafPageElementSize, int(index)))
}

// leafPageElements retrieves a list of leaf nodes.
@@ -65,12 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
	var elems []leafPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(branchPageElement{}), int(index)))
}

// branchPageElements retrieves a list of branch nodes.
@@ -78,12 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
	var elems []branchPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
	buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

@@ -102,8 +108,7 @@ type branchPageElement struct {

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
	return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}

// leafPageElement represents a node on a leaf page.
@@ -116,14 +121,16 @@ type leafPageElement struct {

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
	i := int(n.pos)
	j := i + int(n.ksize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
	i := int(n.pos) + int(n.ksize)
	j := i + int(n.vsize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// PageInfo represents human readable information about a page.
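The element accessors above replace the old `(*[0x7FFFFFF]T)(unsafe.Pointer(&p.ptr))[index]` casts with unsafeIndex, which is plain pointer arithmetic: base + offset + n*elemsz. A rough sketch of the same indexing over a header-plus-array layout (hypothetical types, only to illustrate the arithmetic; the helper itself comes from the new vendor/go.etcd.io/bbolt/unsafe.go further down):

```go
package main

import (
	"fmt"
	"unsafe"
)

// unsafeIndex mirrors the helper from the new vendor/go.etcd.io/bbolt/unsafe.go.
func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

// block stands in for a bbolt page: a fixed-size header immediately
// followed by an element array in the same contiguous block of memory.
type block struct {
	header [2]uint64 // 16 bytes of "header"
	elems  [3]uint64
}

func main() {
	b := block{elems: [3]uint64{7, 8, 9}}
	// base + sizeof(header) + 2*sizeof(uint64) points at elems[2].
	p := unsafeIndex(unsafe.Pointer(&b), unsafe.Sizeof(b.header), unsafe.Sizeof(uint64(0)), 2)
	fmt.Println(*(*uint64)(p)) // 9
}
```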
130
vendor/github.com/boltdb/bolt/tx.go → vendor/go.etcd.io/bbolt/tx.go
generated
vendored
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"fmt"
@@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error {
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
		return fn(k, tx.root.Bucket(k))
	})
}

@@ -169,28 +166,18 @@ func (tx *Tx) Commit() error {
	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	// Free the old freelist because commit writes out a fresh freelist.
	if tx.meta.freelist != pgidNoFreelist {
		tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
	if !tx.db.NoFreelistSync {
		err := tx.commitFreelist()
		if err != nil {
			return err
		}
	} else {
		tx.meta.freelist = pgidNoFreelist
	}

	// Write dirty pages to disk.
@@ -235,6 +222,31 @@ func (tx *Tx) Commit() error {
	return nil
}

func (tx *Tx) commitFreelist() error {
	// Allocate new pages for the new free list. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	opgid := tx.meta.pgid
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id
	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
@@ -242,17 +254,36 @@ func (tx *Tx) Rollback() error {
	if tx.db == nil {
		return ErrTxClosed
	}
	tx.rollback()
	tx.nonPhysicalRollback()
	return nil
}

// nonPhysicalRollback is called when the user calls Rollback directly, in which case we do not need to reload the free pages from disk.
func (tx *Tx) nonPhysicalRollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.rollback(tx.meta.txid)
	}
	tx.close()
}

// rollback needs to reload the free pages from disk in case some system error happens, like an fsync error.
func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.rollback(tx.meta.txid)
		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
		if !tx.db.hasSyncedFreelist() {
			// Reconstruct the free page list by scanning the DB to get the whole free page list.
			// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
			tx.db.freelist.noSyncReload(tx.db.freepages())
		} else {
			// Read the free page list from the freelist page.
			tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
		}
	}
	tx.close()
}
@@ -291,7 +322,9 @@ func (tx *Tx) close() {
}

// Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() instead.
// This function exists for backwards compatibility.
//
// Deprecated; Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
	_, err := tx.WriteTo(w)
	return err
@@ -301,11 +334,15 @@ func (tx *Tx) Copy(w io.Writer) error {
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader with WriteFlag
	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()

	// Generate a meta page. We use the same page data for both meta pages.
	buf := make([]byte, tx.db.pageSize)
@@ -333,7 +370,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	}

	// Move past the meta pages in the file.
	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
		return n, fmt.Errorf("seek: %s", err)
	}

@@ -344,14 +381,14 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
		return n, err
	}

	return n, f.Close()
	return n, nil
}

// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}
@@ -379,6 +416,9 @@ func (tx *Tx) Check() <-chan error {
}

func (tx *Tx) check(ch chan error) {
	// Force loading free list if opened in ReadOnly mode.
	tx.db.loadFreelist()

	// Check if any pages are double freed.
	freed := make(map[pgid]bool)
	all := make([]pgid, tx.db.freelist.count())
@@ -394,8 +434,10 @@ func (tx *Tx) check(ch chan error) {
	reachable := make(map[pgid]*page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
	if tx.meta.freelist != pgidNoFreelist {
		for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
			reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
		}
	}

	// Recursively check buckets.
@@ -453,7 +495,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
	p, err := tx.db.allocate(count)
	p, err := tx.db.allocate(tx.meta.txid, count)
	if err != nil {
		return nil, err
	}
@@ -462,7 +504,7 @@ func (tx *Tx) allocate(count int) (*page, error) {
	tx.pages[p.id] = p

	// Update statistics.
	tx.stats.PageCount++
	tx.stats.PageCount += count
	tx.stats.PageAlloc += count * tx.db.pageSize

	return p, nil
@@ -481,20 +523,18 @@ func (tx *Tx) write() error {

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * tx.db.pageSize
		rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
		offset := int64(p.id) * int64(tx.db.pageSize)
		var written uintptr

		// Write out page in "max allocation" sized chunks.
		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
		for {
			// Limit our write to our max allocation size.
			sz := size
			sz := rem
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}
			buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))

			// Write chunk to disk.
			buf := ptr[:sz]
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				return err
			}
@@ -503,14 +543,14 @@ func (tx *Tx) write() error {
			tx.stats.Write++

			// Exit inner for loop if we've written all the chunks.
			size -= sz
			if size == 0 {
			rem -= sz
			if rem == 0 {
				break
			}

			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
			written += uintptr(sz)
		}
	}

@@ -529,7 +569,7 @@ func (tx *Tx) write() error {
			continue
		}

		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
		buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)

		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
		for i := range buf {
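The rewritten write loop above keeps the same shape as before: each page is written in chunks of at most maxAllocSize-1 bytes, advancing an offset until nothing remains. A simplified sketch of that loop, with an ordinary byte slice standing in for the raw page memory and a bytes.Buffer standing in for the writeAt-based file ops (names here are illustrative, not bbolt's):

```go
package main

import (
	"bytes"
	"fmt"
)

// writeChunked writes buf in chunks of at most max bytes, mirroring the
// remaining-bytes/written-offset structure of Tx.write above.
func writeChunked(w *bytes.Buffer, buf []byte, max int) error {
	rem := len(buf)
	written := 0
	for rem > 0 {
		sz := rem
		if sz > max {
			sz = max // limit each write to the maximum chunk size
		}
		if _, err := w.Write(buf[written : written+sz]); err != nil {
			return err
		}
		rem -= sz
		written += sz
	}
	return nil
}

func main() {
	var out bytes.Buffer
	if err := writeChunked(&out, make([]byte, 10000), 4096); err != nil {
		panic(err)
	}
	fmt.Println(out.Len()) // 10000, written as 4096+4096+1808
}
```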
39
vendor/go.etcd.io/bbolt/unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package bbolt

import (
	"reflect"
	"unsafe"
)

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
	//
	// This memory is not allocated from C, but it is unmanaged by Go's
	// garbage collector and should behave similarly, and the compiler
	// should produce similar code. Note that this conversion allows a
	// subslice to begin after the base address, with an optional offset,
	// while the URL above does not cover this case and only slices from
	// index 0. However, the wiki never says that the address must be to
	// the beginning of a C allocation (or even that malloc was used at
	// all), so this is believed to be correct.
	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
// the slice parameter. This helper should be used over other direct
// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
// from reflect.SliceHeader to a Go slice type.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}
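To make the unsafeSlice helper above concrete, here is a tiny runnable demonstration that aims it at an ordinary Go array instead of the mmap'd region bbolt uses, so the aliasing is visible. (On current Go versions, new code would typically reach for unsafe.Slice rather than reflect.SliceHeader.)

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// unsafeSlice is the helper from the file above: it retargets an existing
// slice variable at arbitrary memory with the given length and capacity.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}

func main() {
	arr := [4]uint32{10, 20, 30, 40}
	var view []uint32
	unsafeSlice(unsafe.Pointer(&view), unsafe.Pointer(&arr[0]), len(arr))
	view[1] = 99           // writes through to the backing array
	fmt.Println(arr, view) // [10 99 30 40] [10 99 30 40]
}
```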
9
vendor/modules.txt
vendored
@@ -1,13 +1,14 @@
# cloud.google.com/go v0.74.0
## explicit
cloud.google.com/go/compute/metadata
# github.com/TwinProduction/gocache v1.1.0
# github.com/TwinProduction/gocache v1.2.1
## explicit
github.com/TwinProduction/gocache
# github.com/TwinProduction/health v1.0.0
## explicit
github.com/TwinProduction/health
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
github.com/boltdb/bolt
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
# github.com/davecgh/go-spew v1.1.1
@@ -67,6 +68,8 @@ github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
# go.etcd.io/bbolt v1.3.5
go.etcd.io/bbolt
# golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
## explicit
golang.org/x/crypto/ed25519
@@ -13,38 +13,17 @@ import (
)

var (
	store = storage.NewInMemoryStore()

	// monitoringMutex is used to prevent multiple services from being evaluated at the same time.
	// Without this, conditions using response time may become inaccurate.
	monitoringMutex sync.Mutex
)

// GetServiceStatusesAsJSON returns the JSON encoding of all core.ServiceStatus recorded
func GetServiceStatusesAsJSON() ([]byte, error) {
	return store.GetAllAsJSON()
}

// GetUptimeByKey returns the uptime of a service based on the ServiceStatus key
func GetUptimeByKey(key string) *core.Uptime {
	serviceStatus := store.GetServiceStatusByKey(key)
	if serviceStatus == nil {
		return nil
	}
	return serviceStatus.Uptime
}

// GetServiceStatusByKey returns the uptime of a service based on its ServiceStatus key
func GetServiceStatusByKey(key string) *core.ServiceStatus {
	return store.GetServiceStatusByKey(key)
}

// Monitor loops over each service and starts a goroutine to monitor each service separately
func Monitor(cfg *config.Config) {
	for _, service := range cfg.Services {
		go monitor(service)
		// To prevent multiple requests from running at the same time
		// To prevent multiple requests from running at the same time, we'll wait for a little bit before each iteration
		time.Sleep(1111 * time.Millisecond)
		go monitor(service)
	}
}

@@ -88,5 +67,5 @@ func monitor(service *core.Service) {

// UpdateServiceStatuses updates the slice of service statuses
func UpdateServiceStatuses(service *core.Service, result *core.Result) {
	store.Insert(service, result)
	storage.Get().Insert(service, result)
}
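The change above staggers the monitoring goroutines instead of launching them all at once, so concurrent requests don't skew each other's response-time measurements. A stripped-down sketch of the pattern (illustrative names, not Gatus's actual API):

```go
package main

import (
	"fmt"
	"time"
)

// launchStaggered starts one worker goroutine per name, sleeping between
// launches so the work doesn't all fire at the same instant.
func launchStaggered(names []string, gap time.Duration, work func(string)) {
	for _, name := range names {
		time.Sleep(gap) // wait a little before each iteration
		go work(name)
	}
}

func main() {
	done := make(chan string)
	launchStaggered([]string{"api", "frontend", "database"}, 100*time.Millisecond, func(name string) {
		done <- name
	})
	for i := 0; i < 3; i++ {
		fmt.Println(<-done)
	}
}
```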
@@ -1,9 +1,9 @@
<template>
  <div class="container container-xs relative mx-auto rounded shadow-xl border my-3 p-5 text-left" id="global">
  <div class="container container-xs relative mx-auto xl:rounded xl:border xl:shadow-xl xl:my-5 p-5 pb-12 xl:pb-5 text-left" id="global">
    <div class="mb-2">
      <div class="flex flex-wrap">
        <div class="w-3/4 text-left my-auto">
          <div class="title text-5xl font-light">Health Status</div>
          <div class="text-3xl xl:text-5xl lg:text-4xl font-light">Health Status</div>
        </div>
        <div class="w-1/4 flex justify-end">
          <img src="./assets/logo.png" alt="Gatus" class="object-scale-down" style="max-width: 100px; min-width: 50px; min-height:50px;"/>
@@ -45,6 +45,7 @@ export default {
html {
  height: 100%;
}

html, body {
  background-color: #f7f9fb;
}
BIN
web/app/src/assets/arrow-down-red.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.1 KiB
Some files were not shown because too many files have changed in this diff