Mirror of https://github.com/Oxalide/vsphere-influxdb-go.git (synced 2023-10-10 11:36:51 +00:00)

Commit: add vendoring with go dep
66 vendor/github.com/influxdata/influxdb/services/httpd/config.go (generated, vendored, normal file)
@@ -0,0 +1,66 @@

package httpd

import "github.com/influxdata/influxdb/monitor/diagnostics"

const (
    // DefaultBindAddress is the default address to bind to.
    DefaultBindAddress = ":8086"

    // DefaultRealm is the default realm sent back when issuing a basic auth challenge.
    DefaultRealm = "InfluxDB"

    // DefaultBindSocket is the default unix socket to bind to.
    DefaultBindSocket = "/var/run/influxdb.sock"
)

// Config represents a configuration for a HTTP service.
type Config struct {
    Enabled            bool   `toml:"enabled"`
    BindAddress        string `toml:"bind-address"`
    AuthEnabled        bool   `toml:"auth-enabled"`
    LogEnabled         bool   `toml:"log-enabled"`
    WriteTracing       bool   `toml:"write-tracing"`
    PprofEnabled       bool   `toml:"pprof-enabled"`
    HTTPSEnabled       bool   `toml:"https-enabled"`
    HTTPSCertificate   string `toml:"https-certificate"`
    HTTPSPrivateKey    string `toml:"https-private-key"`
    MaxRowLimit        int    `toml:"max-row-limit"`
    MaxConnectionLimit int    `toml:"max-connection-limit"`
    SharedSecret       string `toml:"shared-secret"`
    Realm              string `toml:"realm"`
    UnixSocketEnabled  bool   `toml:"unix-socket-enabled"`
    BindSocket         string `toml:"bind-socket"`
}

// NewConfig returns a new Config with default settings.
func NewConfig() Config {
    return Config{
        Enabled:           true,
        BindAddress:       DefaultBindAddress,
        LogEnabled:        true,
        PprofEnabled:      true,
        HTTPSEnabled:      false,
        HTTPSCertificate:  "/etc/ssl/influxdb.pem",
        MaxRowLimit:       0,
        Realm:             DefaultRealm,
        UnixSocketEnabled: false,
        BindSocket:        DefaultBindSocket,
    }
}

// Diagnostics returns a diagnostics representation of a subset of the Config.
func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
    if !c.Enabled {
        return diagnostics.RowFromMap(map[string]interface{}{
            "enabled": false,
        }), nil
    }

    return diagnostics.RowFromMap(map[string]interface{}{
        "enabled":              true,
        "bind-address":         c.BindAddress,
        "https-enabled":        c.HTTPSEnabled,
        "max-row-limit":        c.MaxRowLimit,
        "max-connection-limit": c.MaxConnectionLimit,
    }), nil
}
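For orientation, a minimal sketch of how this vendored config type is typically consumed: start from the defaults, then decode a TOML fragment over them. The standalone program, the TOML fragment, and the overridden address are assumptions for illustration only and are not part of this commit (the test file that follows exercises the same decoding path).

package main

import (
    "fmt"
    "log"

    "github.com/BurntSushi/toml"
    "github.com/influxdata/influxdb/services/httpd"
)

func main() {
    // Start from the library defaults, then override selected fields from TOML.
    c := httpd.NewConfig()
    if _, err := toml.Decode(`bind-address = ":9086"`, &c); err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.BindAddress, c.Realm) // ":9086" "InfluxDB"
}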
55 vendor/github.com/influxdata/influxdb/services/httpd/config_test.go (generated, vendored, normal file)
@@ -0,0 +1,55 @@

package httpd_test

import (
    "testing"

    "github.com/BurntSushi/toml"
    "github.com/influxdata/influxdb/services/httpd"
)

func TestConfig_Parse(t *testing.T) {
    // Parse configuration.
    var c httpd.Config
    if _, err := toml.Decode(`
enabled = true
bind-address = ":8080"
auth-enabled = true
log-enabled = true
write-tracing = true
https-enabled = true
https-certificate = "/dev/null"
unix-socket-enabled = true
bind-socket = "/var/run/influxdb.sock"
`, &c); err != nil {
        t.Fatal(err)
    }

    // Validate configuration.
    if c.Enabled != true {
        t.Fatalf("unexpected enabled: %v", c.Enabled)
    } else if c.BindAddress != ":8080" {
        t.Fatalf("unexpected bind address: %s", c.BindAddress)
    } else if c.AuthEnabled != true {
        t.Fatalf("unexpected auth enabled: %v", c.AuthEnabled)
    } else if c.LogEnabled != true {
        t.Fatalf("unexpected log enabled: %v", c.LogEnabled)
    } else if c.WriteTracing != true {
        t.Fatalf("unexpected write tracing: %v", c.WriteTracing)
    } else if c.HTTPSEnabled != true {
        t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled)
    } else if c.HTTPSCertificate != "/dev/null" {
        t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate)
    } else if c.UnixSocketEnabled != true {
        t.Fatalf("unexpected unix socket enabled: %v", c.UnixSocketEnabled)
    } else if c.BindSocket != "/var/run/influxdb.sock" {
        t.Fatalf("unexpected bind unix socket: %v", c.BindSocket)
    }
}

func TestConfig_WriteTracing(t *testing.T) {
    c := httpd.Config{WriteTracing: true}
    s := httpd.NewService(c)
    if !s.Handler.Config.WriteTracing {
        t.Fatalf("write tracing was not set")
    }
}
1309 vendor/github.com/influxdata/influxdb/services/httpd/handler.go (generated, vendored, normal file)
File diff suppressed because it is too large.
706 vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go (generated, vendored, normal file)
@@ -0,0 +1,706 @@

package httpd_test

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "log"
    "mime/multipart"
    "net/http"
    "net/http/httptest"
    "net/url"
    "strings"
    "testing"
    "time"

    "github.com/influxdata/influxdb/internal"

    "github.com/dgrijalva/jwt-go"
    "github.com/influxdata/influxdb/influxql"
    "github.com/influxdata/influxdb/models"
    "github.com/influxdata/influxdb/services/httpd"
    "github.com/influxdata/influxdb/services/meta"
)

// Ensure the handler returns results from a query (including nil results).
func TestHandler_Query(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if stmt.String() != `SELECT * FROM bar` {
            t.Fatalf("unexpected query: %s", stmt.String())
        } else if ctx.Database != `foo` {
            t.Fatalf("unexpected db: %s", ctx.Database)
        }
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler returns results from a query passed as a file.
func TestHandler_Query_File(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if stmt.String() != `SELECT * FROM bar` {
            t.Fatalf("unexpected query: %s", stmt.String())
        } else if ctx.Database != `foo` {
            t.Fatalf("unexpected db: %s", ctx.Database)
        }
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    var body bytes.Buffer
    writer := multipart.NewWriter(&body)
    part, err := writer.CreateFormFile("q", "")
    if err != nil {
        t.Fatal(err)
    }
    io.WriteString(part, "SELECT * FROM bar")

    if err := writer.Close(); err != nil {
        t.Fatal(err)
    }

    r := MustNewJSONRequest("POST", "/query?db=foo", &body)
    r.Header.Set("Content-Type", writer.FormDataContentType())

    w := httptest.NewRecorder()
    h.ServeHTTP(w, r)
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Test query with user authentication.
func TestHandler_Query_Auth(t *testing.T) {
    // Create the handler to be tested.
    h := NewHandler(true)

    // Set mock meta client functions for the handler to use.
    h.MetaClient.AdminUserExistsFn = func() bool { return true }

    h.MetaClient.UserFn = func(username string) (meta.User, error) {
        if username != "user1" {
            return nil, meta.ErrUserNotFound
        }
        return &meta.UserInfo{
            Name:  "user1",
            Hash:  "abcd",
            Admin: true,
        }, nil
    }

    h.MetaClient.AuthenticateFn = func(u, p string) (meta.User, error) {
        if u != "user1" {
            return nil, fmt.Errorf("unexpected user: exp: user1, got: %s", u)
        } else if p != "abcd" {
            return nil, fmt.Errorf("unexpected password: exp: abcd, got: %s", p)
        }
        return h.MetaClient.User(u)
    }

    // Set mock query authorizer for handler to use.
    h.QueryAuthorizer.AuthorizeQueryFn = func(u meta.User, query *influxql.Query, database string) error {
        return nil
    }

    // Set mock statement executor for handler to use.
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if stmt.String() != `SELECT * FROM bar` {
            t.Fatalf("unexpected query: %s", stmt.String())
        } else if ctx.Database != `foo` {
            t.Fatalf("unexpected db: %s", ctx.Database)
        }
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    // Test the handler with valid user and password in the URL parameters.
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?u=user1&p=abcd&db=foo&q=SELECT+*+FROM+bar", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test the handler with valid user and password using basic auth.
    w = httptest.NewRecorder()
    r := MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil)
    r.SetBasicAuth("user1", "abcd")
    h.ServeHTTP(w, r)
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test the handler with valid JWT bearer token.
    req := MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil)
    // Create a signed JWT token string and add it to the request header.
    _, signedToken := MustJWTToken("user1", h.Config.SharedSecret, false)
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signedToken))

    w = httptest.NewRecorder()
    h.ServeHTTP(w, req)
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test the handler with JWT token signed with invalid key.
    req = MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil)
    // Create a signed JWT token string and add it to the request header.
    _, signedToken = MustJWTToken("user1", "invalid key", false)
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signedToken))

    w = httptest.NewRecorder()
    h.ServeHTTP(w, req)
    if w.Code != http.StatusUnauthorized {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"signature is invalid"}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test handler with valid JWT token carrying non-existant user.
    _, signedToken = MustJWTToken("bad_user", h.Config.SharedSecret, false)
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signedToken))

    w = httptest.NewRecorder()
    h.ServeHTTP(w, req)
    if w.Code != http.StatusUnauthorized {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"user not found"}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test handler with expired JWT token.
    _, signedToken = MustJWTToken("user1", h.Config.SharedSecret, true)
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signedToken))

    w = httptest.NewRecorder()
    h.ServeHTTP(w, req)
    if w.Code != http.StatusUnauthorized {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if !strings.Contains(w.Body.String(), `{"error":"Token is expired`) {
        t.Fatalf("unexpected body: %s", w.Body.String())
    }

    // Test handler with JWT token that has no expiration set.
    token, _ := MustJWTToken("user1", h.Config.SharedSecret, false)
    delete(token.Claims.(jwt.MapClaims), "exp")
    signedToken, err := token.SignedString([]byte(h.Config.SharedSecret))
    if err != nil {
        t.Fatal(err)
    }
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", signedToken))
    w = httptest.NewRecorder()
    h.ServeHTTP(w, req)
    if w.Code != http.StatusUnauthorized {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"token expiration required"}` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Test the handler with valid user and password in the url and invalid in
    // basic auth (prioritize url).
    w = httptest.NewRecorder()
    r = MustNewJSONRequest("GET", "/query?u=user1&p=abcd&db=foo&q=SELECT+*+FROM+bar", nil)
    r.SetBasicAuth("user1", "efgh")
    h.ServeHTTP(w, r)
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String())
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]},{"statement_id":2,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler returns results from a query (including nil results).
func TestHandler_QueryRegex(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if stmt.String() != `SELECT * FROM test WHERE url =~ /http\:\/\/www.akamai\.com/` {
            t.Fatalf("unexpected query: %s", stmt.String())
        } else if ctx.Database != `test` {
            t.Fatalf("unexpected db: %s", ctx.Database)
        }
        ctx.Results <- nil
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewRequest("GET", "/query?db=test&q=SELECT%20%2A%20FROM%20test%20WHERE%20url%20%3D~%20%2Fhttp%5C%3A%5C%2F%5C%2Fwww.akamai%5C.com%2F", nil))
}

// Ensure the handler merges results from the same statement.
func TestHandler_Query_MergeResults(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series0"},{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler merges results from the same statement.
func TestHandler_Query_MergeEmptyResults(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows{}}
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":1,"series":[{"name":"series1"}]}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler can parse chunked and chunk size query parameters.
func TestHandler_Query_Chunked(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if ctx.ChunkSize != 2 {
            t.Fatalf("unexpected chunk size: %d", ctx.ChunkSize)
        }
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar&chunked=true&chunk_size=2", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if w.Body.String() != `{"results":[{"statement_id":1,"series":[{"name":"series0"}]}]}
{"results":[{"statement_id":1,"series":[{"name":"series1"}]}]}
` {
        t.Fatalf("unexpected body: %s", w.Body.String())
    }
}

// Ensure the handler can accept an async query.
func TestHandler_Query_Async(t *testing.T) {
    done := make(chan struct{})
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        if stmt.String() != `SELECT * FROM bar` {
            t.Fatalf("unexpected query: %s", stmt.String())
        } else if ctx.Database != `foo` {
            t.Fatalf("unexpected db: %s", ctx.Database)
        }
        ctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: "series0"}})}
        ctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: "series1"}})}
        close(done)
        return nil
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar&async=true", nil))
    if w.Code != http.StatusNoContent {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `` {
        t.Fatalf("unexpected body: %s", body)
    }

    // Wait to make sure the async query runs and completes.
    timer := time.NewTimer(100 * time.Millisecond)
    defer timer.Stop()

    select {
    case <-timer.C:
        t.Fatal("timeout while waiting for async query to complete")
    case <-done:
    }
}

// Ensure the handler returns a status 400 if the query is not passed in.
func TestHandler_Query_ErrQueryRequired(t *testing.T) {
    h := NewHandler(false)
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query", nil))
    if w.Code != http.StatusBadRequest {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"missing required parameter \"q\""}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler returns a status 400 if the query cannot be parsed.
func TestHandler_Query_ErrInvalidQuery(t *testing.T) {
    h := NewHandler(false)
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?q=SELECT", nil))
    if w.Code != http.StatusBadRequest {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"error parsing query: found EOF, expected identifier, string, number, bool at line 1, char 8"}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure the handler returns an appropriate 401 or 403 status when authentication or authorization fails.
func TestHandler_Query_ErrAuthorize(t *testing.T) {
    h := NewHandler(true)
    h.QueryAuthorizer.AuthorizeQueryFn = func(u meta.User, q *influxql.Query, db string) error {
        return errors.New("marker")
    }
    h.MetaClient.AdminUserExistsFn = func() bool { return true }
    h.MetaClient.AuthenticateFn = func(u, p string) (meta.User, error) {

        users := []meta.UserInfo{
            {
                Name:  "admin",
                Hash:  "admin",
                Admin: true,
            },
            {
                Name: "user1",
                Hash: "abcd",
                Privileges: map[string]influxql.Privilege{
                    "db0": influxql.ReadPrivilege,
                },
            },
        }

        for _, user := range users {
            if u == user.Name {
                if p == user.Hash {
                    return &user, nil
                }
                return nil, meta.ErrAuthenticate
            }
        }
        return nil, meta.ErrUserNotFound
    }

    for i, tt := range []struct {
        user     string
        password string
        query    string
        code     int
    }{
        {
            query: "/query?q=SHOW+DATABASES",
            code:  http.StatusUnauthorized,
        },
        {
            user:     "user1",
            password: "abcd",
            query:    "/query?q=SHOW+DATABASES",
            code:     http.StatusForbidden,
        },
        {
            user:     "user2",
            password: "abcd",
            query:    "/query?q=SHOW+DATABASES",
            code:     http.StatusUnauthorized,
        },
    } {
        w := httptest.NewRecorder()
        r := MustNewJSONRequest("GET", tt.query, nil)
        params := r.URL.Query()
        if tt.user != "" {
            params.Set("u", tt.user)
        }
        if tt.password != "" {
            params.Set("p", tt.password)
        }
        r.URL.RawQuery = params.Encode()

        h.ServeHTTP(w, r)
        if w.Code != tt.code {
            t.Errorf("%d. unexpected status: got=%d exp=%d\noutput: %s", i, w.Code, tt.code, w.Body.String())
        }
    }
}

// Ensure the handler returns a status 200 if an error is returned in the result.
func TestHandler_Query_ErrResult(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        return errors.New("measurement not found")
    }

    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SHOW+SERIES+from+bin", nil))
    if w.Code != http.StatusOK {
        t.Fatalf("unexpected status: %d", w.Code)
    } else if body := strings.TrimSpace(w.Body.String()); body != `{"results":[{"statement_id":0,"error":"measurement not found"}]}` {
        t.Fatalf("unexpected body: %s", body)
    }
}

// Ensure that closing the HTTP connection causes the query to be interrupted.
func TestHandler_Query_CloseNotify(t *testing.T) {
    // Avoid leaking a goroutine when this fails.
    done := make(chan struct{})
    defer close(done)

    interrupted := make(chan struct{})
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        select {
        case <-ctx.InterruptCh:
        case <-done:
        }
        close(interrupted)
        return nil
    }

    s := httptest.NewServer(h)
    defer s.Close()

    // Parse the URL and generate a query request.
    u, err := url.Parse(s.URL)
    if err != nil {
        t.Fatal(err)
    }
    u.Path = "/query"

    values := url.Values{}
    values.Set("q", "SELECT * FROM cpu")
    values.Set("db", "db0")
    values.Set("rp", "rp0")
    values.Set("chunked", "true")
    u.RawQuery = values.Encode()

    req, err := http.NewRequest("GET", u.String(), nil)
    if err != nil {
        t.Fatal(err)
    }

    // Perform the request and retrieve the response.
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        t.Fatal(err)
    }

    // Validate that the interrupted channel has NOT been closed yet.
    timer := time.NewTimer(100 * time.Millisecond)
    select {
    case <-interrupted:
        timer.Stop()
        t.Fatal("query interrupted unexpectedly")
    case <-timer.C:
    }

    // Close the response body which should abort the query in the handler.
    resp.Body.Close()

    // The query should abort within 100 milliseconds.
    timer.Reset(100 * time.Millisecond)
    select {
    case <-interrupted:
        timer.Stop()
    case <-timer.C:
        t.Fatal("timeout while waiting for query to abort")
    }
}

// Ensure the handler handles ping requests correctly.
// TODO: This should be expanded to verify the MetaClient check in servePing is working correctly
func TestHandler_Ping(t *testing.T) {
    h := NewHandler(false)
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewRequest("GET", "/ping", nil))
    if w.Code != http.StatusNoContent {
        t.Fatalf("unexpected status: %d", w.Code)
    }
    h.ServeHTTP(w, MustNewRequest("HEAD", "/ping", nil))
    if w.Code != http.StatusNoContent {
        t.Fatalf("unexpected status: %d", w.Code)
    }
}

// Ensure the handler returns the version correctly from the different endpoints.
func TestHandler_Version(t *testing.T) {
    h := NewHandler(false)
    h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
        return nil
    }
    tests := []struct {
        method   string
        endpoint string
        body     io.Reader
    }{
        {
            method:   "GET",
            endpoint: "/ping",
            body:     nil,
        },
        {
            method:   "GET",
            endpoint: "/query?db=foo&q=SELECT+*+FROM+bar",
            body:     nil,
        },
        {
            method:   "POST",
            endpoint: "/write",
            body:     bytes.NewReader(make([]byte, 10)),
        },
        {
            method:   "GET",
            endpoint: "/notfound",
            body:     nil,
        },
    }

    for _, test := range tests {
        w := httptest.NewRecorder()
        h.ServeHTTP(w, MustNewRequest(test.method, test.endpoint, test.body))
        if v, ok := w.HeaderMap["X-Influxdb-Version"]; ok {
            if v[0] != "0.0.0" {
                t.Fatalf("unexpected version: %s", v)
            }
        } else {
            t.Fatalf("Header entry 'X-Influxdb-Version' not present")
        }
    }
}

// Ensure the handler handles status requests correctly.
func TestHandler_Status(t *testing.T) {
    h := NewHandler(false)
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewRequest("GET", "/status", nil))
    if w.Code != http.StatusNoContent {
        t.Fatalf("unexpected status: %d", w.Code)
    }
    h.ServeHTTP(w, MustNewRequest("HEAD", "/status", nil))
    if w.Code != http.StatusNoContent {
        t.Fatalf("unexpected status: %d", w.Code)
    }
}

// Ensure write endpoint can handle bad requests
func TestHandler_HandleBadRequestBody(t *testing.T) {
    b := bytes.NewReader(make([]byte, 10))
    h := NewHandler(false)
    w := httptest.NewRecorder()
    h.ServeHTTP(w, MustNewRequest("POST", "/write", b))
    if w.Code != http.StatusBadRequest {
        t.Fatalf("unexpected status: %d", w.Code)
    }
}

// Ensure X-Forwarded-For header writes the correct log message.
func TestHandler_XForwardedFor(t *testing.T) {
    var buf bytes.Buffer
    h := NewHandler(false)
    h.CLFLogger = log.New(&buf, "", 0)

    req := MustNewRequest("GET", "/query", nil)
    req.Header.Set("X-Forwarded-For", "192.168.0.1")
    req.RemoteAddr = "127.0.0.1"
    h.ServeHTTP(httptest.NewRecorder(), req)

    parts := strings.Split(buf.String(), " ")
    if parts[0] != "192.168.0.1,127.0.0.1" {
        t.Errorf("unexpected host ip address: %s", parts[0])
    }
}

// NewHandler represents a test wrapper for httpd.Handler.
type Handler struct {
    *httpd.Handler
    MetaClient        *internal.MetaClientMock
    StatementExecutor HandlerStatementExecutor
    QueryAuthorizer   HandlerQueryAuthorizer
}

// NewHandler returns a new instance of Handler.
func NewHandler(requireAuthentication bool) *Handler {
    config := httpd.NewConfig()
    config.AuthEnabled = requireAuthentication
    config.SharedSecret = "super secret key"

    h := &Handler{
        Handler: httpd.NewHandler(config),
    }

    h.MetaClient = &internal.MetaClientMock{}

    h.Handler.MetaClient = h.MetaClient
    h.Handler.QueryExecutor = influxql.NewQueryExecutor()
    h.Handler.QueryExecutor.StatementExecutor = &h.StatementExecutor
    h.Handler.QueryAuthorizer = &h.QueryAuthorizer
    h.Handler.Version = "0.0.0"
    return h
}

// HandlerStatementExecutor is a mock implementation of Handler.StatementExecutor.
type HandlerStatementExecutor struct {
    ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error
}

func (e *HandlerStatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
    return e.ExecuteStatementFn(stmt, ctx)
}

// HandlerQueryAuthorizer is a mock implementation of Handler.QueryAuthorizer.
type HandlerQueryAuthorizer struct {
    AuthorizeQueryFn func(u meta.User, query *influxql.Query, database string) error
}

func (a *HandlerQueryAuthorizer) AuthorizeQuery(u meta.User, query *influxql.Query, database string) error {
    return a.AuthorizeQueryFn(u, query, database)
}

// MustNewRequest returns a new HTTP request. Panic on error.
func MustNewRequest(method, urlStr string, body io.Reader) *http.Request {
    r, err := http.NewRequest(method, urlStr, body)
    if err != nil {
        panic(err.Error())
    }
    return r
}

// MustNewRequest returns a new HTTP request with the content type set. Panic on error.
func MustNewJSONRequest(method, urlStr string, body io.Reader) *http.Request {
    r := MustNewRequest(method, urlStr, body)
    r.Header.Set("Accept", "application/json")
    return r
}

// MustJWTToken returns a new JWT token and signed string or panics trying.
func MustJWTToken(username, secret string, expired bool) (*jwt.Token, string) {
    token := jwt.New(jwt.GetSigningMethod("HS512"))
    token.Claims.(jwt.MapClaims)["username"] = username
    if expired {
        token.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(-time.Second).Unix()
    } else {
        token.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Minute * 10).Unix()
    }
    signed, err := token.SignedString([]byte(secret))
    if err != nil {
        panic(err)
    }
    return token, signed
}
51 vendor/github.com/influxdata/influxdb/services/httpd/listen.go (generated, vendored, normal file)
@@ -0,0 +1,51 @@

package httpd

import (
    "net"
    "sync"
)

// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener and will drop extra connections.
func LimitListener(l net.Listener, n int) net.Listener {
    return &limitListener{Listener: l, sem: make(chan struct{}, n)}
}

// limitListener is a listener that limits the number of active connections
// at any given time.
type limitListener struct {
    net.Listener
    sem chan struct{}
}

func (l *limitListener) release() {
    <-l.sem
}

func (l *limitListener) Accept() (net.Conn, error) {
    for {
        c, err := l.Listener.Accept()
        if err != nil {
            return nil, err
        }

        select {
        case l.sem <- struct{}{}:
            return &limitListenerConn{Conn: c, release: l.release}, nil
        default:
            c.Close()
        }
    }
}

type limitListenerConn struct {
    net.Conn
    releaseOnce sync.Once
    release     func()
}

func (l *limitListenerConn) Close() error {
    err := l.Conn.Close()
    l.releaseOnce.Do(l.release)
    return err
}
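A hedged usage sketch (not part of this diff): LimitListener is what backs the `max-connection-limit` setting, and any plain net.Listener can be wrapped the same way. The port, the limit of 500, and serving the default mux are illustrative assumptions.

package main

import (
    "log"
    "net"
    "net/http"

    "github.com/influxdata/influxdb/services/httpd"
)

func main() {
    ln, err := net.Listen("tcp", ":8086")
    if err != nil {
        log.Fatal(err)
    }
    // At most 500 connections are served concurrently; an extra connection is
    // accepted by the inner listener and immediately closed by limitListener.Accept.
    log.Fatal(http.Serve(httpd.LimitListener(ln, 500), nil))
}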
108 vendor/github.com/influxdata/influxdb/services/httpd/listen_test.go (generated, vendored, normal file)
@@ -0,0 +1,108 @@

package httpd_test

import (
    "io"
    "net"
    "sync"
    "testing"
    "time"

    "github.com/influxdata/influxdb/services/httpd"
)

type fakeListener struct {
    AcceptFn func() (net.Conn, error)
}

func (l *fakeListener) Accept() (net.Conn, error) {
    if l.AcceptFn != nil {
        return l.AcceptFn()
    }
    return &fakeConn{}, nil
}

func (*fakeListener) Close() error   { return nil }
func (*fakeListener) Addr() net.Addr { return nil }

type fakeConn struct {
    closed bool
}

func (*fakeConn) Read([]byte) (int, error)    { return 0, io.EOF }
func (*fakeConn) Write(b []byte) (int, error) { return len(b), nil }
func (c *fakeConn) Close() error {
    c.closed = true
    return nil
}
func (*fakeConn) LocalAddr() net.Addr              { return nil }
func (*fakeConn) RemoteAddr() net.Addr             { return nil }
func (*fakeConn) SetDeadline(time.Time) error      { return nil }
func (*fakeConn) SetReadDeadline(time.Time) error  { return nil }
func (*fakeConn) SetWriteDeadline(time.Time) error { return nil }

func TestLimitListener(t *testing.T) {
    conns := make(chan net.Conn, 2)
    l := httpd.LimitListener(&fakeListener{
        AcceptFn: func() (net.Conn, error) {
            select {
            case c := <-conns:
                if c != nil {
                    return c, nil
                }
            default:
            }
            return nil, io.EOF
        },
    }, 1)
    c1, c2 := &fakeConn{}, &fakeConn{}
    conns <- c1
    conns <- c2

    var c net.Conn
    var err error
    if c, err = l.Accept(); err != nil {
        t.Fatalf("expected accept to succeed: %s", err)
    }

    if _, err = l.Accept(); err != io.EOF {
        t.Fatalf("expected eof, got %s", err)
    } else if !c2.closed {
        t.Fatalf("expected connection to be automatically closed")
    }
    c.Close()

    conns <- &fakeConn{}
    if _, err = l.Accept(); err != nil {
        t.Fatalf("expeced accept to succeed: %s", err)
    }
}

func BenchmarkLimitListener(b *testing.B) {
    var wg sync.WaitGroup
    wg.Add(b.N)

    l := httpd.LimitListener(&fakeListener{}, b.N)
    errC := make(chan error)
    for i := 0; i < b.N; i++ {
        go func() {
            defer wg.Done()
            c, err := l.Accept()
            if err != nil {
                errC <- err
                return
            }
            c.Close()
        }()
    }

    go func() {
        wg.Wait()
        close(errC)
    }()

    for err := range errC {
        if err != nil {
            b.Error(err)
        }
    }
}
336 vendor/github.com/influxdata/influxdb/services/httpd/pprof.go (generated, vendored, normal file)
@@ -0,0 +1,336 @@

package httpd

import (
    "archive/tar"
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "net/http"
    httppprof "net/http/pprof"
    "runtime/pprof"
    "sort"
    "strconv"
    "text/tabwriter"
    "time"

    "github.com/influxdata/influxdb/models"
)

// handleProfiles determines which profile to return to the requester.
func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
    switch r.URL.Path {
    case "/debug/pprof/cmdline":
        httppprof.Cmdline(w, r)
    case "/debug/pprof/profile":
        httppprof.Profile(w, r)
    case "/debug/pprof/symbol":
        httppprof.Symbol(w, r)
    case "/debug/pprof/all":
        h.archiveProfilesAndQueries(w, r)
    default:
        httppprof.Index(w, r)
    }
}

// prof describes a profile name and a debug value, or in the case of a CPU
// profile, the number of seconds to collect the profile for.
type prof struct {
    Name  string
    Debug int64
}

// archiveProfilesAndQueries collects the following profiles:
//  - goroutine profile
//  - heap profile
//  - blocking profile
//  - (optionally) CPU profile
//
// It also collects the following query results:
//
//  - SHOW SHARDS
//  - SHOW STATS
//  - SHOW DIAGNOSTICS
//
// All information is added to a tar archive and then compressed, before being
// returned to the requester as an archive file. Where profiles support debug
// parameters, the profile is collected with debug=1. To optionally include a
// CPU profile, the requester should provide a `cpu` query parameter, and can
// also provide a `seconds` parameter to specify a non-default profile
// collection time. The default CPU profile collection time is 30 seconds.
//
// Example request including CPU profile:
//
//  http://localhost:8086/debug/pprof/all?cpu=true&seconds=45
//
// The value after the `cpu` query parameter is not actually important, as long
// as there is something there.
//
func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {
    var allProfs = []*prof{
        {Name: "goroutine", Debug: 1},
        {Name: "block", Debug: 1},
        {Name: "heap", Debug: 1},
    }

    // Capture a CPU profile?
    if r.FormValue("cpu") != "" {
        profile := &prof{Name: "cpu"}

        // For a CPU profile we'll use the Debug field to indicate the number of
        // seconds to capture the profile for.
        profile.Debug, _ = strconv.ParseInt(r.FormValue("seconds"), 10, 64)
        if profile.Debug <= 0 {
            profile.Debug = 30
        }
        allProfs = append([]*prof{profile}, allProfs...) // CPU profile first.
    }

    var (
        resp bytes.Buffer // Temporary buffer for entire archive.
        buf  bytes.Buffer // Temporary buffer for each profile/query result.
    )

    gz := gzip.NewWriter(&resp)
    tw := tar.NewWriter(gz)

    // Collect and write out profiles.
    for _, profile := range allProfs {
        if profile.Name == "cpu" {
            if err := pprof.StartCPUProfile(&buf); err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }

            sleep(w, time.Duration(profile.Debug)*time.Second)
            pprof.StopCPUProfile()
        } else {
            prof := pprof.Lookup(profile.Name)
            if prof == nil {
                http.Error(w, "unable to find profile "+profile.Name, http.StatusInternalServerError)
                return
            }

            if err := prof.WriteTo(&buf, int(profile.Debug)); err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
        }

        // Write the profile file's header.
        err := tw.WriteHeader(&tar.Header{
            Name: profile.Name + ".txt",
            Mode: 0600,
            Size: int64(buf.Len()),
        })
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        // Write the profile file's data.
        if _, err := tw.Write(buf.Bytes()); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        // Reset the buffer for the next profile.
        buf.Reset()
    }

    // Collect and write out the queries.
    var allQueries = []struct {
        name string
        fn   func() ([]*models.Row, error)
    }{
        {"shards", h.showShards},
        {"stats", h.showStats},
        {"diagnostics", h.showDiagnostics},
    }

    tabW := tabwriter.NewWriter(&buf, 8, 8, 1, '\t', 0)
    for _, query := range allQueries {
        rows, err := query.fn()
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        for i, row := range rows {
            var out []byte
            // Write the columns
            for _, col := range row.Columns {
                out = append(out, []byte(col+"\t")...)
            }
            out = append(out, '\n')
            if _, err := tabW.Write(out); err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
            }

            // Write all the values
            for _, val := range row.Values {
                out = out[:0]
                for _, v := range val {
                    out = append(out, []byte(fmt.Sprintf("%v\t", v))...)
                }
                out = append(out, '\n')
                if _, err := tabW.Write(out); err != nil {
                    http.Error(w, err.Error(), http.StatusInternalServerError)
                }
            }

            // Write a final newline
            if i < len(rows)-1 {
                if _, err := tabW.Write([]byte("\n")); err != nil {
                    http.Error(w, err.Error(), http.StatusInternalServerError)
                }
            }
        }

        if err := tabW.Flush(); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        err = tw.WriteHeader(&tar.Header{
            Name: query.name + ".txt",
            Mode: 0600,
            Size: int64(buf.Len()),
        })
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        // Write the query file's data.
        if _, err := tw.Write(buf.Bytes()); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }

        // Reset the buffer for the next query.
        buf.Reset()
    }

    // Close the tar writer.
    if err := tw.Close(); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }

    // Close the gzip writer.
    if err := gz.Close(); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }

    // Return the gzipped archive.
    w.Header().Set("Content-Disposition", "attachment; filename=profiles.tar.gz")
    w.Header().Set("Content-Type", "application/gzip")
    io.Copy(w, &resp) // Nothing we can really do about an error at this point.
}

// showShards generates the same values that a StatementExecutor would if a
// SHOW SHARDS query was executed.
func (h *Handler) showShards() ([]*models.Row, error) {
    dis := h.MetaClient.Databases()

    rows := []*models.Row{}
    for _, di := range dis {
        row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
        for _, rpi := range di.RetentionPolicies {
            for _, sgi := range rpi.ShardGroups {
                // Shards associated with deleted shard groups are effectively deleted.
                // Don't list them.
                if sgi.Deleted() {
                    continue
                }

                for _, si := range sgi.Shards {
                    ownerIDs := make([]uint64, len(si.Owners))
                    for i, owner := range si.Owners {
                        ownerIDs[i] = owner.NodeID
                    }

                    row.Values = append(row.Values, []interface{}{
                        si.ID,
                        di.Name,
                        rpi.Name,
                        sgi.ID,
                        sgi.StartTime.UTC().Format(time.RFC3339),
                        sgi.EndTime.UTC().Format(time.RFC3339),
                        sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),
                        joinUint64(ownerIDs),
                    })
                }
            }
        }
        rows = append(rows, row)
    }
    return rows, nil
}

// showDiagnostics generates the same values that a StatementExecutor would if a
// SHOW DIAGNOSTICS query was executed.
func (h *Handler) showDiagnostics() ([]*models.Row, error) {
    diags, err := h.Monitor.Diagnostics()
    if err != nil {
        return nil, err
    }

    // Get a sorted list of diagnostics keys.
    sortedKeys := make([]string, 0, len(diags))
    for k := range diags {
        sortedKeys = append(sortedKeys, k)
    }
    sort.Strings(sortedKeys)

    rows := make([]*models.Row, 0, len(diags))
    for _, k := range sortedKeys {
        row := &models.Row{Name: k}

        row.Columns = diags[k].Columns
        row.Values = diags[k].Rows
        rows = append(rows, row)
    }
    return rows, nil
}

// showStats generates the same values that a StatementExecutor would if a
// SHOW STATS query was executed.
func (h *Handler) showStats() ([]*models.Row, error) {
    stats, err := h.Monitor.Statistics(nil)
    if err != nil {
        return nil, err
    }

    var rows []*models.Row
    for _, stat := range stats {
        row := &models.Row{Name: stat.Name, Tags: stat.Tags}

        values := make([]interface{}, 0, len(stat.Values))
        for _, k := range stat.ValueNames() {
            row.Columns = append(row.Columns, k)
            values = append(values, stat.Values[k])
        }
        row.Values = [][]interface{}{values}
        rows = append(rows, row)
    }
    return rows, nil
}

// joinUint64 returns a comma-delimited string of uint64 numbers.
func joinUint64(a []uint64) string {
    var buf []byte // Could take a guess at initial size here.
    for i, x := range a {
        if i != 0 {
            buf = append(buf, ',')
        }
        buf = strconv.AppendUint(buf, x, 10)
    }
    return string(buf)
}

// Taken from net/http/pprof/pprof.go
func sleep(w http.ResponseWriter, d time.Duration) {
    var clientGone <-chan bool
    if cn, ok := w.(http.CloseNotifier); ok {
        clientGone = cn.CloseNotify()
    }
    select {
    case <-time.After(d):
    case <-clientGone:
    }
}
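The doc comment on archiveProfilesAndQueries already gives the request shape; the sketch below simply performs that request and saves the resulting archive to disk. The host/port and the output filename are assumptions for illustration.

package main

import (
    "io"
    "log"
    "net/http"
    "os"
)

func main() {
    // Request goroutine/heap/block profiles plus a 45-second CPU profile.
    resp, err := http.Get("http://localhost:8086/debug/pprof/all?cpu=true&seconds=45")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    f, err := os.Create("profiles.tar.gz")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // The server streams back a gzipped tar archive of all profiles and queries.
    if _, err := io.Copy(f, resp.Body); err != nil {
        log.Fatal(err)
    }
}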
140 vendor/github.com/influxdata/influxdb/services/httpd/requests.go (generated, vendored, normal file)
@@ -0,0 +1,140 @@

package httpd

import (
    "container/list"
    "fmt"
    "net"
    "net/http"
    "sync"
    "sync/atomic"

    "github.com/influxdata/influxdb/services/meta"
)

type RequestInfo struct {
    IPAddr   string
    Username string
}

type RequestStats struct {
    Writes  int64 `json:"writes"`
    Queries int64 `json:"queries"`
}

func (r *RequestInfo) String() string {
    if r.Username != "" {
        return fmt.Sprintf("%s:%s", r.Username, r.IPAddr)
    }
    return r.IPAddr
}

type RequestProfile struct {
    tracker *RequestTracker
    elem    *list.Element

    mu       sync.RWMutex
    Requests map[RequestInfo]*RequestStats
}

func (p *RequestProfile) AddWrite(info RequestInfo) {
    p.add(info, p.addWrite)
}

func (p *RequestProfile) AddQuery(info RequestInfo) {
    p.add(info, p.addQuery)
}

func (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {
    // Look for a request entry for this request.
    p.mu.RLock()
    st, ok := p.Requests[info]
    p.mu.RUnlock()
    if ok {
        fn(st)
        return
    }

    // There is no entry in the request tracker. Create one.
    p.mu.Lock()
    if st, ok := p.Requests[info]; ok {
        // Something else created this entry while we were waiting for the lock.
        p.mu.Unlock()
        fn(st)
        return
    }

    st = &RequestStats{}
    p.Requests[info] = st
    p.mu.Unlock()
    fn(st)
}

func (p *RequestProfile) addWrite(st *RequestStats) {
    atomic.AddInt64(&st.Writes, 1)
}

func (p *RequestProfile) addQuery(st *RequestStats) {
    atomic.AddInt64(&st.Queries, 1)
}

// Stop informs the RequestTracker to stop collecting statistics for this
// profile.
func (p *RequestProfile) Stop() {
    p.tracker.mu.Lock()
    p.tracker.profiles.Remove(p.elem)
    p.tracker.mu.Unlock()
}

type RequestTracker struct {
    mu       sync.RWMutex
    profiles *list.List
}

func NewRequestTracker() *RequestTracker {
    return &RequestTracker{
        profiles: list.New(),
    }
}

func (rt *RequestTracker) TrackRequests() *RequestProfile {
    // Perform the memory allocation outside of the lock.
    profile := &RequestProfile{
        Requests: make(map[RequestInfo]*RequestStats),
        tracker:  rt,
    }

    rt.mu.Lock()
    profile.elem = rt.profiles.PushBack(profile)
    rt.mu.Unlock()
    return profile
}

func (rt *RequestTracker) Add(req *http.Request, user meta.User) {
    rt.mu.RLock()
    if rt.profiles.Len() == 0 {
        rt.mu.RUnlock()
        return
    }
    defer rt.mu.RUnlock()

    var info RequestInfo
    host, _, err := net.SplitHostPort(req.RemoteAddr)
    if err != nil {
        return
    }

    info.IPAddr = host
    if user != nil {
        info.Username = user.ID()
    }

    // Add the request info to the profiles.
    for p := rt.profiles.Front(); p != nil; p = p.Next() {
        profile := p.Value.(*RequestProfile)
        if req.URL.Path == "/query" {
            profile.AddQuery(info)
        } else if req.URL.Path == "/write" {
            profile.AddWrite(info)
        }
    }
}
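A minimal sketch of the intended usage pattern, assuming the HTTP handler is already calling RequestTracker.Add for every /query and /write request (as the vendored handler.go does). The ten-second sampling window and the log output are illustrative assumptions.

package main

import (
    "log"
    "sync/atomic"
    "time"

    "github.com/influxdata/influxdb/services/httpd"
)

func main() {
    // In the real service this tracker is shared with the HTTP handler,
    // which calls rt.Add(req, user) as requests arrive.
    rt := httpd.NewRequestTracker()

    // Sample traffic for ten seconds, then detach the profile.
    profile := rt.TrackRequests()
    time.Sleep(10 * time.Second)
    profile.Stop()

    // Report per user/IP counts; counters were incremented atomically.
    for info, stats := range profile.Requests {
        log.Printf("%s: writes=%d queries=%d", info.String(),
            atomic.LoadInt64(&stats.Writes), atomic.LoadInt64(&stats.Queries))
    }
}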
166
vendor/github.com/influxdata/influxdb/services/httpd/response_logger.go
generated
vendored
Normal file
166
vendor/github.com/influxdata/influxdb/services/httpd/response_logger.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
package httpd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/influxql"
|
||||
)
|
||||
|
||||
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status
|
||||
// code and body size
|
||||
type responseLogger struct {
|
||||
w http.ResponseWriter
|
||||
status int
|
||||
size int
|
||||
}
|
||||
|
||||
func (l *responseLogger) CloseNotify() <-chan bool {
|
||||
if notifier, ok := l.w.(http.CloseNotifier); ok {
|
||||
return notifier.CloseNotify()
|
||||
}
|
||||
// needed for response recorder for testing
|
||||
return make(<-chan bool)
|
||||
}
|
||||
|
||||
func (l *responseLogger) Header() http.Header {
|
||||
return l.w.Header()
|
||||
}
|
||||
|
||||
func (l *responseLogger) Flush() {
|
||||
l.w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (l *responseLogger) Write(b []byte) (int, error) {
|
||||
if l.status == 0 {
|
||||
// Set status if WriteHeader has not been called
|
||||
l.status = http.StatusOK
|
||||
}
|
||||
size, err := l.w.Write(b)
|
||||
l.size += size
|
||||
return size, err
|
||||
}
|
||||
|
||||
func (l *responseLogger) WriteHeader(s int) {
|
||||
l.w.WriteHeader(s)
|
||||
l.status = s
|
||||
}
|
||||
|
||||
func (l *responseLogger) Status() int {
|
||||
if l.status == 0 {
|
||||
// This can happen if we never actually write data, but only set response headers.
|
||||
l.status = http.StatusOK
|
||||
}
|
||||
return l.status
|
||||
}
|
||||
|
||||
func (l *responseLogger) Size() int {
|
||||
return l.size
|
||||
}
|
||||
|
||||
// redact any occurrence of a password parameter, 'p'
|
||||
func redactPassword(r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
if p := q.Get("p"); p != "" {
|
||||
q.Set("p", "[REDACTED]")
|
||||
r.URL.RawQuery = q.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
// Common Log Format: http://en.wikipedia.org/wiki/Common_Log_Format
|
||||
|
||||
// buildLogLine creates a common log format
|
||||
// in addition to the common fields, we also append referrer, user agent,
|
||||
// request ID and response time (microseconds)
|
||||
// ie, in apache mod_log_config terms:
|
||||
// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D
|
||||
func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string {
|
||||
|
||||
redactPassword(r)
|
||||
|
||||
username := parseUsername(r)
|
||||
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
host = r.RemoteAddr
|
||||
}
|
||||
|
||||
if xff := r.Header["X-Forwarded-For"]; xff != nil {
|
||||
addrs := append(xff, host)
|
||||
host = strings.Join(addrs, ",")
|
||||
}
|
||||
|
||||
uri := r.URL.RequestURI()
|
||||
|
||||
referer := r.Referer()
|
||||
|
||||
userAgent := r.UserAgent()
|
||||
|
||||
return fmt.Sprintf(`%s - %s [%s] "%s %s %s" %s %s "%s" "%s" %s %d`,
|
||||
host,
|
||||
detect(username, "-"),
|
||||
start.Format("02/Jan/2006:15:04:05 -0700"),
|
||||
r.Method,
|
||||
uri,
|
||||
r.Proto,
|
||||
detect(strconv.Itoa(l.Status()), "-"),
|
||||
strconv.Itoa(l.Size()),
|
||||
detect(referer, "-"),
|
||||
detect(userAgent, "-"),
|
||||
r.Header.Get("Request-Id"),
|
||||
// response time, report in microseconds because this is consistent
|
||||
// with apache's %D parameter in mod_log_config
|
||||
int64(time.Since(start)/time.Microsecond))
|
||||
}
|
||||
|
||||
// detect detects the first presence of a non blank string and returns it
|
||||
func detect(values ...string) string {
|
||||
for _, v := range values {
|
||||
if v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// parses the username either from the url or auth header
func parseUsername(r *http.Request) string {
	var (
		username = ""
		url      = r.URL
	)

	// get username from the url if passed there
	if url.User != nil {
		if name := url.User.Username(); name != "" {
			username = name
		}
	}

	// Try to get the username from the query param 'u'
	q := url.Query()
	if u := q.Get("u"); u != "" {
		username = u
	}

	// Try to get it from the authorization header if set there
	if username == "" {
		if u, _, ok := r.BasicAuth(); ok {
			username = u
		}
	}
	return username
}

// sanitize redacts passwords from query string for logging.
func sanitize(r *http.Request) {
	values := r.URL.Query()
	for i, q := range values["q"] {
		values["q"][i] = influxql.Sanitize(q)
	}
	r.URL.RawQuery = values.Encode()
}

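// Illustrative sketch (not part of the vendored source): sanitize applied to a query
// that carries a password literal. The exact redacted form is determined by
// influxql.Sanitize; the point is that the secret never reaches the access log.
func exampleSanitize() {
	r, _ := http.NewRequest("GET", "/query?q=CREATE+USER+bob+WITH+PASSWORD+'s3cret'", nil)
	sanitize(r)
	fmt.Println(r.URL.RawQuery) // the password literal has been redacted from the q parameter
}
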
181
vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
package httpd

import (
	"encoding/csv"
	"encoding/json"
	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/influxdata/influxdb/models"
)

// ResponseWriter is an interface for writing a response.
type ResponseWriter interface {
	// WriteResponse writes a response.
	WriteResponse(resp Response) (int, error)

	http.ResponseWriter
}

// NewResponseWriter creates a new ResponseWriter based on the Accept header
// in the request that wraps the ResponseWriter.
func NewResponseWriter(w http.ResponseWriter, r *http.Request) ResponseWriter {
	pretty := r.URL.Query().Get("pretty") == "true"
	rw := &responseWriter{ResponseWriter: w}
	switch r.Header.Get("Accept") {
	case "application/csv", "text/csv":
		w.Header().Add("Content-Type", "text/csv")
		rw.formatter = &csvFormatter{statementID: -1, Writer: w}
	case "application/json":
		fallthrough
	default:
		w.Header().Add("Content-Type", "application/json")
		rw.formatter = &jsonFormatter{Pretty: pretty, Writer: w}
	}
	return rw
}

// WriteError is a convenience function for writing an error response to the ResponseWriter.
func WriteError(w ResponseWriter, err error) (int, error) {
	return w.WriteResponse(Response{Err: err})
}

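// Illustrative sketch (not part of the vendored file): a hypothetical endpoint that lets
// NewResponseWriter negotiate the output format from the Accept header and reports
// failures through WriteError.
func exampleEndpoint(w http.ResponseWriter, r *http.Request, err error) {
	rw := NewResponseWriter(w, r) // CSV for "text/csv"/"application/csv", JSON otherwise
	if err != nil {
		WriteError(rw, err) // serialized as Response{Err: err} in the negotiated format
		return
	}
	rw.WriteResponse(Response{}) // an empty, successful response
}
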
// responseWriter is an implementation of ResponseWriter.
type responseWriter struct {
	formatter interface {
		WriteResponse(resp Response) (int, error)
	}
	http.ResponseWriter
}

// WriteResponse writes the response using the formatter.
func (w *responseWriter) WriteResponse(resp Response) (int, error) {
	return w.formatter.WriteResponse(resp)
}

// Flush flushes the ResponseWriter if it has a Flush() method.
func (w *responseWriter) Flush() {
	if w, ok := w.ResponseWriter.(http.Flusher); ok {
		w.Flush()
	}
}

// CloseNotify calls CloseNotify on the underlying http.ResponseWriter if it
// exists. Otherwise, it returns a nil channel that will never notify.
func (w *responseWriter) CloseNotify() <-chan bool {
	if notifier, ok := w.ResponseWriter.(http.CloseNotifier); ok {
		return notifier.CloseNotify()
	}
	return nil
}

type jsonFormatter struct {
	io.Writer
	Pretty bool
}

func (w *jsonFormatter) WriteResponse(resp Response) (n int, err error) {
	var b []byte
	if w.Pretty {
		b, err = json.MarshalIndent(resp, "", "    ")
	} else {
		b, err = json.Marshal(resp)
	}

	if err != nil {
		n, err = io.WriteString(w, err.Error())
	} else {
		n, err = w.Write(b)
	}

	w.Write([]byte("\n"))
	n++
	return n, err
}

type csvFormatter struct {
	io.Writer
	statementID int
	columns     []string
}

func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {
	csv := csv.NewWriter(w)
	for _, result := range resp.Results {
		if result.StatementID != w.statementID {
			// If there are no series in the result, skip past this result.
			if len(result.Series) == 0 {
				continue
			}

			// Set the statement id and print out a newline if this is not the first statement.
			if w.statementID >= 0 {
				// Flush the csv writer and write a newline.
				csv.Flush()
				if err := csv.Error(); err != nil {
					return n, err
				}

				out, err := io.WriteString(w, "\n")
				if err != nil {
					return n, err
				}
				n += out
			}
			w.statementID = result.StatementID

			// Print out the column headers from the first series.
			w.columns = make([]string, 2+len(result.Series[0].Columns))
			w.columns[0] = "name"
			w.columns[1] = "tags"
			copy(w.columns[2:], result.Series[0].Columns)
			if err := csv.Write(w.columns); err != nil {
				return n, err
			}
		}

		for _, row := range result.Series {
			w.columns[0] = row.Name
			if len(row.Tags) > 0 {
				w.columns[1] = string(models.NewTags(row.Tags).HashKey()[1:])
			} else {
				w.columns[1] = ""
			}
			for _, values := range row.Values {
				for i, value := range values {
					if value == nil {
						w.columns[i+2] = ""
						continue
					}

					switch v := value.(type) {
					case float64:
						w.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64)
					case int64:
						w.columns[i+2] = strconv.FormatInt(v, 10)
					case string:
						w.columns[i+2] = v
					case bool:
						if v {
							w.columns[i+2] = "true"
						} else {
							w.columns[i+2] = "false"
						}
					case time.Time:
						w.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10)
					case *float64, *int64, *string, *bool:
						w.columns[i+2] = ""
					}
				}
				csv.Write(w.columns)
			}
		}
	}
	csv.Flush()
	if err := csv.Error(); err != nil {
		return n, err
	}
	return n, nil
}
61
vendor/github.com/influxdata/influxdb/services/httpd/response_writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
package httpd_test

import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"
	"time"

	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/services/httpd"
)

func TestResponseWriter_CSV(t *testing.T) {
	header := make(http.Header)
	header.Set("Accept", "text/csv")
	r := &http.Request{
		Header: header,
		URL:    &url.URL{},
	}
	w := httptest.NewRecorder()

	writer := httpd.NewResponseWriter(w, r)
	writer.WriteResponse(httpd.Response{
		Results: []*influxql.Result{
			{
				StatementID: 0,
				Series: []*models.Row{
					{
						Name: "cpu",
						Tags: map[string]string{
							"host":   "server01",
							"region": "uswest",
						},
						Columns: []string{"time", "value"},
						Values: [][]interface{}{
							{time.Unix(0, 10), float64(2.5)},
							{time.Unix(0, 20), int64(5)},
							{time.Unix(0, 30), nil},
							{time.Unix(0, 40), "foobar"},
							{time.Unix(0, 50), true},
							{time.Unix(0, 60), false},
						},
					},
				},
			},
		},
	})

	if got, want := w.Body.String(), `name,tags,time,value
cpu,"host=server01,region=uswest",10,2.5
cpu,"host=server01,region=uswest",20,5
cpu,"host=server01,region=uswest",30,
cpu,"host=server01,region=uswest",40,foobar
cpu,"host=server01,region=uswest",50,true
cpu,"host=server01,region=uswest",60,false
`; got != want {
		t.Errorf("unexpected output:\n\ngot=%v\nwant=%s", got, want)
	}
}
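// Illustrative sketch (not part of the vendored file): the JSON default path of
// NewResponseWriter, written in the style of the test above and using only imports
// already present in this test file. With no Accept header the writer falls back to the
// jsonFormatter, so the recorder ends up with a Content-Type of application/json and a
// JSON-encoded Response followed by a trailing newline; the exact body bytes are not
// asserted here.
func ExampleNewResponseWriter_jsonDefault() {
	r := &http.Request{Header: make(http.Header), URL: &url.URL{}}
	w := httptest.NewRecorder()

	writer := httpd.NewResponseWriter(w, r)
	writer.WriteResponse(httpd.Response{})

	_ = w.Header().Get("Content-Type") // "application/json"
	_ = w.Body.String()                // the encoded response, terminated by "\n"
}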
214
vendor/github.com/influxdata/influxdb/services/httpd/service.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
// Package httpd implements the HTTP service and REST API for InfluxDB.
package httpd // import "github.com/influxdata/influxdb/services/httpd"

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"os"
	"path"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/influxdata/influxdb/models"
	"github.com/uber-go/zap"
)

// statistics gathered by the httpd package.
const (
	statRequest                      = "req"                  // Number of HTTP requests served
	statQueryRequest                 = "queryReq"             // Number of query requests served
	statWriteRequest                 = "writeReq"             // Number of write requests served
	statPingRequest                  = "pingReq"              // Number of ping requests served
	statStatusRequest                = "statusReq"            // Number of status requests served
	statWriteRequestBytesReceived    = "writeReqBytes"        // Sum of all bytes in write requests
	statQueryRequestBytesTransmitted = "queryRespBytes"       // Sum of all bytes returned in query responses
	statPointsWrittenOK              = "pointsWrittenOK"      // Number of points written OK
	statPointsWrittenDropped         = "pointsWrittenDropped" // Number of points dropped by the storage engine
	statPointsWrittenFail            = "pointsWrittenFail"    // Number of points that failed to be written
	statAuthFail                     = "authFail"             // Number of authentication failures
	statRequestDuration              = "reqDurationNs"        // Number of (wall-time) nanoseconds spent inside requests
	statQueryRequestDuration         = "queryReqDurationNs"   // Number of (wall-time) nanoseconds spent inside query requests
	statWriteRequestDuration         = "writeReqDurationNs"   // Number of (wall-time) nanoseconds spent inside write requests
	statRequestsActive               = "reqActive"            // Number of currently active requests
	statWriteRequestsActive          = "writeReqActive"       // Number of currently active write requests
	statClientError                  = "clientError"          // Number of HTTP responses due to client error
	statServerError                  = "serverError"          // Number of HTTP responses due to server error
)

// Service manages the listener and handler for an HTTP endpoint.
type Service struct {
	ln    net.Listener
	addr  string
	https bool
	cert  string
	key   string
	limit int
	err   chan error

	unixSocket         bool
	bindSocket         string
	unixSocketListener net.Listener

	Handler *Handler

	Logger zap.Logger
}

// NewService returns a new instance of Service.
func NewService(c Config) *Service {
	s := &Service{
		addr:       c.BindAddress,
		https:      c.HTTPSEnabled,
		cert:       c.HTTPSCertificate,
		key:        c.HTTPSPrivateKey,
		limit:      c.MaxConnectionLimit,
		err:        make(chan error),
		unixSocket: c.UnixSocketEnabled,
		bindSocket: c.BindSocket,
		Handler:    NewHandler(c),
		Logger:     zap.New(zap.NullEncoder()),
	}
	if s.key == "" {
		s.key = s.cert
	}
	s.Handler.Logger = s.Logger
	return s
}

// Open starts the service.
func (s *Service) Open() error {
	s.Logger.Info("Starting HTTP service")
	s.Logger.Info(fmt.Sprint("Authentication enabled:", s.Handler.Config.AuthEnabled))

	// Open listener.
	if s.https {
		cert, err := tls.LoadX509KeyPair(s.cert, s.key)
		if err != nil {
			return err
		}

		listener, err := tls.Listen("tcp", s.addr, &tls.Config{
			Certificates: []tls.Certificate{cert},
		})
		if err != nil {
			return err
		}

		s.Logger.Info(fmt.Sprint("Listening on HTTPS:", listener.Addr().String()))
		s.ln = listener
	} else {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			return err
		}

		s.Logger.Info(fmt.Sprint("Listening on HTTP:", listener.Addr().String()))
		s.ln = listener
	}

	// Open unix socket listener.
	if s.unixSocket {
		if runtime.GOOS == "windows" {
			return fmt.Errorf("unable to use unix socket on windows")
		}
		if err := os.MkdirAll(path.Dir(s.bindSocket), 0777); err != nil {
			return err
		}
		if err := syscall.Unlink(s.bindSocket); err != nil && !os.IsNotExist(err) {
			return err
		}

		listener, err := net.Listen("unix", s.bindSocket)
		if err != nil {
			return err
		}

		s.Logger.Info(fmt.Sprint("Listening on unix socket:", listener.Addr().String()))
		s.unixSocketListener = listener

		go s.serveUnixSocket()
	}

	// Enforce a connection limit if one has been given.
	if s.limit > 0 {
		s.ln = LimitListener(s.ln, s.limit)
	}

	// wait for the listeners to start
	timeout := time.Now().Add(time.Second)
	for {
		if s.ln.Addr() != nil {
			break
		}

		if time.Now().After(timeout) {
			return fmt.Errorf("unable to open without http listener running")
		}
		time.Sleep(10 * time.Millisecond)
	}

	// Begin listening for requests in a separate goroutine.
	go s.serveTCP()
	return nil
}

// Close closes the underlying listener.
func (s *Service) Close() error {
	if s.ln != nil {
		if err := s.ln.Close(); err != nil {
			return err
		}
	}
	if s.unixSocketListener != nil {
		if err := s.unixSocketListener.Close(); err != nil {
			return err
		}
	}
	return nil
}

// WithLogger sets the logger for the service.
func (s *Service) WithLogger(log zap.Logger) {
	s.Logger = log.With(zap.String("service", "httpd"))
	s.Handler.Logger = s.Logger
}

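// Illustrative sketch (not part of the vendored file): attaching a caller-supplied zap
// logger, for example one built with zap.New and a non-null encoder from the same
// pre-1.0 uber-go/zap package that is imported above.
func exampleWithLogger(s *Service, base zap.Logger) {
	s.WithLogger(base) // the service and its handler now log with a service=httpd field
}
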
// Err returns a channel for fatal errors that occur on the listener.
func (s *Service) Err() <-chan error { return s.err }

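// Illustrative sketch (not part of the vendored file): the expected lifecycle of the
// service. The Config value would normally come from this package's NewConfig plus any
// overrides; the ":0" bind address used here is only an illustration.
func exampleServiceLifecycle(c Config) error {
	c.BindAddress = ":0" // let the OS pick a free port

	s := NewService(c)
	if err := s.Open(); err != nil {
		return err
	}
	defer s.Close()

	// Fatal listener errors are delivered asynchronously on Err.
	select {
	case err := <-s.Err():
		return err
	default:
	}
	return nil
}
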
// Addr returns the listener's address. Returns nil if listener is closed.
func (s *Service) Addr() net.Addr {
	if s.ln != nil {
		return s.ln.Addr()
	}
	return nil
}

// Statistics returns statistics for periodic monitoring.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	return s.Handler.Statistics(models.NewTags(map[string]string{"bind": s.addr}).Merge(tags).Map())
}

// serveTCP serves the handler from the TCP listener.
func (s *Service) serveTCP() {
	s.serve(s.ln)
}

// serveUnixSocket serves the handler from the unix socket listener.
func (s *Service) serveUnixSocket() {
	s.serve(s.unixSocketListener)
}

// serve serves the handler from the listener.
func (s *Service) serve(listener net.Listener) {
	// The listener was closed so exit
	// See https://github.com/golang/go/issues/4373
	err := http.Serve(listener, s.Handler)
	if err != nil && !strings.Contains(err.Error(), "closed") {
		s.err <- fmt.Errorf("listener failed: addr=%s, err=%s", s.Addr(), err)
	}
}