mirror of
https://github.com/Oxalide/vsphere-influxdb-go.git
synced 2023-10-10 11:36:51 +00:00
add vendoring with go dep
This commit is contained in:
10
vendor/github.com/influxdata/influxdb/services/opentsdb/README.md
generated
vendored
Normal file
10
vendor/github.com/influxdata/influxdb/services/opentsdb/README.md
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
OpenTSDB Input
|
||||
============
|
||||
InfluxDB supports both the telnet and HTTP OpenTSDB protocol. This means that InfluxDB can act as a drop-in replacement for your OpenTSDB system.
|
||||
|
||||
## Configuration
|
||||
The OpenTSDB inputs allow the binding address, target database, and target retention policy within that database, to be set. If the database does not exist, it will be created automatically when the input is initialized. If you also decide to configure retention policy (without configuration the input will use the auto-created default retention policy), both the database and retention policy must already exist.
|
||||
|
||||
The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.
|
||||
|
||||
The OpenTSDB input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches.
|
129
vendor/github.com/influxdata/influxdb/services/opentsdb/config.go
generated
vendored
Normal file
129
vendor/github.com/influxdata/influxdb/services/opentsdb/config.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/monitor/diagnostics"
|
||||
"github.com/influxdata/influxdb/toml"
|
||||
)
|
||||
|
||||
const (
	// DefaultBindAddress is the default address that the service binds to.
	DefaultBindAddress = ":4242"

	// DefaultDatabase is the default database used for writes.
	DefaultDatabase = "opentsdb"

	// DefaultRetentionPolicy is the default retention policy used for writes.
	// Empty means the target database's default retention policy is used.
	DefaultRetentionPolicy = ""

	// DefaultConsistencyLevel is the default write consistency level.
	DefaultConsistencyLevel = "one"

	// DefaultBatchSize is the default OpenTSDB batch size (max points per batch).
	DefaultBatchSize = 1000

	// DefaultBatchTimeout is the default OpenTSDB batch timeout: partial
	// batches are flushed after this long.
	DefaultBatchTimeout = time.Second

	// DefaultBatchPending is the default number of batches that can be in the queue.
	DefaultBatchPending = 5

	// DefaultCertificate is the default location of the certificate used when TLS is enabled.
	DefaultCertificate = "/etc/ssl/influxdb.pem"
)
|
||||
|
||||
// Config represents the configuration of the OpenTSDB service.
type Config struct {
	Enabled          bool          `toml:"enabled"`           // whether the service is started
	BindAddress      string        `toml:"bind-address"`      // TCP address the listener binds to
	Database         string        `toml:"database"`          // target database for incoming points
	RetentionPolicy  string        `toml:"retention-policy"`  // target RP; empty selects the database default
	ConsistencyLevel string        `toml:"consistency-level"` // write consistency level name, e.g. "one"
	TLSEnabled       bool          `toml:"tls-enabled"`       // serve TLS on the bind address
	Certificate      string        `toml:"certificate"`       // PEM path used for both cert and key when TLS is enabled
	BatchSize        int           `toml:"batch-size"`        // maximum points per batch
	BatchPending     int           `toml:"batch-pending"`     // maximum in-memory batches
	BatchTimeout     toml.Duration `toml:"batch-timeout"`     // flush interval for partial batches
	LogPointErrors   bool          `toml:"log-point-errors"`  // log malformed points instead of dropping them silently
}
|
||||
|
||||
// NewConfig returns a new config for the service.
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
BindAddress: DefaultBindAddress,
|
||||
Database: DefaultDatabase,
|
||||
RetentionPolicy: DefaultRetentionPolicy,
|
||||
ConsistencyLevel: DefaultConsistencyLevel,
|
||||
TLSEnabled: false,
|
||||
Certificate: DefaultCertificate,
|
||||
BatchSize: DefaultBatchSize,
|
||||
BatchPending: DefaultBatchPending,
|
||||
BatchTimeout: toml.Duration(DefaultBatchTimeout),
|
||||
LogPointErrors: true,
|
||||
}
|
||||
}
|
||||
|
||||
// WithDefaults takes the given config and returns a new config with any required
|
||||
// default values set.
|
||||
func (c *Config) WithDefaults() *Config {
|
||||
d := *c
|
||||
if d.BindAddress == "" {
|
||||
d.BindAddress = DefaultBindAddress
|
||||
}
|
||||
if d.Database == "" {
|
||||
d.Database = DefaultDatabase
|
||||
}
|
||||
if d.RetentionPolicy == "" {
|
||||
d.RetentionPolicy = DefaultRetentionPolicy
|
||||
}
|
||||
if d.ConsistencyLevel == "" {
|
||||
d.ConsistencyLevel = DefaultConsistencyLevel
|
||||
}
|
||||
if d.Certificate == "" {
|
||||
d.Certificate = DefaultCertificate
|
||||
}
|
||||
if d.BatchSize == 0 {
|
||||
d.BatchSize = DefaultBatchSize
|
||||
}
|
||||
if d.BatchPending == 0 {
|
||||
d.BatchPending = DefaultBatchPending
|
||||
}
|
||||
if d.BatchTimeout == 0 {
|
||||
d.BatchTimeout = toml.Duration(DefaultBatchTimeout)
|
||||
}
|
||||
|
||||
return &d
|
||||
}
|
||||
|
||||
// Configs wraps a slice of Config to aggregate diagnostics across
// multiple configured OpenTSDB inputs.
type Configs []Config
|
||||
|
||||
// Diagnostics returns one set of diagnostics for all of the Configs.
|
||||
func (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) {
|
||||
d := &diagnostics.Diagnostics{
|
||||
Columns: []string{"enabled", "bind-address", "database", "retention-policy", "batch-size", "batch-pending", "batch-timeout"},
|
||||
}
|
||||
|
||||
for _, cc := range c {
|
||||
if !cc.Enabled {
|
||||
d.AddRow([]interface{}{false})
|
||||
continue
|
||||
}
|
||||
|
||||
r := []interface{}{true, cc.BindAddress, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchTimeout}
|
||||
d.AddRow(r)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Enabled returns true if any underlying Config is Enabled.
|
||||
func (c Configs) Enabled() bool {
|
||||
for _, cc := range c {
|
||||
if cc.Enabled {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
41
vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go
generated
vendored
Normal file
41
vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package opentsdb_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/influxdata/influxdb/services/opentsdb"
|
||||
)
|
||||
|
||||
// TestConfig_Parse verifies that a TOML snippet decodes into Config with
// every provided key mapped onto the matching struct field.
func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c opentsdb.Config
	if _, err := toml.Decode(`
enabled = true
bind-address = ":9000"
database = "xxx"
consistency-level ="all"
tls-enabled = true
certificate = "/etc/ssl/cert.pem"
log-point-errors = true
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration field by field.
	if c.Enabled != true {
		t.Fatalf("unexpected enabled: %v", c.Enabled)
	} else if c.BindAddress != ":9000" {
		t.Fatalf("unexpected bind address: %s", c.BindAddress)
	} else if c.Database != "xxx" {
		t.Fatalf("unexpected database: %s", c.Database)
	} else if c.ConsistencyLevel != "all" {
		t.Fatalf("unexpected consistency-level: %s", c.ConsistencyLevel)
	} else if c.TLSEnabled != true {
		t.Fatalf("unexpected tls-enabled: %v", c.TLSEnabled)
	} else if c.Certificate != "/etc/ssl/cert.pem" {
		t.Fatalf("unexpected certificate: %s", c.Certificate)
	} else if !c.LogPointErrors {
		t.Fatalf("unexpected log-point-errors: %v", c.LogPointErrors)
	}
}
|
198
vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go
generated
vendored
Normal file
198
vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb"
|
||||
"github.com/influxdata/influxdb/models"
|
||||
"github.com/uber-go/zap"
|
||||
)
|
||||
|
||||
// Handler is an http.Handler for the OpenTSDB service.
type Handler struct {
	Database        string // target database for decoded points
	RetentionPolicy string // target retention policy; empty selects the database default

	// PointsWriter receives every successfully decoded batch of points.
	PointsWriter interface {
		WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error
	}

	Logger zap.Logger

	// stats is shared with the owning Service; servePut nil-checks it
	// before updating counters.
	stats *Statistics
}
|
||||
|
||||
// ServeHTTP handles an HTTP request of the OpenTSDB REST API.
|
||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/metadata/put":
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
case "/api/put":
|
||||
h.servePut(w, r)
|
||||
default:
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// servePut implements OpenTSDB's HTTP /api/put endpoint.
// It accepts either a single JSON point object or a JSON array of point
// objects (optionally gzip-encoded), converts them to models.Points, and
// writes them in a single call. Responds 204 No Content on success.
func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	// Require POST method.
	if r.Method != "POST" {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}

	// Wrap reader if it's gzip encoded.
	var br *bufio.Reader
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := gzip.NewReader(r.Body)
		if err != nil {
			http.Error(w, "could not read gzip, "+err.Error(), http.StatusBadRequest)
			return
		}

		br = bufio.NewReader(zr)
	} else {
		br = bufio.NewReader(r.Body)
	}

	// Lookahead at the first byte.
	// NOTE(review): if Peek ever returned zero bytes with a nil error,
	// err.Error() below would panic; bufio documents that a short Peek
	// returns a non-nil error, so this looks unreachable — confirm.
	f, err := br.Peek(1)
	if err != nil || len(f) != 1 {
		http.Error(w, "peek error: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Peek to see if this is a JSON array ('[') or a single hash ('{').
	var multi bool
	switch f[0] {
	case '{':
	case '[':
		multi = true
	default:
		http.Error(w, "expected JSON array or hash", http.StatusBadRequest)
		return
	}

	// Decode JSON data into slice of points.
	// A single object is decoded into dps[0]; an array replaces the slice.
	dps := make([]point, 1)
	if dec := json.NewDecoder(br); multi {
		if err = dec.Decode(&dps); err != nil {
			http.Error(w, "json array decode error", http.StatusBadRequest)
			return
		}
	} else {
		if err = dec.Decode(&dps[0]); err != nil {
			http.Error(w, "json object decode error", http.StatusBadRequest)
			return
		}
	}

	// Convert points into TSDB points.
	points := make([]models.Point, 0, len(dps))
	for i := range dps {
		p := dps[i]

		// Convert timestamp to Go time: values below 10^10 are treated
		// as seconds since epoch, larger values as milliseconds.
		// NOTE(review): (p.Time%1000)*1000 passes microseconds where
		// time.Unix expects nanoseconds, so sub-second precision looks
		// off by a factor of 1000 — confirm against upstream intent.
		var ts time.Time
		if p.Time < 10000000000 {
			ts = time.Unix(p.Time, 0)
		} else {
			ts = time.Unix(p.Time/1000, (p.Time%1000)*1000)
		}

		// Invalid points are dropped individually; the rest still write.
		pt, err := models.NewPoint(p.Metric, models.NewTags(p.Tags), map[string]interface{}{"value": p.Value}, ts)
		if err != nil {
			h.Logger.Info(fmt.Sprintf("Dropping point %v: %v", p.Metric, err))
			if h.stats != nil {
				atomic.AddInt64(&h.stats.InvalidDroppedPoints, 1)
			}
			continue
		}
		points = append(points, pt)
	}

	// Write points. Client errors map to 400, anything else to 500.
	// The write always uses ConsistencyLevelAny here.
	if err := h.PointsWriter.WritePointsPrivileged(h.Database, h.RetentionPolicy, models.ConsistencyLevelAny, points); influxdb.IsClientError(err) {
		h.Logger.Info(fmt.Sprint("write series error: ", err))
		http.Error(w, "write series error: "+err.Error(), http.StatusBadRequest)
		return
	} else if err != nil {
		h.Logger.Info(fmt.Sprint("write series error: ", err))
		http.Error(w, "write series error: "+err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
|
||||
|
||||
// chanListener is a net.Listener whose connections are delivered through
// a channel rather than accepted from a socket. The OpenTSDB service uses
// it to hand sniffed HTTP connections to an http.Server.
type chanListener struct {
	addr   net.Addr
	ch     chan net.Conn
	done   chan struct{}
	closer sync.Once // closer ensures that Close is idempotent.
}

// newChanListener returns a chanListener that reports addr as its address.
func newChanListener(addr net.Addr) *chanListener {
	ln := &chanListener{
		addr: addr,
		ch:   make(chan net.Conn),
		done: make(chan struct{}),
	}
	return ln
}

// Accept blocks until a connection is delivered on the channel, returning
// an error once the listener has been closed.
func (ln *chanListener) Accept() (net.Conn, error) {
	errClosed := errors.New("network connection closed")
	select {
	case conn, ok := <-ln.ch:
		if !ok {
			return nil, errClosed
		}
		return conn, nil
	case <-ln.done:
		return nil, errClosed
	}
}

// Close marks the listener as done. It is safe to call multiple times
// and always returns nil.
func (ln *chanListener) Close() error {
	ln.closer.Do(func() { close(ln.done) })
	return nil
}

// Addr returns the network address the listener was created with.
func (ln *chanListener) Addr() net.Addr { return ln.addr }
|
||||
|
||||
// readerConn represents a net.Conn with an assignable reader.
// It lets the service replay bytes that were consumed while sniffing
// whether a connection speaks HTTP or telnet; writes still go straight
// to the embedded connection.
type readerConn struct {
	net.Conn
	r io.Reader // substitute read side for the connection
}

// Read implements the io.Reader interface by delegating to the
// substitute reader rather than the embedded connection.
func (conn *readerConn) Read(b []byte) (n int, err error) { return conn.r.Read(b) }

// point represents an incoming JSON data point.
type point struct {
	Metric string            `json:"metric"`         // measurement name
	Time   int64             `json:"timestamp"`      // epoch time; < 1e10 treated as seconds, otherwise milliseconds (see servePut)
	Value  float64           `json:"value"`          // sample value, stored in the "value" field
	Tags   map[string]string `json:"tags,omitempty"` // optional tag set
}
|
471
vendor/github.com/influxdata/influxdb/services/opentsdb/service.go
generated
vendored
Normal file
471
vendor/github.com/influxdata/influxdb/services/opentsdb/service.go
generated
vendored
Normal file
@@ -0,0 +1,471 @@
|
||||
// Package opentsdb provides a service for InfluxDB to ingest data via the opentsdb protocol.
|
||||
package opentsdb // import "github.com/influxdata/influxdb/services/opentsdb"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/models"
|
||||
"github.com/influxdata/influxdb/services/meta"
|
||||
"github.com/influxdata/influxdb/tsdb"
|
||||
"github.com/uber-go/zap"
|
||||
)
|
||||
|
||||
// statistics gathered by the openTSDB package.
// Keys with a "tl" prefix cover the telnet line protocol; the remainder
// cover HTTP handling, generic connection accounting, and batch
// transmission to the points writer.
const (
	statHTTPConnectionsHandled   = "httpConnsHandled"
	statTelnetConnectionsActive  = "tlConnsActive"
	statTelnetConnectionsHandled = "tlConnsHandled"
	statTelnetPointsReceived     = "tlPointsRx"
	statTelnetBytesReceived      = "tlBytesRx"
	statTelnetReadError          = "tlReadErr"
	statTelnetBadLine            = "tlBadLine"
	statTelnetBadTime            = "tlBadTime"
	statTelnetBadTag             = "tlBadTag"
	statTelnetBadFloat           = "tlBadFloat"
	statBatchesTransmitted       = "batchesTx"
	statPointsTransmitted        = "pointsTx"
	statBatchesTransmitFail      = "batchesTxFail"
	statConnectionsActive        = "connsActive"
	statConnectionsHandled       = "connsHandled"
	statDroppedPointsInvalid     = "droppedPointsInvalid"
)
|
||||
|
||||
// Service manages the listener and handler for an HTTP endpoint.
// A single TCP listener accepts both telnet- and HTTP-format OpenTSDB
// traffic; HTTP connections are forwarded to an internal http.Server via
// the channel-based listener.
type Service struct {
	ln     net.Listener  // main listener
	httpln *chanListener // http channel-based listener, fed by handleConn

	wg   sync.WaitGroup
	tls  bool   // serve TLS on the main listener
	cert string // PEM path used for both certificate and key

	mu    sync.RWMutex
	ready bool          // Has the required database been created?
	done  chan struct{} // Is the service closing or closed? nil when closed.

	BindAddress     string
	Database        string
	RetentionPolicy string

	// PointsWriter receives all batched point writes.
	PointsWriter interface {
		WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error
	}
	// MetaClient lazily creates the target database on first write.
	MetaClient interface {
		CreateDatabase(name string) (*meta.DatabaseInfo, error)
	}

	// Points received over the telnet protocol are batched.
	batchSize    int
	batchPending int
	batchTimeout time.Duration
	batcher      *tsdb.PointBatcher

	LogPointErrors bool
	Logger         zap.Logger

	stats       *Statistics
	defaultTags models.StatisticTags // tags merged into every reported statistic
}
|
||||
|
||||
// NewService returns a new instance of Service.
|
||||
func NewService(c Config) (*Service, error) {
|
||||
// Use defaults where necessary.
|
||||
d := c.WithDefaults()
|
||||
|
||||
s := &Service{
|
||||
tls: d.TLSEnabled,
|
||||
cert: d.Certificate,
|
||||
BindAddress: d.BindAddress,
|
||||
Database: d.Database,
|
||||
RetentionPolicy: d.RetentionPolicy,
|
||||
batchSize: d.BatchSize,
|
||||
batchPending: d.BatchPending,
|
||||
batchTimeout: time.Duration(d.BatchTimeout),
|
||||
Logger: zap.New(zap.NullEncoder()),
|
||||
LogPointErrors: d.LogPointErrors,
|
||||
stats: &Statistics{},
|
||||
defaultTags: models.StatisticTags{"bind": d.BindAddress},
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Open starts the service. It is a no-op when the service is already
// open. On success the point batcher, the batch-processing goroutine,
// the TCP (or TLS) listener, and both the telnet and HTTP accept loops
// are running.
func (s *Service) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if !s.closed() {
		return nil // Already open.
	}
	s.done = make(chan struct{})

	s.Logger.Info("Starting OpenTSDB service")

	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)
	s.batcher.Start()

	// Start processing batches.
	s.wg.Add(1)
	go func() { defer s.wg.Done(); s.processBatches(s.batcher) }()

	// Open listener.
	// NOTE(review): if listener creation fails below, the batcher and its
	// goroutine are left running with s.ln nil; a subsequent Close would
	// then dereference the nil listener. Confirm callers treat a failed
	// Open as fatal.
	if s.tls {
		// The single PEM file must contain both the certificate and key.
		cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
		if err != nil {
			return err
		}

		listener, err := tls.Listen("tcp", s.BindAddress, &tls.Config{
			Certificates: []tls.Certificate{cert},
		})
		if err != nil {
			return err
		}

		s.Logger.Info(fmt.Sprint("Listening on TLS: ", listener.Addr().String()))
		s.ln = listener
	} else {
		listener, err := net.Listen("tcp", s.BindAddress)
		if err != nil {
			return err
		}

		s.Logger.Info(fmt.Sprint("Listening on: ", listener.Addr().String()))
		s.ln = listener
	}
	// The HTTP listener is fed by handleConn rather than a real socket.
	s.httpln = newChanListener(s.ln.Addr())

	// Begin listening for connections.
	s.wg.Add(2)
	go func() { defer s.wg.Done(); s.serve() }()
	go func() { defer s.wg.Done(); s.serveHTTP() }()

	return nil
}
|
||||
|
||||
// Close closes the openTSDB service. It is a no-op when already closed.
// Otherwise it signals shutdown, closes both listeners, waits for all
// goroutines, and stops the batcher.
func (s *Service) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.closed() {
		return nil // Already closed.
	}
	close(s.done)

	// Close the listeners.
	// NOTE(review): an error returned here leaves s.done closed but the
	// goroutines un-waited and the batcher running; a retried Close would
	// then be a no-op because closed() reports true. Confirm these Close
	// calls cannot realistically fail.
	if err := s.ln.Close(); err != nil {
		return err
	}
	if err := s.httpln.Close(); err != nil {
		return err
	}

	s.wg.Wait()
	s.done = nil // nil done marks the service fully closed for closed()/Open().

	if s.batcher != nil {
		s.batcher.Stop()
	}

	return nil
}
|
||||
|
||||
// Closed returns true if the service is currently closed.
|
||||
func (s *Service) Closed() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.closed()
|
||||
}
|
||||
|
||||
func (s *Service) closed() bool {
|
||||
select {
|
||||
case <-s.done:
|
||||
// Service is closing.
|
||||
return true
|
||||
default:
|
||||
return s.done == nil
|
||||
}
|
||||
}
|
||||
|
||||
// createInternalStorage ensures that the required database has been created.
|
||||
func (s *Service) createInternalStorage() error {
|
||||
s.mu.RLock()
|
||||
ready := s.ready
|
||||
s.mu.RUnlock()
|
||||
if ready {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := s.MetaClient.CreateDatabase(s.Database); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The service is now ready.
|
||||
s.mu.Lock()
|
||||
s.ready = true
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithLogger sets the logger for the service, tagging every entry with
// the service name.
// NOTE(review): serveHTTP copies the logger into its Handler when the
// service starts, so call this before Open for the HTTP path to pick it up.
func (s *Service) WithLogger(log zap.Logger) {
	s.Logger = log.With(zap.String("service", "opentsdb"))
}
|
||||
|
||||
// Statistics maintains statistics for the openTSDB service.
// (The original comment said "subscriber service", which appears to be a
// copy/paste slip from another package.) All counters are manipulated
// with sync/atomic.
type Statistics struct {
	HTTPConnectionsHandled   int64
	ActiveTelnetConnections  int64
	HandledTelnetConnections int64
	TelnetPointsReceived     int64
	TelnetBytesReceived      int64
	TelnetReadError          int64
	TelnetBadLine            int64
	TelnetBadTime            int64
	TelnetBadTag             int64
	TelnetBadFloat           int64
	BatchesTransmitted       int64
	PointsTransmitted        int64
	BatchesTransmitFail      int64
	ActiveConnections        int64
	HandledConnections       int64
	InvalidDroppedPoints     int64
}
|
||||
|
||||
// Statistics returns statistics for periodic monitoring.
// Each counter is read with an atomic load; the snapshot is consistent
// per field but not across fields.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	return []models.Statistic{{
		Name: "opentsdb",
		Tags: s.defaultTags.Merge(tags),
		Values: map[string]interface{}{
			statHTTPConnectionsHandled:   atomic.LoadInt64(&s.stats.HTTPConnectionsHandled),
			statTelnetConnectionsActive:  atomic.LoadInt64(&s.stats.ActiveTelnetConnections),
			statTelnetConnectionsHandled: atomic.LoadInt64(&s.stats.HandledTelnetConnections),
			statTelnetPointsReceived:     atomic.LoadInt64(&s.stats.TelnetPointsReceived),
			statTelnetBytesReceived:      atomic.LoadInt64(&s.stats.TelnetBytesReceived),
			statTelnetReadError:          atomic.LoadInt64(&s.stats.TelnetReadError),
			statTelnetBadLine:            atomic.LoadInt64(&s.stats.TelnetBadLine),
			statTelnetBadTime:            atomic.LoadInt64(&s.stats.TelnetBadTime),
			statTelnetBadTag:             atomic.LoadInt64(&s.stats.TelnetBadTag),
			statTelnetBadFloat:           atomic.LoadInt64(&s.stats.TelnetBadFloat),
			statBatchesTransmitted:       atomic.LoadInt64(&s.stats.BatchesTransmitted),
			statPointsTransmitted:        atomic.LoadInt64(&s.stats.PointsTransmitted),
			statBatchesTransmitFail:      atomic.LoadInt64(&s.stats.BatchesTransmitFail),
			statConnectionsActive:        atomic.LoadInt64(&s.stats.ActiveConnections),
			statConnectionsHandled:       atomic.LoadInt64(&s.stats.HandledConnections),
			statDroppedPointsInvalid:     atomic.LoadInt64(&s.stats.InvalidDroppedPoints),
		},
	}}
}
|
||||
|
||||
// Addr returns the listener's address. Returns nil if listener is closed.
|
||||
func (s *Service) Addr() net.Addr {
|
||||
if s.ln == nil {
|
||||
return nil
|
||||
}
|
||||
return s.ln.Addr()
|
||||
}
|
||||
|
||||
// serve serves the handler from the listener, accepting raw TCP
// connections until the listener is closed. Each connection is handed to
// handleConn in its own goroutine.
func (s *Service) serve() {
	for {
		// Wait for next connection.
		conn, err := s.ln.Accept()
		if opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() {
			// A permanent network error is taken to mean the listener closed.
			s.Logger.Info("openTSDB TCP listener closed")
			return
		} else if err != nil {
			// NOTE(review): a persistent non-OpError accept failure would
			// make this loop spin without backoff — confirm acceptable.
			s.Logger.Info(fmt.Sprint("error accepting openTSDB: ", err.Error()))
			continue
		}

		// Handle connection in separate goroutine.
		go s.handleConn(conn)
	}
}
|
||||
|
||||
// handleConn processes conn. This is run in a separate goroutine.
// It sniffs the start of the stream to decide between HTTP and telnet
// handling: bytes consumed while sniffing are captured in a buffer and
// replayed through a readerConn so neither handler misses data.
func (s *Service) handleConn(conn net.Conn) {
	defer atomic.AddInt64(&s.stats.ActiveConnections, -1)
	atomic.AddInt64(&s.stats.ActiveConnections, 1)
	atomic.AddInt64(&s.stats.HandledConnections, 1)

	// Read header into buffer to check if it's HTTP.
	var buf bytes.Buffer
	r := bufio.NewReader(io.TeeReader(conn, &buf))

	// Attempt to parse connection as HTTP.
	_, err := http.ReadRequest(r)

	// Rebuild connection from buffer and remaining connection data.
	bufr := bufio.NewReader(io.MultiReader(&buf, conn))
	conn = &readerConn{Conn: conn, r: bufr}

	// If no HTTP parsing error occurred then process as HTTP.
	if err == nil {
		atomic.AddInt64(&s.stats.HTTPConnectionsHandled, 1)
		// NOTE(review): this send blocks until serveHTTP accepts the
		// connection; if the HTTP server has stopped accepting, the
		// goroutine blocks indefinitely — confirm shutdown ordering.
		s.httpln.ch <- conn
		return
	}

	// Otherwise handle in telnet format.
	s.wg.Add(1)
	s.handleTelnetConn(conn)
	s.wg.Done()
}
|
||||
|
||||
// handleTelnetConn accepts OpenTSDB's telnet protocol.
// Each telnet command consists of a line of the form:
//
//	put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
//
// Valid points are fed into the service's batcher; malformed input bumps
// a statistic and (optionally) logs, without terminating the connection.
func (s *Service) handleTelnetConn(conn net.Conn) {
	defer conn.Close()
	defer atomic.AddInt64(&s.stats.ActiveTelnetConnections, -1)
	atomic.AddInt64(&s.stats.ActiveTelnetConnections, 1)
	atomic.AddInt64(&s.stats.HandledTelnetConnections, 1)

	// Get connection details.
	remoteAddr := conn.RemoteAddr().String()

	// Wrap connection in a text protocol reader.
	r := textproto.NewReader(bufio.NewReader(conn))
	for {
		line, err := r.ReadLine()
		if err != nil {
			// EOF is the normal end of a connection; anything else is counted.
			if err != io.EOF {
				atomic.AddInt64(&s.stats.TelnetReadError, 1)
				s.Logger.Info(fmt.Sprint("error reading from openTSDB connection ", err.Error()))
			}
			return
		}
		atomic.AddInt64(&s.stats.TelnetPointsReceived, 1)
		atomic.AddInt64(&s.stats.TelnetBytesReceived, int64(len(line)))

		inputStrs := strings.Fields(line)

		// The "version" command gets a fixed identification string.
		if len(inputStrs) == 1 && inputStrs[0] == "version" {
			conn.Write([]byte("InfluxDB TSDB proxy"))
			continue
		}

		if len(inputStrs) < 4 || inputStrs[0] != "put" {
			atomic.AddInt64(&s.stats.TelnetBadLine, 1)
			if s.LogPointErrors {
				s.Logger.Info(fmt.Sprintf("malformed line '%s' from %s", line, remoteAddr))
			}
			continue
		}

		measurement := inputStrs[1]
		tsStr := inputStrs[2]
		valueStr := inputStrs[3]
		tagStrs := inputStrs[4:]

		var t time.Time
		ts, err := strconv.ParseInt(tsStr, 10, 64)
		if err != nil {
			// NOTE(review): there is no continue here, so a 10- or
			// 13-character non-numeric timestamp falls through with ts == 0
			// and produces an epoch-time point — confirm intended.
			atomic.AddInt64(&s.stats.TelnetBadTime, 1)
			if s.LogPointErrors {
				s.Logger.Info(fmt.Sprintf("malformed time '%s' from %s", tsStr, remoteAddr))
			}
		}

		// Timestamp length selects the unit: 10 digits = seconds,
		// 13 digits = milliseconds.
		switch len(tsStr) {
		case 10:
			t = time.Unix(ts, 0)
		case 13:
			// NOTE(review): (ts%1000)*1000 passes microseconds where
			// time.Unix expects nanoseconds — same 1000x sub-second issue
			// as in servePut; confirm against upstream intent.
			t = time.Unix(ts/1000, (ts%1000)*1000)
		default:
			atomic.AddInt64(&s.stats.TelnetBadTime, 1)
			if s.LogPointErrors {
				s.Logger.Info(fmt.Sprintf("bad time '%s' must be 10 or 13 chars, from %s ", tsStr, remoteAddr))
			}
			continue
		}

		// Parse k=v tag pairs. A malformed pair is skipped individually;
		// the point is still written with the remaining tags.
		tags := make(map[string]string)
		for t := range tagStrs {
			parts := strings.SplitN(tagStrs[t], "=", 2)
			if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
				atomic.AddInt64(&s.stats.TelnetBadTag, 1)
				if s.LogPointErrors {
					s.Logger.Info(fmt.Sprintf("malformed tag data '%v' from %s", tagStrs[t], remoteAddr))
				}
				continue
			}
			k := parts[0]

			tags[k] = parts[1]
		}

		fields := make(map[string]interface{})
		fv, err := strconv.ParseFloat(valueStr, 64)
		if err != nil {
			atomic.AddInt64(&s.stats.TelnetBadFloat, 1)
			if s.LogPointErrors {
				s.Logger.Info(fmt.Sprintf("bad float '%s' from %s", valueStr, remoteAddr))
			}
			continue
		}
		fields["value"] = fv

		pt, err := models.NewPoint(measurement, models.NewTags(tags), fields, t)
		if err != nil {
			// NOTE(review): a NewPoint failure is counted and logged as a
			// "bad float" even though the float parsed fine — the stat and
			// message look copy-pasted from the branch above.
			atomic.AddInt64(&s.stats.TelnetBadFloat, 1)
			if s.LogPointErrors {
				s.Logger.Info(fmt.Sprintf("bad float '%s' from %s", valueStr, remoteAddr))
			}
			continue
		}
		s.batcher.In() <- pt
	}
}
|
||||
|
||||
// serveHTTP handles connections in HTTP format.
|
||||
func (s *Service) serveHTTP() {
|
||||
handler := &Handler{
|
||||
Database: s.Database,
|
||||
RetentionPolicy: s.RetentionPolicy,
|
||||
PointsWriter: s.PointsWriter,
|
||||
Logger: s.Logger,
|
||||
stats: s.stats,
|
||||
}
|
||||
srv := &http.Server{Handler: handler}
|
||||
srv.Serve(s.httpln)
|
||||
}
|
||||
|
||||
// processBatches continually drains the given batcher and writes the
// batches to the database. It runs until s.done is closed. A failure to
// create the target database drops the batch and retries on the next one.
func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
	for {
		select {
		case <-s.done:
			return
		case batch := <-batcher.Out():
			// Will attempt to create database if not yet created.
			if err := s.createInternalStorage(); err != nil {
				s.Logger.Info(fmt.Sprintf("Required database %s does not yet exist: %s", s.Database, err.Error()))
				continue
			}

			// NOTE(review): writes always use ConsistencyLevelAny; the
			// configured consistency-level is never consulted here —
			// confirm against upstream intent.
			if err := s.PointsWriter.WritePointsPrivileged(s.Database, s.RetentionPolicy, models.ConsistencyLevelAny, batch); err == nil {
				atomic.AddInt64(&s.stats.BatchesTransmitted, 1)
				atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))
			} else {
				s.Logger.Info(fmt.Sprintf("failed to write point batch to database %q: %s", s.Database, err))
				atomic.AddInt64(&s.stats.BatchesTransmitFail, 1)
			}
		}
	}
}
|
295
vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go
generated
vendored
Normal file
295
vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/influxdata/influxdb/internal"
|
||||
"github.com/influxdata/influxdb/models"
|
||||
"github.com/influxdata/influxdb/services/meta"
|
||||
"github.com/uber-go/zap"
|
||||
)
|
||||
|
||||
// Test_Service_OpenClose verifies that Open and Close are idempotent and
// that the service can be reopened after being closed.
func Test_Service_OpenClose(t *testing.T) {
	// Let the OS assign a random port since we are only opening and closing the service,
	// not actually connecting to it.
	service := NewTestService("db0", "127.0.0.1:0")

	// Closing a closed service is fine.
	if err := service.Service.Close(); err != nil {
		t.Fatal(err)
	}

	// Closing a closed service again is fine.
	if err := service.Service.Close(); err != nil {
		t.Fatal(err)
	}

	if err := service.Service.Open(); err != nil {
		t.Fatal(err)
	}

	// Opening an already open service is fine.
	if err := service.Service.Open(); err != nil {
		t.Fatal(err)
	}

	// Reopening a previously opened service is fine.
	if err := service.Service.Close(); err != nil {
		t.Fatal(err)
	}

	if err := service.Service.Open(); err != nil {
		t.Fatal(err)
	}

	// Tidy up.
	if err := service.Service.Close(); err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// Ensure a point can be written via the telnet protocol.
|
||||
// TestService_CreatesDatabase verifies the lazy database-creation path:
// the ready flag stays false while the meta client errors, and flips to
// true once creation succeeds.
func TestService_CreatesDatabase(t *testing.T) {
	t.Parallel()

	database := "db0"
	s := NewTestService(database, "127.0.0.1:0")
	s.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error {
		return nil
	}

	called := make(chan struct{})
	s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {
		if name != database {
			t.Errorf("\n\texp = %s\n\tgot = %s\n", database, name)
		}
		// Allow some time for the caller to return and the ready status to
		// be set.
		time.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })
		return nil, errors.New("an error")
	}

	if err := s.Service.Open(); err != nil {
		t.Fatal(err)
	}

	points, err := models.ParsePointsString(`cpu value=1`)
	if err != nil {
		t.Fatal(err)
	}

	s.Service.batcher.In() <- points[0] // Send a point.
	s.Service.batcher.Flush()
	select {
	case <-called:
		// OK
	case <-time.NewTimer(5 * time.Second).C:
		t.Fatal("Service should have attempted to create database")
	}

	// ready status should not have been switched due to meta client error.
	s.Service.mu.RLock()
	ready := s.Service.ready
	s.Service.mu.RUnlock()

	if got, exp := ready, false; got != exp {
		t.Fatalf("got %v, expected %v", got, exp)
	}

	// This time MC won't cause an error.
	s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {
		// Allow some time for the caller to return and the ready status to
		// be set.
		time.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })
		return nil, nil
	}

	s.Service.batcher.In() <- points[0] // Send a point.
	s.Service.batcher.Flush()
	select {
	case <-called:
		// OK
	case <-time.NewTimer(5 * time.Second).C:
		t.Fatal("Service should have attempted to create database")
	}

	// ready status should now be true because the meta client succeeded.
	// (The original comment here repeated the "should not have been
	// switched" text from above, contradicting the assertion below.)
	s.Service.mu.RLock()
	ready = s.Service.ready
	s.Service.mu.RUnlock()

	if got, exp := ready, true; got != exp {
		t.Fatalf("got %v, expected %v", got, exp)
	}

	s.Service.Close()
}
|
||||
|
||||
// Ensure a point can be written via the telnet protocol.
|
||||
func TestService_Telnet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
s := NewTestService("db0", "127.0.0.1:0")
|
||||
if err := s.Service.Open(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s.Service.Close()
|
||||
|
||||
// Mock points writer.
|
||||
var called int32
|
||||
s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
|
||||
atomic.StoreInt32(&called, 1)
|
||||
|
||||
if database != "db0" {
|
||||
t.Fatalf("unexpected database: %s", database)
|
||||
} else if retentionPolicy != "" {
|
||||
t.Fatalf("unexpected retention policy: %s", retentionPolicy)
|
||||
} else if !reflect.DeepEqual(points, []models.Point{
|
||||
models.MustNewPoint(
|
||||
"sys.cpu.user",
|
||||
models.NewTags(map[string]string{"host": "webserver01", "cpu": "0"}),
|
||||
map[string]interface{}{"value": 42.5},
|
||||
time.Unix(1356998400, 0),
|
||||
),
|
||||
}) {
|
||||
t.Fatalf("unexpected points: %#v", points)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open connection to the service.
|
||||
conn, err := net.Dial("tcp", s.Service.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write telnet data and close.
|
||||
if _, err := conn.Write([]byte("put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := conn.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tick := time.Tick(10 * time.Millisecond)
|
||||
timeout := time.After(10 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
// Verify that the writer was called.
|
||||
if atomic.LoadInt32(&called) > 0 {
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
t.Fatal("points writer not called")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a point can be written via the HTTP protocol.
|
||||
func TestService_HTTP(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
s := NewTestService("db0", "127.0.0.1:0")
|
||||
if err := s.Service.Open(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s.Service.Close()
|
||||
|
||||
// Mock points writer.
|
||||
var called bool
|
||||
s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
|
||||
called = true
|
||||
if database != "db0" {
|
||||
t.Fatalf("unexpected database: %s", database)
|
||||
} else if retentionPolicy != "" {
|
||||
t.Fatalf("unexpected retention policy: %s", retentionPolicy)
|
||||
} else if !reflect.DeepEqual(points, []models.Point{
|
||||
models.MustNewPoint(
|
||||
"sys.cpu.nice",
|
||||
models.NewTags(map[string]string{"dc": "lga", "host": "web01"}),
|
||||
map[string]interface{}{"value": 18.0},
|
||||
time.Unix(1346846400, 0),
|
||||
),
|
||||
}) {
|
||||
spew.Dump(points)
|
||||
t.Fatalf("unexpected points: %#v", points)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write HTTP request to server.
|
||||
resp, err := http.Post("http://"+s.Service.Addr().String()+"/api/put", "application/json", strings.NewReader(`{"metric":"sys.cpu.nice", "timestamp":1346846400, "value":18, "tags":{"host":"web01", "dc":"lga"}}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Verify status and body.
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Verify that the writer was called.
|
||||
if !called {
|
||||
t.Fatal("points writer not called")
|
||||
}
|
||||
}
|
||||
|
||||
// TestService wraps a Service with a mock meta client and a pluggable
// points-writer hook so tests can intercept and inspect writes.
type TestService struct {
	// Service is the OpenTSDB service under test.
	Service *Service
	// MetaClient is the mock meta client injected into Service.
	MetaClient *internal.MetaClientMock
	// WritePointsFn is invoked by WritePointsPrivileged; tests assign it to
	// observe the points the service attempts to write.
	WritePointsFn func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error
}
|
||||
|
||||
// NewTestService returns a new instance of Service.
|
||||
func NewTestService(database string, bind string) *TestService {
|
||||
s, err := NewService(Config{
|
||||
BindAddress: bind,
|
||||
Database: database,
|
||||
ConsistencyLevel: "one",
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
service := &TestService{
|
||||
Service: s,
|
||||
MetaClient: &internal.MetaClientMock{},
|
||||
}
|
||||
|
||||
service.MetaClient.CreateDatabaseFn = func(db string) (*meta.DatabaseInfo, error) {
|
||||
if got, exp := db, database; got != exp {
|
||||
return nil, fmt.Errorf("got %v, expected %v", got, exp)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if testing.Verbose() {
|
||||
service.Service.WithLogger(zap.New(
|
||||
zap.NewTextEncoder(),
|
||||
zap.Output(os.Stderr),
|
||||
))
|
||||
}
|
||||
|
||||
service.Service.MetaClient = service.MetaClient
|
||||
service.Service.PointsWriter = service
|
||||
return service
|
||||
}
|
||||
|
||||
// WritePointsPrivileged implements the service's points-writer interface by
// delegating to the test's WritePointsFn hook.
func (s *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
	return s.WritePointsFn(database, retentionPolicy, consistencyLevel, points)
}
|
Reference in New Issue
Block a user