mirror of
https://github.com/Oxalide/vsphere-influxdb-go.git
synced 2023-10-10 11:36:51 +00:00
add vendoring with go dep
This commit is contained in:
261
vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
generated
vendored
Normal file
261
vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
generated
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
// Package run is the run (default) subcommand for the influxd command.
|
||||
package run
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/uber-go/zap"
|
||||
)
|
||||
|
||||
// logo is the ASCII-art banner printed to stdout when the server starts.
const logo = `
 8888888           .d888 888                   8888888b.  888888b.
   888            d88P"  888                   888  "Y88b 888  "88b
   888            888    888                   888    888 888  .88P
   888   88888b.  888888 888 888  888 888  888 888    888 8888888K.
   888   888 "88b 888    888 888  888  Y8bd8P' 888    888 888  "Y88b
   888   888  888 888    888 888  888   X88K   888    888 888    888
   888   888  888 888    888 Y88b 888 .d8""8b. 888  .d88P 888   d88P
 8888888 888  888 888    888  "Y88888 888  888 8888888P"  8888888P"

`
|
||||
|
||||
// Command represents the command executed by "influxd run".
type Command struct {
	// Build metadata reported in the start-up log line (set by the
	// build system; presumably via -ldflags — TODO confirm at caller).
	Version   string
	Branch    string
	Commit    string
	BuildTime string

	// closing is closed by Close to stop background goroutines;
	// Closed is closed once Close has finished shutting down.
	closing chan struct{}
	Closed  chan struct{}

	// Standard streams used for flag usage output and error logging.
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
	Logger zap.Logger

	// Server is the running server instance; set by Run on success.
	Server *Server
}
|
||||
|
||||
// NewCommand return a new instance of Command.
|
||||
func NewCommand() *Command {
|
||||
return &Command{
|
||||
closing: make(chan struct{}),
|
||||
Closed: make(chan struct{}),
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
Logger: zap.New(zap.NullEncoder()),
|
||||
}
|
||||
}
|
||||
|
||||
// Run parses the config from args and runs the server.
// It parses flags, loads and validates the configuration, writes the PID
// file, opens the server, and starts a goroutine watching the server's
// error channel. It does not block waiting for the server to stop.
func (cmd *Command) Run(args ...string) error {
	// Parse the command line flags.
	options, err := cmd.ParseFlags(args...)
	if err != nil {
		return err
	}

	// Print sweet InfluxDB logo.
	fmt.Print(logo)

	// Mark start-up in log.
	cmd.Logger.Info(fmt.Sprintf("InfluxDB starting, version %s, branch %s, commit %s",
		cmd.Version, cmd.Branch, cmd.Commit))
	cmd.Logger.Info(fmt.Sprintf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)))

	// Write the PID file.
	if err := cmd.writePIDFile(options.PIDFile); err != nil {
		return fmt.Errorf("write pid file: %s", err)
	}

	// Parse config
	config, err := cmd.ParseConfig(options.GetConfigPath())
	if err != nil {
		return fmt.Errorf("parse config: %s", err)
	}

	// Apply any environment variables on top of the parsed config
	if err := config.ApplyEnvOverrides(); err != nil {
		return fmt.Errorf("apply env config: %v", err)
	}

	// Validate the configuration.
	if err := config.Validate(); err != nil {
		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
	}

	if config.HTTPD.PprofEnabled {
		// Turn on block profiling to debug stuck databases.
		// NOTE(review): the rate passed is 1*time.Second expressed in
		// nanoseconds; confirm against runtime.SetBlockProfileRate docs
		// that this sampling rate is intended.
		runtime.SetBlockProfileRate(int(1 * time.Second))
	}

	// Create server from config and start it.
	buildInfo := &BuildInfo{
		Version: cmd.Version,
		Commit:  cmd.Commit,
		Branch:  cmd.Branch,
		Time:    cmd.BuildTime,
	}
	s, err := NewServer(config, buildInfo)
	if err != nil {
		return fmt.Errorf("create server: %s", err)
	}
	s.Logger = cmd.Logger
	s.CPUProfile = options.CPUProfile
	s.MemProfile = options.MemProfile
	if err := s.Open(); err != nil {
		return fmt.Errorf("open server: %s", err)
	}
	cmd.Server = s

	// Begin monitoring the server's error channel.
	go cmd.monitorServerErrors()

	return nil
}
|
||||
|
||||
// Close shuts down the server.
|
||||
func (cmd *Command) Close() error {
|
||||
defer close(cmd.Closed)
|
||||
close(cmd.closing)
|
||||
if cmd.Server != nil {
|
||||
return cmd.Server.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cmd *Command) monitorServerErrors() {
|
||||
logger := log.New(cmd.Stderr, "", log.LstdFlags)
|
||||
for {
|
||||
select {
|
||||
case err := <-cmd.Server.Err():
|
||||
logger.Println(err)
|
||||
case <-cmd.closing:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ParseFlags parses the command line flags from args and returns an options set.
|
||||
func (cmd *Command) ParseFlags(args ...string) (Options, error) {
|
||||
var options Options
|
||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
fs.StringVar(&options.ConfigPath, "config", "", "")
|
||||
fs.StringVar(&options.PIDFile, "pidfile", "", "")
|
||||
// Ignore hostname option.
|
||||
_ = fs.String("hostname", "", "")
|
||||
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
|
||||
fs.StringVar(&options.MemProfile, "memprofile", "", "")
|
||||
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }
|
||||
if err := fs.Parse(args); err != nil {
|
||||
return Options{}, err
|
||||
}
|
||||
return options, nil
|
||||
}
|
||||
|
||||
// writePIDFile writes the process ID to path.
|
||||
func (cmd *Command) writePIDFile(path string) error {
|
||||
// Ignore if path is not set.
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure the required directory structure exists.
|
||||
err := os.MkdirAll(filepath.Dir(path), 0777)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mkdir: %s", err)
|
||||
}
|
||||
|
||||
// Retrieve the PID and write it.
|
||||
pid := strconv.Itoa(os.Getpid())
|
||||
if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {
|
||||
return fmt.Errorf("write file: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseConfig parses the config at path.
|
||||
// It returns a demo configuration if path is blank.
|
||||
func (cmd *Command) ParseConfig(path string) (*Config, error) {
|
||||
// Use demo configuration if no config path is specified.
|
||||
if path == "" {
|
||||
cmd.Logger.Info("no configuration provided, using default settings")
|
||||
return NewDemoConfig()
|
||||
}
|
||||
|
||||
cmd.Logger.Info(fmt.Sprintf("Using configuration at: %s", path))
|
||||
|
||||
config := NewConfig()
|
||||
if err := config.FromTomlFile(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// usage is the help text printed by "influxd run" flag parsing.
const usage = `Runs the InfluxDB server.

Usage: influxd run [flags]

    -config <path>
            Set the path to the configuration file.
            This defaults to the environment variable INFLUXDB_CONFIG_PATH,
            ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
            is present at any of these locations.
            Disable the automatic loading of a configuration file using
            the null device (such as /dev/null).
    -pidfile <path>
            Write process ID to a file.
    -cpuprofile <path>
            Write CPU profiling information to a file.
    -memprofile <path>
            Write memory usage information to a file.
`
|
||||
|
||||
// Options represents the command line options that can be parsed.
type Options struct {
	ConfigPath string // -config: path to the configuration file
	PIDFile    string // -pidfile: file to write the process ID to
	CPUProfile string // -cpuprofile: file for CPU profiling output
	MemProfile string // -memprofile: file for memory profiling output
}
|
||||
|
||||
// GetConfigPath returns the config path from the options.
|
||||
// It will return a path by searching in this order:
|
||||
// 1. The CLI option in ConfigPath
|
||||
// 2. The environment variable INFLUXDB_CONFIG_PATH
|
||||
// 3. The first influxdb.conf file on the path:
|
||||
// - ~/.influxdb
|
||||
// - /etc/influxdb
|
||||
func (opt *Options) GetConfigPath() string {
|
||||
if opt.ConfigPath != "" {
|
||||
if opt.ConfigPath == os.DevNull {
|
||||
return ""
|
||||
}
|
||||
return opt.ConfigPath
|
||||
} else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" {
|
||||
return envVar
|
||||
}
|
||||
|
||||
for _, path := range []string{
|
||||
os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"),
|
||||
"/etc/influxdb/influxdb.conf",
|
||||
} {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return path
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
363
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go
generated
vendored
Normal file
363
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go
generated
vendored
Normal file
@@ -0,0 +1,363 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/influxdata/influxdb/coordinator"
|
||||
"github.com/influxdata/influxdb/monitor"
|
||||
"github.com/influxdata/influxdb/monitor/diagnostics"
|
||||
"github.com/influxdata/influxdb/services/collectd"
|
||||
"github.com/influxdata/influxdb/services/continuous_querier"
|
||||
"github.com/influxdata/influxdb/services/graphite"
|
||||
"github.com/influxdata/influxdb/services/httpd"
|
||||
"github.com/influxdata/influxdb/services/meta"
|
||||
"github.com/influxdata/influxdb/services/opentsdb"
|
||||
"github.com/influxdata/influxdb/services/precreator"
|
||||
"github.com/influxdata/influxdb/services/retention"
|
||||
"github.com/influxdata/influxdb/services/subscriber"
|
||||
"github.com/influxdata/influxdb/services/udp"
|
||||
"github.com/influxdata/influxdb/tsdb"
|
||||
)
|
||||
|
||||
const (
	// DefaultBindAddress is the default address for various RPC services.
	DefaultBindAddress = "127.0.0.1:8088"
)
|
||||
|
||||
// Config represents the configuration format for the influxd binary.
// Field groups mirror the sections of the TOML configuration file; the
// slice-typed inputs correspond to repeatable [[...]] sections.
type Config struct {
	// Core storage and query services.
	Meta        *meta.Config       `toml:"meta"`
	Data        tsdb.Config        `toml:"data"`
	Coordinator coordinator.Config `toml:"coordinator"`
	Retention   retention.Config   `toml:"retention"`
	Precreator  precreator.Config  `toml:"shard-precreation"`

	// Auxiliary services; the *Inputs slices allow multiple listeners.
	Monitor        monitor.Config    `toml:"monitor"`
	Subscriber     subscriber.Config `toml:"subscriber"`
	HTTPD          httpd.Config      `toml:"http"`
	GraphiteInputs []graphite.Config `toml:"graphite"`
	CollectdInputs []collectd.Config `toml:"collectd"`
	OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"`
	UDPInputs      []udp.Config      `toml:"udp"`

	ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`

	// Server reporting
	ReportingDisabled bool `toml:"reporting-disabled"`

	// BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.)
	BindAddress string `toml:"bind-address"`
}
|
||||
|
||||
// NewConfig returns an instance of Config with reasonable defaults.
|
||||
func NewConfig() *Config {
|
||||
c := &Config{}
|
||||
c.Meta = meta.NewConfig()
|
||||
c.Data = tsdb.NewConfig()
|
||||
c.Coordinator = coordinator.NewConfig()
|
||||
c.Precreator = precreator.NewConfig()
|
||||
|
||||
c.Monitor = monitor.NewConfig()
|
||||
c.Subscriber = subscriber.NewConfig()
|
||||
c.HTTPD = httpd.NewConfig()
|
||||
|
||||
c.GraphiteInputs = []graphite.Config{graphite.NewConfig()}
|
||||
c.CollectdInputs = []collectd.Config{collectd.NewConfig()}
|
||||
c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()}
|
||||
c.UDPInputs = []udp.Config{udp.NewConfig()}
|
||||
|
||||
c.ContinuousQuery = continuous_querier.NewConfig()
|
||||
c.Retention = retention.NewConfig()
|
||||
c.BindAddress = DefaultBindAddress
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// NewDemoConfig returns the config that runs when no config is specified.
|
||||
func NewDemoConfig() (*Config, error) {
|
||||
c := NewConfig()
|
||||
|
||||
var homeDir string
|
||||
// By default, store meta and data files in current users home directory
|
||||
u, err := user.Current()
|
||||
if err == nil {
|
||||
homeDir = u.HomeDir
|
||||
} else if os.Getenv("HOME") != "" {
|
||||
homeDir = os.Getenv("HOME")
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to determine current user for storage")
|
||||
}
|
||||
|
||||
c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta")
|
||||
c.Data.Dir = filepath.Join(homeDir, ".influxdb/data")
|
||||
c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// trimBOM trims the UTF-8 Byte-Order-Mark, if present, from the
// beginning of f. This is for Windows compatibility only.
// See https://github.com/influxdata/telegraf/issues/1378.
func trimBOM(f []byte) []byte {
	bom := []byte{0xef, 0xbb, 0xbf}
	if bytes.HasPrefix(f, bom) {
		return f[len(bom):]
	}
	return f
}
|
||||
|
||||
// FromTomlFile loads the config from a TOML file.
// The file contents are stripped of any leading UTF-8 BOM before parsing.
func (c *Config) FromTomlFile(fpath string) error {
	bs, err := ioutil.ReadFile(fpath)
	if err != nil {
		return err
	}
	bs = trimBOM(bs)
	return c.FromToml(string(bs))
}
|
||||
|
||||
// FromToml loads the config from TOML.
// Deprecated [cluster] section headers are rewritten to [coordinator]
// before decoding, with a deprecation warning logged for each rewrite.
func (c *Config) FromToml(input string) error {
	// Replace deprecated [cluster] with [coordinator]; the (?m) flag makes
	// ^ match at the start of every line, and leading whitespace before the
	// header is consumed by the rewrite.
	re := regexp.MustCompile(`(?m)^\s*\[cluster\]`)
	input = re.ReplaceAllStringFunc(input, func(in string) string {
		in = strings.TrimSpace(in)
		out := "[coordinator]"
		log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in)
		return out
	})

	_, err := toml.Decode(input, c)
	return err
}
|
||||
|
||||
// Validate returns an error if the config is invalid.
// Each subsection is validated in turn; the first failure is returned.
// Repeatable inputs (graphite, collectd) are validated per instance and
// their errors are wrapped with the section name for context.
func (c *Config) Validate() error {
	if err := c.Meta.Validate(); err != nil {
		return err
	}

	if err := c.Data.Validate(); err != nil {
		return err
	}

	if err := c.Monitor.Validate(); err != nil {
		return err
	}

	if err := c.ContinuousQuery.Validate(); err != nil {
		return err
	}

	if err := c.Retention.Validate(); err != nil {
		return err
	}

	if err := c.Precreator.Validate(); err != nil {
		return err
	}

	if err := c.Subscriber.Validate(); err != nil {
		return err
	}

	for _, graphite := range c.GraphiteInputs {
		if err := graphite.Validate(); err != nil {
			return fmt.Errorf("invalid graphite config: %v", err)
		}
	}

	for _, collectd := range c.CollectdInputs {
		if err := collectd.Validate(); err != nil {
			return fmt.Errorf("invalid collectd config: %v", err)
		}
	}

	return nil
}
|
||||
|
||||
// ApplyEnvOverrides apply the environment configuration on top of the config.
// Environment variables are looked up under the INFLUXDB_ prefix, with
// names derived from the struct's toml tags (see applyEnvOverrides).
func (c *Config) ApplyEnvOverrides() error {
	return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c), "")
}
|
||||
|
||||
// applyEnvOverrides recursively walks spec by reflection and overrides
// fields with values from environment variables.
//
// Env var names are built from prefix plus each field's toml tag
// (hyphens replaced with underscores, upper-cased), e.g.
// INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE. Slice elements additionally get
// index-suffixed names (GRAPHITE_0, GRAPHITE_0_TEMPLATES_0) and empty
// slices may be populated from a single comma-separated value.
// structKey is the name of the field being set, used only in error messages.
func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value, structKey string) error {
	// If we have a pointer, dereference it
	element := spec
	if spec.Kind() == reflect.Ptr {
		element = spec.Elem()
	}

	value := os.Getenv(prefix)

	switch element.Kind() {
	case reflect.String:
		if len(value) == 0 {
			return nil
		}
		element.SetString(value)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		var intValue int64

		// Handle toml.Duration: parsed as a duration string, stored as
		// nanoseconds in the underlying int64.
		if element.Type().Name() == "Duration" {
			dur, err := time.ParseDuration(value)
			if err != nil {
				return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
			}
			intValue = dur.Nanoseconds()
		} else {
			var err error
			intValue, err = strconv.ParseInt(value, 0, element.Type().Bits())
			if err != nil {
				return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
			}
		}
		element.SetInt(intValue)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		intValue, err := strconv.ParseUint(value, 0, element.Type().Bits())
		if err != nil {
			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
		}
		element.SetUint(intValue)
	case reflect.Bool:
		boolValue, err := strconv.ParseBool(value)
		if err != nil {
			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
		}
		element.SetBool(boolValue)
	case reflect.Float32, reflect.Float64:
		floatValue, err := strconv.ParseFloat(value, element.Type().Bits())
		if err != nil {
			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
		}
		element.SetFloat(floatValue)
	case reflect.Slice:
		// If the type is s slice, apply to each using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES="item1,item2"
		// Each element is visited twice: once under the bare prefix (an
		// un-suffixed var applies to every element) and once under the
		// index-suffixed prefix (which can then override per element).
		for j := 0; j < element.Len(); j++ {
			f := element.Index(j)
			if err := c.applyEnvOverrides(prefix, f, structKey); err != nil {
				return err
			}

			if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil {
				return err
			}
		}

		// If the type is s slice but have value not parsed as slice e.g. GRAPHITE_0_TEMPLATES="item1,item2"
		if element.Len() == 0 && len(value) > 0 {
			rules := strings.Split(value, ",")

			for _, rule := range rules {
				element.Set(reflect.Append(element, reflect.ValueOf(rule)))
			}
		}
	case reflect.Struct:
		typeOfSpec := element.Type()
		for i := 0; i < element.NumField(); i++ {
			field := element.Field(i)

			// Skip any fields that we cannot set
			if !field.CanSet() && field.Kind() != reflect.Slice {
				continue
			}

			fieldName := typeOfSpec.Field(i).Name

			configName := typeOfSpec.Field(i).Tag.Get("toml")
			// Replace hyphens with underscores to avoid issues with shells
			configName = strings.Replace(configName, "-", "_", -1)

			envKey := strings.ToUpper(configName)
			if prefix != "" {
				envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName))
			}

			// If it's a sub-config, recursively apply
			if field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr ||
				field.Kind() == reflect.Slice || field.Kind() == reflect.Array {
				if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil {
					return err
				}
				continue
			}

			value := os.Getenv(envKey)
			// Skip any fields we don't have a value to set
			if len(value) == 0 {
				continue
			}

			if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// Diagnostics returns a diagnostics representation of Config.
// Only top-level settings are included here; subsection diagnostics are
// provided by the individual configs via diagnosticsClients.
func (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) {
	return diagnostics.RowFromMap(map[string]interface{}{
		"reporting-disabled": c.ReportingDisabled,
		"bind-address":       c.BindAddress,
	}), nil
}
|
||||
|
||||
// diagnosticsClients returns the set of diagnostics clients to register
// with the monitor, keyed by diagnostics name.
func (c *Config) diagnosticsClients() map[string]diagnostics.Client {
	// Config settings that are always present.
	m := map[string]diagnostics.Client{
		"config": c,

		"config-data":        c.Data,
		"config-meta":        c.Meta,
		"config-coordinator": c.Coordinator,
		"config-retention":   c.Retention,
		"config-precreator":  c.Precreator,

		"config-monitor":    c.Monitor,
		"config-subscriber": c.Subscriber,
		"config-httpd":      c.HTTPD,

		"config-cqs": c.ContinuousQuery,
	}

	// Config settings that can be repeated and can be disabled.
	if g := graphite.Configs(c.GraphiteInputs); g.Enabled() {
		m["config-graphite"] = g
	}
	if cc := collectd.Configs(c.CollectdInputs); cc.Enabled() {
		m["config-collectd"] = cc
	}
	if t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() {
		m["config-opentsdb"] = t
	}
	if u := udp.Configs(c.UDPInputs); u.Enabled() {
		m["config-udp"] = u
	}

	return m
}
|
||||
|
||||
// registerDiagnostics registers the config settings with the Monitor.
func (c *Config) registerDiagnostics(m *monitor.Monitor) {
	for name, dc := range c.diagnosticsClients() {
		m.RegisterDiagnosticsClient(name, dc)
	}
}
|
||||
|
||||
// deregisterDiagnostics deregisters the config settings from the Monitor.
func (c *Config) deregisterDiagnostics(m *monitor.Monitor) {
	for name := range c.diagnosticsClients() {
		m.DeregisterDiagnosticsClient(name)
	}
}
|
92
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go
generated
vendored
Normal file
92
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
// PrintConfigCommand represents the command executed by "influxd config".
type PrintConfigCommand struct {
	// Standard streams: the resolved config is written to Stdout and
	// usage/merge notices go to Stderr.
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}
|
||||
|
||||
// NewPrintConfigCommand return a new instance of PrintConfigCommand.
|
||||
func NewPrintConfigCommand() *PrintConfigCommand {
|
||||
return &PrintConfigCommand{
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
}
|
||||
}
|
||||
|
||||
// Run parses and prints the current config loaded.
|
||||
func (cmd *PrintConfigCommand) Run(args ...string) error {
|
||||
// Parse command flags.
|
||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
configPath := fs.String("config", "", "")
|
||||
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }
|
||||
if err := fs.Parse(args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse config from path.
|
||||
opt := Options{ConfigPath: *configPath}
|
||||
config, err := cmd.parseConfig(opt.GetConfigPath())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse config: %s", err)
|
||||
}
|
||||
|
||||
// Apply any environment variables on top of the parsed config
|
||||
if err := config.ApplyEnvOverrides(); err != nil {
|
||||
return fmt.Errorf("apply env config: %v", err)
|
||||
}
|
||||
|
||||
// Validate the configuration.
|
||||
if err := config.Validate(); err != nil {
|
||||
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
|
||||
}
|
||||
|
||||
toml.NewEncoder(cmd.Stdout).Encode(config)
|
||||
fmt.Fprint(cmd.Stdout, "\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseConfig parses the config at path.
|
||||
// Returns a demo configuration if path is blank.
|
||||
func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) {
|
||||
config, err := NewDemoConfig()
|
||||
if err != nil {
|
||||
config = NewConfig()
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
return config, nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path)
|
||||
|
||||
if err := config.FromTomlFile(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// printConfigUsage is the help text printed by "influxd config" flag parsing.
var printConfigUsage = `Displays the default configuration.

Usage: influxd config [flags]

    -config <path>
            Set the path to the initial configuration file.
            This defaults to the environment variable INFLUXDB_CONFIG_PATH,
            ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
            is present at any of these locations.
            Disable the automatic loading of a configuration file using
            the null device (such as /dev/null).
`
|
312
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go
generated
vendored
Normal file
312
vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
||||
package run_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/influxdata/influxdb/cmd/influxd/run"
|
||||
)
|
||||
|
||||
// Ensure the configuration can be parsed.
// Covers single sections, repeated [[...]] input sections, and that the
// deprecated-section rewrite in FromToml does not disturb normal parsing.
func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if err := c.FromToml(`
[meta]
dir = "/tmp/meta"

[data]
dir = "/tmp/data"

[coordinator]

[http]
bind-address = ":8087"

[[graphite]]
protocol = "udp"

[[graphite]]
protocol = "tcp"

[[collectd]]
bind-address = ":1000"

[[collectd]]
bind-address = ":1010"

[[opentsdb]]
bind-address = ":2000"

[[opentsdb]]
bind-address = ":2010"

[[opentsdb]]
bind-address = ":2020"

[[udp]]
bind-address = ":4444"

[monitoring]
enabled = true

[subscriber]
enabled = true

[continuous_queries]
enabled = true
`); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Meta.Dir != "/tmp/meta" {
		t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
	} else if c.Data.Dir != "/tmp/data" {
		t.Fatalf("unexpected data dir: %s", c.Data.Dir)
	} else if c.HTTPD.BindAddress != ":8087" {
		t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress)
	} else if len(c.GraphiteInputs) != 2 {
		t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs))
	} else if c.GraphiteInputs[0].Protocol != "udp" {
		t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol)
	} else if c.GraphiteInputs[1].Protocol != "tcp" {
		t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol)
	} else if c.CollectdInputs[0].BindAddress != ":1000" {
		t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress)
	} else if c.CollectdInputs[1].BindAddress != ":1010" {
		t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress)
	} else if c.OpenTSDBInputs[0].BindAddress != ":2000" {
		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress)
	} else if c.OpenTSDBInputs[1].BindAddress != ":2010" {
		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress)
	} else if c.OpenTSDBInputs[2].BindAddress != ":2020" {
		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress)
	} else if c.UDPInputs[0].BindAddress != ":4444" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress)
	} else if c.Subscriber.Enabled != true {
		t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled)
	} else if c.ContinuousQuery.Enabled != true {
		t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
	}
}
|
||||
|
||||
// Ensure the configuration can be parsed.
// Exercises ApplyEnvOverrides: plain field overrides, index-suffixed
// slice overrides (UDP_0, GRAPHITE_1_...), comma-separated slice values,
// and a uint64-typed field. Note: "overide" in the template values is a
// deliberate literal used consistently by set and check; do not "fix" it.
func TestConfig_Parse_EnvOverride(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if _, err := toml.Decode(`
[meta]
dir = "/tmp/meta"

[data]
dir = "/tmp/data"

[coordinator]

[admin]
bind-address = ":8083"

[http]
bind-address = ":8087"

[[graphite]]
protocol = "udp"
templates = [
"default.* .template.in.config"
]

[[graphite]]
protocol = "tcp"

[[collectd]]
bind-address = ":1000"

[[collectd]]
bind-address = ":1010"

[[opentsdb]]
bind-address = ":2000"

[[opentsdb]]
bind-address = ":2010"

[[udp]]
bind-address = ":4444"

[[udp]]

[monitoring]
enabled = true

[continuous_queries]
enabled = true
`, &c); err != nil {
		t.Fatal(err)
	}

	// Un-suffixed var applies to every UDP input...
	if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	// ...and the index-suffixed var overrides element 0 afterwards.
	if err := os.Setenv("INFLUXDB_UDP_0_BIND_ADDRESS", ":5555"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	if err := os.Setenv("INFLUXDB_GRAPHITE_0_TEMPLATES_0", "overide.* .template.0"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	// Comma-separated value populates an empty slice with two entries.
	if err := os.Setenv("INFLUXDB_GRAPHITE_1_TEMPLATES", "overide.* .template.1.1,overide.* .template.1.2"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	if err := os.Setenv("INFLUXDB_COLLECTD_1_BIND_ADDRESS", ":1020"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	if err := os.Setenv("INFLUXDB_OPENTSDB_0_BIND_ADDRESS", ":2020"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	// uint64 type
	if err := os.Setenv("INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE", "1000"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}

	if err := c.ApplyEnvOverrides(); err != nil {
		t.Fatalf("failed to apply env overrides: %v", err)
	}

	if c.UDPInputs[0].BindAddress != ":5555" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress)
	}

	if c.UDPInputs[1].BindAddress != ":1234" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress)
	}

	if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "overide.* .template.0" {
		t.Fatalf("unexpected graphite 0 templates: %+v", c.GraphiteInputs[0].Templates)
	}

	if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "overide.* .template.1.2" {
		t.Fatalf("unexpected graphite 1 templates: %+v", c.GraphiteInputs[1].Templates)
	}

	if c.GraphiteInputs[1].Protocol != "udp" {
		t.Fatalf("unexpected graphite protocol: %s", c.GraphiteInputs[1].Protocol)
	}

	if c.CollectdInputs[1].BindAddress != ":1020" {
		t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress)
	}

	if c.OpenTSDBInputs[0].BindAddress != ":2020" {
		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress)
	}

	if c.Data.CacheMaxMemorySize != 1000 {
		t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize)
	}
}
|
||||
|
||||
// TestConfig_ValidateNoServiceConfigured ensures Validate rejects a
// config in which both the meta and data services are disabled.
func TestConfig_ValidateNoServiceConfigured(t *testing.T) {
	var c run.Config
	if _, err := toml.Decode(`
[meta]
enabled = false

[data]
enabled = false
`, &c); err != nil {
		t.Fatal(err)
	}

	if e := c.Validate(); e == nil {
		t.Fatalf("got nil, expected error")
	}
}
|
||||
|
||||
func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) {
|
||||
c := run.NewConfig()
|
||||
if _, err := toml.Decode(`
|
||||
[monitor]
|
||||
store-enabled = true
|
||||
|
||||
[meta]
|
||||
dir = "foo"
|
||||
|
||||
[data]
|
||||
enabled = false
|
||||
`, &c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := c.Validate(); err == nil {
|
||||
t.Fatalf("got nil, expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfig_DeprecatedOptions(t *testing.T) {
|
||||
// Parse configuration.
|
||||
var c run.Config
|
||||
if err := c.FromToml(`
|
||||
[cluster]
|
||||
max-select-point = 100
|
||||
`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Validate configuration.
|
||||
if c.Coordinator.MaxSelectPointN != 100 {
|
||||
t.Fatalf("unexpected coordinator max select points: %d", c.Coordinator.MaxSelectPointN)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that Config.Validate correctly validates the individual subsections.
|
||||
func TestConfig_InvalidSubsections(t *testing.T) {
|
||||
// Precondition: NewDemoConfig must validate correctly.
|
||||
c, err := run.NewDemoConfig()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating demo config: %s", err)
|
||||
}
|
||||
if err := c.Validate(); err != nil {
|
||||
t.Fatalf("new demo config failed validation: %s", err)
|
||||
}
|
||||
|
||||
// For each subsection, load a config with a single invalid setting.
|
||||
for _, tc := range []struct {
|
||||
section string
|
||||
kv string
|
||||
}{
|
||||
{"meta", `dir = ""`},
|
||||
{"data", `dir = ""`},
|
||||
{"monitor", `store-database = ""`},
|
||||
{"continuous_queries", `run-interval = "0s"`},
|
||||
{"subscriber", `http-timeout = "0s"`},
|
||||
{"retention", `check-interval = "0s"`},
|
||||
{"shard-precreation", `advance-period = "0s"`},
|
||||
} {
|
||||
c, err := run.NewDemoConfig()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating demo config: %s", err)
|
||||
}
|
||||
|
||||
s := fmt.Sprintf("\n[%s]\n%s\n", tc.section, tc.kv)
|
||||
if err := c.FromToml(s); err != nil {
|
||||
t.Fatalf("error loading toml %q: %s", s, err)
|
||||
}
|
||||
|
||||
if err := c.Validate(); err == nil {
|
||||
t.Fatalf("expected error but got nil for config: %s", s)
|
||||
}
|
||||
}
|
||||
}
|
616
vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go
generated
vendored
Normal file
616
vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go
generated
vendored
Normal file
@@ -0,0 +1,616 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb"
|
||||
"github.com/influxdata/influxdb/coordinator"
|
||||
"github.com/influxdata/influxdb/influxql"
|
||||
"github.com/influxdata/influxdb/models"
|
||||
"github.com/influxdata/influxdb/monitor"
|
||||
"github.com/influxdata/influxdb/services/collectd"
|
||||
"github.com/influxdata/influxdb/services/continuous_querier"
|
||||
"github.com/influxdata/influxdb/services/graphite"
|
||||
"github.com/influxdata/influxdb/services/httpd"
|
||||
"github.com/influxdata/influxdb/services/meta"
|
||||
"github.com/influxdata/influxdb/services/opentsdb"
|
||||
"github.com/influxdata/influxdb/services/precreator"
|
||||
"github.com/influxdata/influxdb/services/retention"
|
||||
"github.com/influxdata/influxdb/services/snapshotter"
|
||||
"github.com/influxdata/influxdb/services/subscriber"
|
||||
"github.com/influxdata/influxdb/services/udp"
|
||||
"github.com/influxdata/influxdb/tcp"
|
||||
"github.com/influxdata/influxdb/tsdb"
|
||||
client "github.com/influxdata/usage-client/v1"
|
||||
"github.com/uber-go/zap"
|
||||
|
||||
// Initialize the engine & index packages
|
||||
_ "github.com/influxdata/influxdb/tsdb/engine"
|
||||
_ "github.com/influxdata/influxdb/tsdb/index"
|
||||
)
|
||||
|
||||
var startTime time.Time
|
||||
|
||||
func init() {
|
||||
startTime = time.Now().UTC()
|
||||
}
|
||||
|
||||
// BuildInfo represents the build details for the server code.
type BuildInfo struct {
	Version string // release version string
	Commit  string // VCS commit hash the binary was built from
	Branch  string // VCS branch the binary was built from
	Time    string // build timestamp (opaque string supplied by the build)
}
|
||||
|
||||
// Server represents a container for the metadata and storage data and services.
// It is built using a Config and it manages the startup and shutdown of all
// services in the proper order.
type Server struct {
	buildInfo BuildInfo

	// err carries out-of-band service errors; closing is closed by Close()
	// to signal background goroutines (e.g. startServerReporting) to stop.
	err     chan error
	closing chan struct{}

	// BindAddress is the address the shared TCP listener binds to.
	BindAddress string
	Listener    net.Listener

	Logger zap.Logger

	MetaClient *meta.Client

	TSDBStore     *tsdb.Store
	QueryExecutor *influxql.QueryExecutor
	PointsWriter  *coordinator.PointsWriter
	Subscriber    *subscriber.Service

	// Services holds every attached service; they are opened in order by
	// Open() and closed in order by Close().
	Services []Service

	// These references are required for the tcp muxer.
	SnapshotterService *snapshotter.Service

	Monitor *monitor.Monitor

	// Server reporting and registration
	reportingDisabled bool

	// Profiling: file paths for CPU/memory profiles, empty to disable.
	CPUProfile string
	MemProfile string

	// httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data
	httpAPIAddr string

	// httpUseTLS specifies if we should use a TLS connection to the http servers
	httpUseTLS bool

	// tcpAddr is the host:port combination for the TCP listener that services mux onto
	tcpAddr string

	config *Config
}
|
||||
|
||||
// NewServer returns a new instance of Server built from a config.
// It creates the meta directory, migrates legacy node.json locations,
// refuses to start over a pre-0.12 raft.db, opens the meta client, and
// wires together the TSDB store, points writer and query executor.
// The returned Server still needs Open() to start listening.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// We need to ensure that a meta directory always exists even if
	// we don't start the meta store. node.json is always stored under
	// the meta directory.
	if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
		return nil, fmt.Errorf("mkdir all: %s", err)
	}

	// 0.10-rc1 and prior would sometimes put the node.json at the root
	// dir which breaks backup/restore and restarting nodes. This moves
	// the file from the root so it's always under the meta dir.
	oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
	newPath := filepath.Join(c.Meta.Dir, "node.json")

	if _, err := os.Stat(oldPath); err == nil {
		if err := os.Rename(oldPath, newPath); err != nil {
			return nil, err
		}
	}

	// A missing node file is fine (fresh install); any other error is fatal.
	_, err := influxdb.LoadNode(c.Meta.Dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if err := raftDBExists(c.Meta.Dir); err != nil {
		return nil, err
	}

	// In 0.10.0 bind-address got moved to the top level. Check
	// The old location to keep things backwards compatible
	bind := c.BindAddress

	s := &Server{
		buildInfo: *buildInfo,
		err:       make(chan error),
		closing:   make(chan struct{}),

		BindAddress: bind,

		Logger: zap.New(
			zap.NewTextEncoder(),
			zap.Output(os.Stderr),
		),

		MetaClient: meta.NewClient(c.Meta),

		reportingDisabled: c.ReportingDisabled,

		httpAPIAddr: c.HTTPD.BindAddress,
		httpUseTLS:  c.HTTPD.HTTPSEnabled,
		tcpAddr:     bind,

		config: c,
	}
	// Monitor must be created after s exists since it reports on s itself.
	s.Monitor = monitor.New(s, c.Monitor)
	s.config.registerDiagnostics(s.Monitor)

	if err := s.MetaClient.Open(); err != nil {
		return nil, err
	}

	s.TSDBStore = tsdb.NewStore(c.Data.Dir)
	s.TSDBStore.EngineOptions.Config = c.Data

	// Copy TSDB configuration.
	s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
	s.TSDBStore.EngineOptions.IndexVersion = c.Data.Index

	// Create the Subscriber service
	s.Subscriber = subscriber.NewService(c.Subscriber)

	// Initialize points writer.
	s.PointsWriter = coordinator.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout)
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.Subscriber = s.Subscriber

	// Initialize query executor.
	s.QueryExecutor = influxql.NewQueryExecutor()
	s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{
		MetaClient:  s.MetaClient,
		TaskManager: s.QueryExecutor.TaskManager,
		TSDBStore:   coordinator.LocalTSDBStore{Store: s.TSDBStore},
		ShardMapper: &coordinator.LocalShardMapper{
			MetaClient: s.MetaClient,
			TSDBStore:  coordinator.LocalTSDBStore{Store: s.TSDBStore},
		},
		Monitor:           s.Monitor,
		PointsWriter:      s.PointsWriter,
		MaxSelectPointN:   c.Coordinator.MaxSelectPointN,
		MaxSelectSeriesN:  c.Coordinator.MaxSelectSeriesN,
		MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN,
	}
	s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout)
	s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter)
	s.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries

	// Initialize the monitor with the build metadata it reports.
	s.Monitor.Version = s.buildInfo.Version
	s.Monitor.Commit = s.buildInfo.Commit
	s.Monitor.Branch = s.buildInfo.Branch
	s.Monitor.BuildTime = s.buildInfo.Time
	s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)
	return s, nil
}
|
||||
|
||||
// Statistics returns statistics for the services running in the Server.
|
||||
func (s *Server) Statistics(tags map[string]string) []models.Statistic {
|
||||
var statistics []models.Statistic
|
||||
statistics = append(statistics, s.QueryExecutor.Statistics(tags)...)
|
||||
statistics = append(statistics, s.TSDBStore.Statistics(tags)...)
|
||||
statistics = append(statistics, s.PointsWriter.Statistics(tags)...)
|
||||
statistics = append(statistics, s.Subscriber.Statistics(tags)...)
|
||||
for _, srv := range s.Services {
|
||||
if m, ok := srv.(monitor.Reporter); ok {
|
||||
statistics = append(statistics, m.Statistics(tags)...)
|
||||
}
|
||||
}
|
||||
return statistics
|
||||
}
|
||||
|
||||
func (s *Server) appendSnapshotterService() {
|
||||
srv := snapshotter.NewService()
|
||||
srv.TSDBStore = s.TSDBStore
|
||||
srv.MetaClient = s.MetaClient
|
||||
s.Services = append(s.Services, srv)
|
||||
s.SnapshotterService = srv
|
||||
}
|
||||
|
||||
// SetLogOutput sets the logger used for all messages. It must not be called
|
||||
// after the Open method has been called.
|
||||
func (s *Server) SetLogOutput(w io.Writer) {
|
||||
s.Logger = zap.New(zap.NewTextEncoder(), zap.Output(zap.AddSync(w)))
|
||||
}
|
||||
|
||||
// appendMonitorService registers the monitor (created in NewServer) as a
// regular service so it is opened and closed alongside the others.
func (s *Server) appendMonitorService() {
	s.Services = append(s.Services, s.Monitor)
}
|
||||
|
||||
func (s *Server) appendRetentionPolicyService(c retention.Config) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
srv := retention.NewService(c)
|
||||
srv.MetaClient = s.MetaClient
|
||||
srv.TSDBStore = s.TSDBStore
|
||||
s.Services = append(s.Services, srv)
|
||||
}
|
||||
|
||||
func (s *Server) appendHTTPDService(c httpd.Config) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
srv := httpd.NewService(c)
|
||||
srv.Handler.MetaClient = s.MetaClient
|
||||
srv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient)
|
||||
srv.Handler.WriteAuthorizer = meta.NewWriteAuthorizer(s.MetaClient)
|
||||
srv.Handler.QueryExecutor = s.QueryExecutor
|
||||
srv.Handler.Monitor = s.Monitor
|
||||
srv.Handler.PointsWriter = s.PointsWriter
|
||||
srv.Handler.Version = s.buildInfo.Version
|
||||
|
||||
s.Services = append(s.Services, srv)
|
||||
}
|
||||
|
||||
func (s *Server) appendCollectdService(c collectd.Config) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
srv := collectd.NewService(c)
|
||||
srv.MetaClient = s.MetaClient
|
||||
srv.PointsWriter = s.PointsWriter
|
||||
s.Services = append(s.Services, srv)
|
||||
}
|
||||
|
||||
func (s *Server) appendOpenTSDBService(c opentsdb.Config) error {
|
||||
if !c.Enabled {
|
||||
return nil
|
||||
}
|
||||
srv, err := opentsdb.NewService(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.PointsWriter = s.PointsWriter
|
||||
srv.MetaClient = s.MetaClient
|
||||
s.Services = append(s.Services, srv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) appendGraphiteService(c graphite.Config) error {
|
||||
if !c.Enabled {
|
||||
return nil
|
||||
}
|
||||
srv, err := graphite.NewService(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srv.PointsWriter = s.PointsWriter
|
||||
srv.MetaClient = s.MetaClient
|
||||
srv.Monitor = s.Monitor
|
||||
s.Services = append(s.Services, srv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) appendPrecreatorService(c precreator.Config) error {
|
||||
if !c.Enabled {
|
||||
return nil
|
||||
}
|
||||
srv, err := precreator.NewService(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srv.MetaClient = s.MetaClient
|
||||
s.Services = append(s.Services, srv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) appendUDPService(c udp.Config) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
srv := udp.NewService(c)
|
||||
srv.PointsWriter = s.PointsWriter
|
||||
srv.MetaClient = s.MetaClient
|
||||
s.Services = append(s.Services, srv)
|
||||
}
|
||||
|
||||
func (s *Server) appendContinuousQueryService(c continuous_querier.Config) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
srv := continuous_querier.NewService(c)
|
||||
srv.MetaClient = s.MetaClient
|
||||
srv.QueryExecutor = s.QueryExecutor
|
||||
s.Services = append(s.Services, srv)
|
||||
}
|
||||
|
||||
// Err returns an error channel that multiplexes all out of band errors received from all services.
func (s *Server) Err() <-chan error { return s.err }
|
||||
|
||||
// Open opens the meta and data store and all services.
|
||||
func (s *Server) Open() error {
|
||||
// Start profiling, if set.
|
||||
startProfile(s.CPUProfile, s.MemProfile)
|
||||
|
||||
// Open shared TCP connection.
|
||||
ln, err := net.Listen("tcp", s.BindAddress)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %s", err)
|
||||
}
|
||||
s.Listener = ln
|
||||
|
||||
// Multiplex listener.
|
||||
mux := tcp.NewMux()
|
||||
go mux.Serve(ln)
|
||||
|
||||
// Append services.
|
||||
s.appendMonitorService()
|
||||
s.appendPrecreatorService(s.config.Precreator)
|
||||
s.appendSnapshotterService()
|
||||
s.appendContinuousQueryService(s.config.ContinuousQuery)
|
||||
s.appendHTTPDService(s.config.HTTPD)
|
||||
s.appendRetentionPolicyService(s.config.Retention)
|
||||
for _, i := range s.config.GraphiteInputs {
|
||||
if err := s.appendGraphiteService(i); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, i := range s.config.CollectdInputs {
|
||||
s.appendCollectdService(i)
|
||||
}
|
||||
for _, i := range s.config.OpenTSDBInputs {
|
||||
if err := s.appendOpenTSDBService(i); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, i := range s.config.UDPInputs {
|
||||
s.appendUDPService(i)
|
||||
}
|
||||
|
||||
s.Subscriber.MetaClient = s.MetaClient
|
||||
s.Subscriber.MetaClient = s.MetaClient
|
||||
s.PointsWriter.MetaClient = s.MetaClient
|
||||
s.Monitor.MetaClient = s.MetaClient
|
||||
|
||||
s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)
|
||||
|
||||
// Configure logging for all services and clients.
|
||||
if s.config.Meta.LoggingEnabled {
|
||||
s.MetaClient.WithLogger(s.Logger)
|
||||
}
|
||||
s.TSDBStore.WithLogger(s.Logger)
|
||||
if s.config.Data.QueryLogEnabled {
|
||||
s.QueryExecutor.WithLogger(s.Logger)
|
||||
}
|
||||
s.PointsWriter.WithLogger(s.Logger)
|
||||
s.Subscriber.WithLogger(s.Logger)
|
||||
for _, svc := range s.Services {
|
||||
svc.WithLogger(s.Logger)
|
||||
}
|
||||
s.SnapshotterService.WithLogger(s.Logger)
|
||||
s.Monitor.WithLogger(s.Logger)
|
||||
|
||||
// Open TSDB store.
|
||||
if err := s.TSDBStore.Open(); err != nil {
|
||||
return fmt.Errorf("open tsdb store: %s", err)
|
||||
}
|
||||
|
||||
// Open the subcriber service
|
||||
if err := s.Subscriber.Open(); err != nil {
|
||||
return fmt.Errorf("open subscriber: %s", err)
|
||||
}
|
||||
|
||||
// Open the points writer service
|
||||
if err := s.PointsWriter.Open(); err != nil {
|
||||
return fmt.Errorf("open points writer: %s", err)
|
||||
}
|
||||
|
||||
for _, service := range s.Services {
|
||||
if err := service.Open(); err != nil {
|
||||
return fmt.Errorf("open service: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the reporting service, if not disabled.
|
||||
if !s.reportingDisabled {
|
||||
go s.startServerReporting()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close shuts down the meta and data stores and all services.
// Shutdown order matters: the listener stops new connections first, then
// services drain, then the write/query paths close, and the meta client
// goes last. Finally the closing channel is closed to stop background
// goroutines such as startServerReporting. It always returns nil.
func (s *Server) Close() error {
	stopProfile()

	// Close the listener first to stop any new connections
	if s.Listener != nil {
		s.Listener.Close()
	}

	// Close services to allow any inflight requests to complete
	// and prevent new requests from being accepted.
	for _, service := range s.Services {
		service.Close()
	}

	s.config.deregisterDiagnostics(s.Monitor)

	if s.PointsWriter != nil {
		s.PointsWriter.Close()
	}

	if s.QueryExecutor != nil {
		s.QueryExecutor.Close()
	}

	// Close the TSDBStore, no more reads or writes at this point
	if s.TSDBStore != nil {
		s.TSDBStore.Close()
	}

	if s.Subscriber != nil {
		s.Subscriber.Close()
	}

	if s.MetaClient != nil {
		s.MetaClient.Close()
	}

	// Signal background goroutines to exit. NOTE(review): Close is not
	// idempotent — a second call would panic here; confirm callers only
	// call it once.
	close(s.closing)
	return nil
}
|
||||
|
||||
// startServerReporting starts periodic server reporting.
|
||||
func (s *Server) startServerReporting() {
|
||||
s.reportServer()
|
||||
|
||||
ticker := time.NewTicker(24 * time.Hour)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-s.closing:
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.reportServer()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reportServer reports usage statistics about the system.
// It gathers database, series and measurement counts from the meta client
// and TSDB store, then sends them to usage.influxdata.com asynchronously.
func (s *Server) reportServer() {
	dbs := s.MetaClient.Databases()
	numDatabases := len(dbs)

	var (
		numMeasurements int64
		numSeries       int64
	)

	for _, db := range dbs {
		name := db.Name
		// Cardinality failures are logged and skipped so one bad database
		// does not prevent reporting the rest.
		n, err := s.TSDBStore.SeriesCardinality(name)
		if err != nil {
			s.Logger.Error(fmt.Sprintf("Unable to get series cardinality for database %s: %v", name, err))
		} else {
			numSeries += n
		}

		n, err = s.TSDBStore.MeasurementsCardinality(name)
		if err != nil {
			s.Logger.Error(fmt.Sprintf("Unable to get measurement cardinality for database %s: %v", name, err))
		} else {
			numMeasurements += n
		}
	}

	clusterID := s.MetaClient.ClusterID()
	cl := client.New("")
	usage := client.Usage{
		Product: "influxdb",
		Data: []client.UsageData{
			{
				Values: client.Values{
					"os":               runtime.GOOS,
					"arch":             runtime.GOARCH,
					"version":          s.buildInfo.Version,
					"cluster_id":       fmt.Sprintf("%v", clusterID),
					"num_series":       numSeries,
					"num_measurements": numMeasurements,
					"num_databases":    numDatabases,
					"uptime":           time.Since(startTime).Seconds(),
				},
			},
		},
	}

	s.Logger.Info("Sending usage statistics to usage.influxdata.com")

	// Fire-and-forget: the Save error is intentionally dropped since
	// reporting is best-effort.
	go cl.Save(usage)
}
|
||||
|
||||
// Service represents a service attached to the server.
// Implementations are collected in Server.Services; WithLogger is called
// before Open, and Close is called during server shutdown.
type Service interface {
	WithLogger(log zap.Logger)
	Open() error
	Close() error
}
|
||||
|
||||
// prof stores the file locations of active profiles.
// Written by startProfile and read/closed by stopProfile; a nil field
// means that profile kind is not active.
var prof struct {
	cpu *os.File
	mem *os.File
}
|
||||
|
||||
// StartProfile initializes the cpu and memory profile, if specified.
|
||||
func startProfile(cpuprofile, memprofile string) {
|
||||
if cpuprofile != "" {
|
||||
f, err := os.Create(cpuprofile)
|
||||
if err != nil {
|
||||
log.Fatalf("cpuprofile: %v", err)
|
||||
}
|
||||
log.Printf("writing CPU profile to: %s\n", cpuprofile)
|
||||
prof.cpu = f
|
||||
pprof.StartCPUProfile(prof.cpu)
|
||||
}
|
||||
|
||||
if memprofile != "" {
|
||||
f, err := os.Create(memprofile)
|
||||
if err != nil {
|
||||
log.Fatalf("memprofile: %v", err)
|
||||
}
|
||||
log.Printf("writing mem profile to: %s\n", memprofile)
|
||||
prof.mem = f
|
||||
runtime.MemProfileRate = 4096
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// StopProfile closes the cpu and memory profiles if they are running.
|
||||
func stopProfile() {
|
||||
if prof.cpu != nil {
|
||||
pprof.StopCPUProfile()
|
||||
prof.cpu.Close()
|
||||
log.Println("CPU profile stopped")
|
||||
}
|
||||
if prof.mem != nil {
|
||||
pprof.Lookup("heap").WriteTo(prof.mem, 0)
|
||||
prof.mem.Close()
|
||||
log.Println("mem profile stopped")
|
||||
}
|
||||
}
|
||||
|
||||
// monitorPointsWriter is a wrapper around `coordinator.PointsWriter` that helps
// to prevent a circular dependency between the `cluster` and `monitor` packages.
type monitorPointsWriter coordinator.PointsWriter
|
||||
|
||||
// WritePoints writes monitor points with "any" consistency by delegating to
// the underlying PointsWriter's privileged write path.
func (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error {
	return (*coordinator.PointsWriter)(pw).WritePointsPrivileged(database, retentionPolicy, models.ConsistencyLevelAny, points)
}
|
||||
|
||||
// raftDBExists returns an error when a legacy raft.db file is present in
// dir, since metadata must be migrated before the server can start.
func raftDBExists(dir string) error {
	// Check to see if there is a raft db, if so, error out with a message
	// to downgrade, export, and then import the meta data
	raftFile := filepath.Join(dir, "raft.db")
	if _, err := os.Stat(raftFile); err != nil {
		// File absent (or unreadable): nothing to migrate.
		return nil
	}
	return fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile)
}
|
Reference in New Issue
Block a user