Mirror of https://github.com/Oxalide/vsphere-influxdb-go.git (synced 2023-10-10 11:36:51 +00:00)

add vendoring with go dep

Author: Adrian Todorov
Date:   2017-10-25 20:52:40 +00:00
Parent: 704f4d20d1
Commit: a59409f16b

1627 changed files with 489673 additions and 0 deletions
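For context: dep, the pre-modules Go dependency tool, records version constraints in Gopkg.toml, pins exact revisions in Gopkg.lock, and copies dependency sources into vendor/ when you run dep init and dep ensure, which is how a vendored tree like this one is produced. A hypothetical Gopkg.toml sketch (the constraints below are illustrative only, not taken from this commit):

# Illustrative Gopkg.toml sketch; actual constraints in this commit may differ.
[[constraint]]
  name = "github.com/influxdata/influxdb"
  branch = "master"

[[constraint]]
  name = "github.com/vmware/govmomi"
  branch = "master"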


@@ -0,0 +1,49 @@
package retention

import (
	"errors"
	"time"

	"github.com/influxdata/influxdb/monitor/diagnostics"
	"github.com/influxdata/influxdb/toml"
)

// Config represents the configuration for the retention service.
type Config struct {
	Enabled       bool          `toml:"enabled"`
	CheckInterval toml.Duration `toml:"check-interval"`
}

// NewConfig returns an instance of Config with defaults.
func NewConfig() Config {
	return Config{Enabled: true, CheckInterval: toml.Duration(30 * time.Minute)}
}

// Validate returns an error if the Config is invalid.
func (c Config) Validate() error {
	if !c.Enabled {
		return nil
	}

	// TODO: Should we enforce a minimum interval?
	// Polling every nanosecond, for instance, will greatly impact performance.
	if c.CheckInterval <= 0 {
		return errors.New("check-interval must be positive")
	}

	return nil
}

// Diagnostics returns a diagnostics representation of a subset of the Config.
func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
	if !c.Enabled {
		return diagnostics.RowFromMap(map[string]interface{}{
			"enabled": false,
		}), nil
	}

	return diagnostics.RowFromMap(map[string]interface{}{
		"enabled":        true,
		"check-interval": c.CheckInterval,
	}), nil
}
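In an InfluxDB configuration file this struct corresponds to the [retention] section; a stanza matching the defaults from NewConfig would look like this (shown for reference, not part of this commit):

[retention]
  enabled = true
  check-interval = "30m"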


@@ -0,0 +1,46 @@
package retention_test

import (
	"testing"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/influxdata/influxdb/services/retention"
)

func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c retention.Config
	if _, err := toml.Decode(`
enabled = true
check-interval = "1s"
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != true {
		t.Fatalf("unexpected enabled state: %v", c.Enabled)
	} else if time.Duration(c.CheckInterval) != time.Second {
		t.Fatalf("unexpected check interval: %v", c.CheckInterval)
	}
}

func TestConfig_Validate(t *testing.T) {
	c := retention.NewConfig()
	if err := c.Validate(); err != nil {
		t.Fatalf("unexpected validation fail from NewConfig: %s", err)
	}

	c = retention.NewConfig()
	c.CheckInterval = 0
	if err := c.Validate(); err == nil {
		t.Fatal("expected error for check-interval = 0, got nil")
	}

	c = retention.NewConfig()
	c.CheckInterval *= -1
	if err := c.Validate(); err == nil {
		t.Fatal("expected error for negative check-interval, got nil")
	}
}
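The check-interval = "1s" string above decodes into a toml.Duration because influxdb's toml package satisfies encoding.TextUnmarshaler; a simplified sketch of such a type (the real influxdb/toml implementation may differ in detail):

package toml

import "time"

// Duration is a time.Duration that decodes from a TOML string such as "1s".
type Duration time.Duration

// UnmarshalText parses the duration string, satisfying encoding.TextUnmarshaler.
func (d *Duration) UnmarshalText(text []byte) error {
	dur, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*d = Duration(dur)
	return nil
}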


@@ -0,0 +1,137 @@
// Package retention provides the retention policy enforcement service.
package retention // import "github.com/influxdata/influxdb/services/retention"

import (
	"fmt"
	"sync"
	"time"

	"github.com/influxdata/influxdb/services/meta"
	"github.com/uber-go/zap"
)

// Service represents the retention policy enforcement service.
type Service struct {
	MetaClient interface {
		Databases() []meta.DatabaseInfo
		DeleteShardGroup(database, policy string, id uint64) error
		PruneShardGroups() error
	}
	TSDBStore interface {
		ShardIDs() []uint64
		DeleteShard(shardID uint64) error
	}

	checkInterval time.Duration
	wg            sync.WaitGroup
	done          chan struct{}

	logger zap.Logger
}

// NewService returns a configured retention policy enforcement service.
func NewService(c Config) *Service {
	return &Service{
		checkInterval: time.Duration(c.CheckInterval),
		done:          make(chan struct{}),
		logger:        zap.New(zap.NullEncoder()),
	}
}

// Open starts retention policy enforcement.
func (s *Service) Open() error {
	s.logger.Info(fmt.Sprint("Starting retention policy enforcement service with check interval of ", s.checkInterval))
	s.wg.Add(2)
	go s.deleteShardGroups()
	go s.deleteShards()
	return nil
}

// Close stops retention policy enforcement.
func (s *Service) Close() error {
	s.logger.Info("retention policy enforcement terminating")
	close(s.done)
	s.wg.Wait()
	return nil
}

// WithLogger sets the logger on the service.
func (s *Service) WithLogger(log zap.Logger) {
	s.logger = log.With(zap.String("service", "retention"))
}

// deleteShardGroups periodically asks the meta client for expired shard groups
// and deletes them from the meta store.
func (s *Service) deleteShardGroups() {
	defer s.wg.Done()

	ticker := time.NewTicker(s.checkInterval)
	defer ticker.Stop()
	for {
		select {
		case <-s.done:
			return

		case <-ticker.C:
			dbs := s.MetaClient.Databases()
			for _, d := range dbs {
				for _, r := range d.RetentionPolicies {
					for _, g := range r.ExpiredShardGroups(time.Now().UTC()) {
						if err := s.MetaClient.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil {
							s.logger.Info(fmt.Sprintf("failed to delete shard group %d from database %s, retention policy %s: %s",
								g.ID, d.Name, r.Name, err.Error()))
						} else {
							s.logger.Info(fmt.Sprintf("deleted shard group %d from database %s, retention policy %s",
								g.ID, d.Name, r.Name))
						}
					}
				}
			}
		}
	}
}

// deleteShards periodically deletes local shards whose shard groups have been
// marked as deleted in the meta store, then prunes the empty shard groups.
func (s *Service) deleteShards() {
	defer s.wg.Done()

	ticker := time.NewTicker(s.checkInterval)
	defer ticker.Stop()
	for {
		select {
		case <-s.done:
			return

		case <-ticker.C:
			s.logger.Info("retention policy shard deletion check commencing")

			type deletionInfo struct {
				db string
				rp string
			}
			deletedShardIDs := make(map[uint64]deletionInfo)

			dbs := s.MetaClient.Databases()
			for _, d := range dbs {
				for _, r := range d.RetentionPolicies {
					for _, g := range r.DeletedShardGroups() {
						for _, sh := range g.Shards {
							deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name}
						}
					}
				}
			}

			for _, id := range s.TSDBStore.ShardIDs() {
				if info, ok := deletedShardIDs[id]; ok {
					if err := s.TSDBStore.DeleteShard(id); err != nil {
						s.logger.Error(fmt.Sprintf("failed to delete shard ID %d from database %s, retention policy %s: %s",
							id, info.db, info.rp, err.Error()))
						continue
					}
					s.logger.Info(fmt.Sprintf("shard ID %d from database %s, retention policy %s, deleted",
						id, info.db, info.rp))
				}
			}

			if err := s.MetaClient.PruneShardGroups(); err != nil {
				s.logger.Info(fmt.Sprintf("error pruning shard groups: %s", err))
			}
		}
	}
}
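Because MetaClient and TSDBStore are anonymous interface fields, the service can be wired with lightweight fakes and exercised end to end. A minimal sketch (the fake types and test below are illustrative, not part of this commit):

package retention_test

import (
	"testing"
	"time"

	"github.com/influxdata/influxdb/services/meta"
	"github.com/influxdata/influxdb/services/retention"
	"github.com/influxdata/influxdb/toml"
)

// fakeMeta satisfies the MetaClient interface with no-op behavior.
type fakeMeta struct{}

func (fakeMeta) Databases() []meta.DatabaseInfo                            { return nil }
func (fakeMeta) DeleteShardGroup(database, policy string, id uint64) error { return nil }
func (fakeMeta) PruneShardGroups() error                                   { return nil }

// fakeStore satisfies the TSDBStore interface with no-op behavior.
type fakeStore struct{}

func (fakeStore) ShardIDs() []uint64               { return nil }
func (fakeStore) DeleteShard(shardID uint64) error { return nil }

func TestService_OpenClose(t *testing.T) {
	c := retention.NewConfig()
	c.CheckInterval = toml.Duration(10 * time.Millisecond)

	s := retention.NewService(c)
	s.MetaClient = fakeMeta{}
	s.TSDBStore = fakeStore{}

	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	time.Sleep(50 * time.Millisecond) // let at least one check fire
	if err := s.Close(); err != nil {
		t.Fatal(err)
	}
}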