1
0
mirror of https://github.com/Oxalide/vsphere-influxdb-go.git synced 2023-10-10 11:36:51 +00:00

add vendoring with go dep

This commit is contained in:
Adrian Todorov
2017-10-25 20:52:40 +00:00
parent 704f4d20d1
commit a59409f16b
1627 changed files with 489673 additions and 0 deletions

174
vendor/github.com/influxdata/influxdb/tests/README.md generated vendored Normal file
View File

@@ -0,0 +1,174 @@
# Server Integration Tests
This directory contains integration tests for the database.
To run them using an in-process local server:
```sh
go test ./tests
```
They can also be run against a remote server running in a separate process
or on a separate machine:
```sh
URL=http://127.0.0.1:8086 go test -parallel 1 ./tests
```
When running tests against a remote server, `-parallel 1` is currently needed
as many of the tests use the same DB and RP names which causes tests to fail
when run concurrently.
When adding tests, try to add tests that will always work for remote server usage.
## Structure
Currently, the file `server_test.go` has integration tests for single node scenarios.
At some point we'll need to add cluster tests, and may add them in a different file, or
rename `server_test.go` to `server_single_node_test.go` or something like that.
## What is in a test?
Each test is broken apart effectively into the following areas:
- Write sample data
- Use cases for table driven test, that include a command (typically a query) and an expected result.
When each test runs it does the following:
- init: determines if there are any writes and if so, writes them to the in-memory database
- queries: iterate through each query, executing the command, and comparing the results to the expected result.
## Idempotent - Allows for parallel tests
Each test should be `idempotent`, meaning that its data will not be affected by other tests, or use cases within the table tests themselves.
This allows for parallel testing, keeping the test suite total execution time very low.
### Basic sample test
```go
// Ensure the server can have a database with multiple measurements.
func TestServer_Query_Multiple_Measurements(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
// Make sure we do writes for measurements that will span across shards
writes := []string{
fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
}
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "measurement in one shard but not another shouldn't panic server",
command: `SELECT host,value FROM db0.rp0.cpu`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
```
Let's break this down:
In this test, we first tell it to run in parallel with the `t.Parallel()` call.
We then open a new server with:
```go
s := OpenServer(NewConfig(), "")
defer s.Close()
```
If needed, we create a database and default retention policy. This is usually needed
when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc.
```go
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
```
Next, set up the write data you need:
```go
writes := []string{
fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
}
```
Create a new test with the database and retention policy:
```go
test := NewTest("db0", "rp0")
```
Send in the writes:
```go
test.write = strings.Join(writes, "\n")
```
Add some queries (the second one is mocked out to show how to add more than one):
```go
test.addQueries([]*Query{
&Query{
name: "measurement in one shard but not another shouldn't panic server",
command: `SELECT host,value FROM db0.rp0.cpu`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
},
&Query{
name: "another test here...",
command: `Some query command`,
exp: `the expected results`,
},
}...)
```
The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper
to make sure the test failure reports the proper lines for debugging purposes.
#### Running the tests
To run the tests:
```sh
go test ./cmd/influxd/run -parallel 500 -timeout 10s
```
#### Running a specific test
```sh
go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill
```
#### Verbose feedback
By default, all logs are silenced when testing. If you pass in the `-v` flag, the test suite becomes verbose, and enables all logging in the system
```sh
go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v
```

View File

@@ -0,0 +1,109 @@
package tests
import (
"io/ioutil"
"net"
"os"
"path/filepath"
"testing"
"time"
"github.com/influxdata/influxdb/cmd/influxd/backup"
"github.com/influxdata/influxdb/cmd/influxd/restore"
)
// TestServer_BackupAndRestore exercises the full backup/restore cycle:
// write a point, snapshot it to a TSM file, back the database up over the
// wire, verify the data directories are gone, restore from the backup
// directory, and finally confirm the original point is queryable again.
func TestServer_BackupAndRestore(t *testing.T) {
	config := NewConfig()
	config.Data.Engine = "tsm1"
	config.Data.Dir, _ = ioutil.TempDir("", "data_backup")
	config.Meta.Dir, _ = ioutil.TempDir("", "meta_backup")
	config.BindAddress = freePort()

	backupDir, _ := ioutil.TempDir("", "backup")
	defer os.RemoveAll(backupDir)

	db := "mydb"
	rp := "forever"
	expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23]]}]}]}`

	// Set the cache snapshot size low so that a single point will cause
	// TSM file creation.
	config.Data.CacheSnapshotMemorySize = 1

	// Run the write-and-backup phase inside a closure so the deferred
	// s.Close() (which deletes the meta/data dirs) runs before the
	// restore phase below.
	func() {
		s := OpenServer(config)
		defer s.Close()

		if _, ok := s.(*RemoteServer); ok {
			t.Skip("Skipping. Cannot modify remote server config")
		}

		if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0), true); err != nil {
			t.Fatal(err)
		}
		if _, err := s.Write(db, rp, "myseries,host=A value=23 1000000", nil); err != nil {
			t.Fatalf("failed to write: %s", err)
		}

		// Wait for the snapshot to write.
		time.Sleep(time.Second)

		res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
		if err != nil {
			t.Fatalf("error querying: %s", err.Error())
		}
		if res != expected {
			t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
		}

		// Now back the database up over the HTTP API.
		cmd := backup.NewCommand()
		_, port, err := net.SplitHostPort(config.BindAddress)
		if err != nil {
			t.Fatal(err)
		}
		hostAddress := net.JoinHostPort("localhost", port)
		if err := cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil {
			t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
		}
	}()

	// s.Close() must have removed both directories before we restore into them.
	if _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) {
		t.Fatalf("meta dir should be deleted")
	}
	// Fixed message: this check is for the data dir, not the meta dir.
	if _, err := os.Stat(config.Data.Dir); err == nil || !os.IsNotExist(err) {
		t.Fatalf("data dir should be deleted")
	}

	// Restore from the backup directory into fresh meta/data dirs.
	cmd := restore.NewCommand()
	if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil {
		t.Fatalf("error restoring: %s", err.Error())
	}

	// Make sure node.json was restored. (The old `err != nil || os.IsNotExist(err)`
	// had a dead second clause: IsNotExist(nil) is always false.)
	nodePath := filepath.Join(config.Meta.Dir, "node.json")
	if _, err := os.Stat(nodePath); err != nil {
		t.Fatalf("node.json should exist")
	}

	// Now open it up and verify we're good.
	s := OpenServer(config)
	defer s.Close()

	res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
	if err != nil {
		t.Fatalf("error querying: %s", err.Error())
	}
	if res != expected {
		t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
	}
}
func freePort() string {
l, _ := net.Listen("tcp", "")
defer l.Close()
return l.Addr().String()
}

View File

@@ -0,0 +1,136 @@
package tests
import (
"bytes"
"fmt"
"net/url"
"testing"
)
// strResult is a package-level sink for benchmark query results; assigning
// to it keeps the compiler from optimizing the measured call away.
var strResult string

// Simple COUNT benchmarks at increasing cardinality (1 point up to 1M).
func BenchmarkServer_Query_Count_1(b *testing.B)    { benchmarkServerQueryCount(b, 1) }
func BenchmarkServer_Query_Count_1K(b *testing.B)   { benchmarkServerQueryCount(b, 1000) }
func BenchmarkServer_Query_Count_100K(b *testing.B) { benchmarkServerQueryCount(b, 100000) }
func BenchmarkServer_Query_Count_1M(b *testing.B)   { benchmarkServerQueryCount(b, 1000000) }
// benchmarkServerQueryCount measures SELECT count(value) over a measurement
// holding pointN points. The measurement is dropped and re-seeded on every
// invocation so runs are independent of each other.
func benchmarkServerQueryCount(b *testing.B, pointN int) {
	if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {
		b.Fatal(err)
	}

	// Seed the server with pointN points, one per nanosecond starting at 1.
	var sb bytes.Buffer
	for i := 1; i <= pointN; i++ {
		fmt.Fprintf(&sb, `cpu value=100 %d`, i)
		if i != pointN {
			sb.WriteByte('\n')
		}
	}
	benchServer.MustWrite("db0", "rp0", sb.String(), nil)

	// Measure the count query; verify the exact result each iteration.
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		res, err := benchServer.Query(`SELECT count(value) FROM db0.rp0.cpu`)
		if err != nil {
			b.Fatal(err)
		}
		strResult = res
		if strResult != fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",%d]]}]}]}`, pointN) {
			b.Fatalf("unexpected result: %s", strResult)
		}
	}
}
// COUNT-with-WHERE benchmarks, in exact-match and regex variants, at
// increasing series cardinality.
func BenchmarkServer_Query_Count_Where_500(b *testing.B) {
	benchmarkServerQueryCountWhere(b, false, 500)
}
func BenchmarkServer_Query_Count_Where_1K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, false, 1000)
}
func BenchmarkServer_Query_Count_Where_10K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, false, 10000)
}
func BenchmarkServer_Query_Count_Where_100K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, false, 100000)
}
func BenchmarkServer_Query_Count_Where_Regex_500(b *testing.B) {
	benchmarkServerQueryCountWhere(b, true, 500)
}
func BenchmarkServer_Query_Count_Where_Regex_1K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, true, 1000)
}
func BenchmarkServer_Query_Count_Where_Regex_10K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, true, 10000)
}
func BenchmarkServer_Query_Count_Where_Regex_100K(b *testing.B) {
	benchmarkServerQueryCountWhere(b, true, 100000)
}
// benchmarkServerQueryCountWhere measures SELECT count(value) constrained by
// a WHERE clause on the host tag — either an exact match or a regex — against
// a measurement of pointN distinct series.
func benchmarkServerQueryCountWhere(b *testing.B, useRegex bool, pointN int) {
	if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {
		b.Fatal(err)
	}

	// One point per host tag, so the WHERE clause filters pointN series.
	var sb bytes.Buffer
	for i := 0; i < pointN; i++ {
		fmt.Fprintf(&sb, `cpu,host=server-%d value=100 %d`, i, i)
		if i != pointN-1 {
			sb.WriteByte('\n')
		}
	}
	benchServer.MustWrite("db0", "rp0", sb.String(), nil)

	// Pick the predicate form under test.
	condition := `host = 'server-487'`
	if useRegex {
		condition = `host =~ /^server-487$/`
	}

	b.ResetTimer()
	b.ReportAllocs()
	var err error
	for n := 0; n < b.N; n++ {
		strResult, err = benchServer.Query(fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE %s`, condition))
		if err != nil {
			b.Fatal(err)
		}
		if strResult == `{"results":[{}]}` {
			b.Fatal("no results")
		}
	}
}
// SHOW SERIES benchmarks at increasing series cardinality.
func BenchmarkServer_ShowSeries_1(b *testing.B)    { benchmarkServerShowSeries(b, 1) }
func BenchmarkServer_ShowSeries_1K(b *testing.B)   { benchmarkServerShowSeries(b, 1000) }
func BenchmarkServer_ShowSeries_100K(b *testing.B) { benchmarkServerShowSeries(b, 100000) }
func BenchmarkServer_ShowSeries_1M(b *testing.B)   { benchmarkServerShowSeries(b, 1000000) }
// benchmarkServerShowSeries measures SHOW SERIES against a measurement with
// pointN distinct series (one host tag value per point).
func benchmarkServerShowSeries(b *testing.B, pointN int) {
	if _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {
		b.Fatal(err)
	}

	// Write one point per host so SHOW SERIES has pointN entries to return.
	var sb bytes.Buffer
	for i := 0; i < pointN; i++ {
		fmt.Fprintf(&sb, `cpu,host=server%d value=100 %d`, i, i+1)
		if i != pointN-1 {
			sb.WriteByte('\n')
		}
	}
	benchServer.MustWrite("db0", "rp0", sb.String(), nil)

	b.ResetTimer()
	b.ReportAllocs()
	var err error
	for n := 0; n < b.N; n++ {
		if strResult, err = benchServer.QueryWithParams(`SHOW SERIES`, url.Values{"db": {"db0"}}); err != nil {
			b.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,730 @@
// This package is a set of convenience helpers and structs to make integration testing easier
package tests
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"sync"
"testing"
"time"
"github.com/influxdata/influxdb/cmd/influxd/run"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/services/httpd"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/toml"
)
// Server represents a test wrapper for run.Server. It is implemented both by
// LocalServer (in-process) and RemoteServer (HTTP against an external
// process), so the same integration tests can run in either mode.
type Server interface {
	// URL returns the base URL of the server's HTTP endpoint.
	URL() string
	// Open starts the server (or pings a remote one).
	Open() error
	// SetLogOutput redirects server logging (a no-op for remote servers).
	SetLogOutput(w io.Writer)
	// Close shuts the server down and releases its resources.
	Close()
	// Closed reports whether Close has been called.
	Closed() bool

	CreateDatabase(db string) (*meta.DatabaseInfo, error)
	CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error
	CreateSubscription(database, rp, name, mode string, destinations []string) error
	// Reset drops all databases, returning the server to a clean state.
	Reset() error

	Query(query string) (results string, err error)
	QueryWithParams(query string, values url.Values) (results string, err error)
	Write(db, rp, body string, params url.Values) (results string, err error)
	MustWrite(db, rp, body string, params url.Values) string
	WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error
}
// RemoteServer is a Server that is accessed remotely via the HTTP API.
type RemoteServer struct {
	*client
	url string
}

// URL returns the remote endpoint's base URL as configured at creation.
func (s *RemoteServer) URL() string {
	return s.url
}
// Open verifies the remote server is reachable by hitting its /ping
// endpoint; a 204 No Content response means the server is up.
func (s *RemoteServer) Open() error {
	resp, err := http.Get(s.URL() + "/ping")
	if err != nil {
		return err
	}
	// Close the body (previously leaked) so the connection can be reused.
	defer resp.Body.Close()

	body := strings.TrimSpace(string(MustReadAll(resp.Body)))
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
	}
	return nil
}
// Close is a no-op: a test cannot shut down a remote server.
func (s *RemoteServer) Close() {
	// ignore, we can't shutdown a remote server
}

// SetLogOutput is a no-op: a test cannot change a remote server's logging.
func (s *RemoteServer) SetLogOutput(w io.Writer) {
	// ignore, we can't change the logging of a remote server
}

// Closed always reports true; the remote server's lifecycle is not ours.
func (s *RemoteServer) Closed() bool {
	return true
}

// CreateDatabase issues CREATE DATABASE over the HTTP query API. The
// returned DatabaseInfo is an empty placeholder — the HTTP API does not
// return database metadata. Statements are pre-URL-encoded ('+' for
// spaces) because they are spliced directly into the query string.
func (s *RemoteServer) CreateDatabase(db string) (*meta.DatabaseInfo, error) {
	stmt := fmt.Sprintf("CREATE+DATABASE+%s", db)
	_, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil)
	if err != nil {
		return nil, err
	}
	return &meta.DatabaseInfo{}, nil
}

// CreateDatabaseAndRetentionPolicy creates the database and then a
// retention policy on it, optionally marking the policy as the default.
func (s *RemoteServer) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error {
	if _, err := s.CreateDatabase(db); err != nil {
		return err
	}
	stmt := fmt.Sprintf("CREATE+RETENTION+POLICY+%s+ON+\"%s\"+DURATION+%s+REPLICATION+%v+SHARD+DURATION+%s",
		rp.Name, db, rp.Duration, *rp.ReplicaN, rp.ShardGroupDuration)
	if makeDefault {
		stmt += "+DEFAULT"
	}
	_, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil)
	if err != nil {
		return err
	}
	return nil
}

// CreateSubscription issues CREATE SUBSCRIPTION for the given database and
// retention policy; destinations are single-quoted and comma-joined.
func (s *RemoteServer) CreateSubscription(database, rp, name, mode string, destinations []string) error {
	dests := make([]string, 0, len(destinations))
	for _, d := range destinations {
		dests = append(dests, "'"+d+"'")
	}
	stmt := fmt.Sprintf("CREATE+SUBSCRIPTION+%s+ON+\"%s\".\"%s\"+DESTINATIONS+%v+%s",
		name, database, rp, mode, strings.Join(dests, ","))
	_, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil)
	if err != nil {
		return err
	}
	return nil
}

// DropDatabase issues DROP DATABASE over the HTTP query API.
func (s *RemoteServer) DropDatabase(db string) error {
	stmt := fmt.Sprintf("DROP+DATABASE+%s", db)
	_, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil)
	if err != nil {
		return err
	}
	return nil
}
// Reset attempts to remove all database state by listing every database on
// the remote server and dropping each one.
func (s *RemoteServer) Reset() error {
	stmt := "SHOW+DATABASES" // was a no-arg fmt.Sprintf (vet: S1039)
	results, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil)
	if err != nil {
		return err
	}

	resp := &httpd.Response{}
	// Check the unmarshal's own error. The previous code discarded it
	// (`if resp.UnmarshalJSON(...); err != nil`) and tested the stale outer
	// err, silently accepting malformed responses.
	if err := resp.UnmarshalJSON([]byte(results)); err != nil {
		return err
	}

	// A server with no databases returns no series; nothing to drop.
	if len(resp.Results) == 0 || len(resp.Results[0].Series) == 0 {
		return nil
	}
	for _, db := range resp.Results[0].Series[0].Values {
		if err := s.DropDatabase(fmt.Sprintf("%s", db[0])); err != nil {
			return err
		}
	}
	return nil
}
// WritePoints is unsupported over the HTTP API; tests that need it must run
// against a local server.
func (s *RemoteServer) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
	panic("WritePoints not implemented")
}

// NewServer returns a new instance of Server. If the URL environment
// variable is set, the result is a RemoteServer pointed at that endpoint
// (whose state is wiped via Reset); otherwise an in-process LocalServer is
// built from c.
func NewServer(c *run.Config) Server {
	buildInfo := &run.BuildInfo{
		Version: "testServer",
		Commit:  "testCommit",
		Branch:  "testBranch",
	}

	// If URL exists, create a server that will run against a remote endpoint
	if url := os.Getenv("URL"); url != "" {
		s := &RemoteServer{
			url: url,
			client: &client{
				URLFn: func() string {
					return url
				},
			},
		}
		if err := s.Reset(); err != nil {
			panic(err.Error())
		}
		return s
	}

	// Otherwise create a local server
	// NOTE(review): the error from run.NewServer is discarded; a bad config
	// surfaces later as a nil-pointer panic on Open — confirm intended.
	srv, _ := run.NewServer(c, buildInfo)
	s := LocalServer{
		client: &client{},
		Server: srv,
		Config: c,
	}
	s.client.URLFn = s.URL
	return &s
}
// OpenServer builds a test server from c, wires up logging, and opens it.
// Panics if the server fails to open.
func OpenServer(c *run.Config) Server {
	srv := NewServer(c)
	configureLogging(srv)
	err := srv.Open()
	if err != nil {
		panic(err.Error())
	}
	return srv
}
// OpenServerWithVersion opens a test server reporting a specific version
// string in its build info (for tests that assert on the reported version).
func OpenServerWithVersion(c *run.Config, version string) Server {
	// We can't change the version of a remote server. The test needs to
	// be skipped if using this func.
	if RemoteEnabled() {
		panic("OpenServerWithVersion not support with remote server")
	}

	buildInfo := &run.BuildInfo{
		Version: version,
		Commit:  "",
		Branch:  "",
	}
	// NOTE(review): run.NewServer's error is discarded here, as in NewServer.
	srv, _ := run.NewServer(c, buildInfo)
	s := LocalServer{
		client: &client{},
		Server: srv,
		Config: c,
	}
	s.client.URLFn = s.URL

	if err := s.Open(); err != nil {
		panic(err.Error())
	}
	configureLogging(&s)

	return &s
}

// OpenDefaultServer opens a test server with a default database ("db0") &
// retention policy ("rp0", replication 1, infinite duration).
func OpenDefaultServer(c *run.Config) Server {
	s := OpenServer(c)
	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
		panic(err)
	}
	return s
}
// LocalServer is a Server that is running in-process and can be accessed
// directly. The mutex guards the embedded *run.Server, which is nilled out
// by Close.
type LocalServer struct {
	mu sync.RWMutex
	*run.Server

	*client
	Config *run.Config
}

// Close shuts down the server and removes all temporary paths. Any failure
// here panics, since a half-torn-down server would poison later tests.
func (s *LocalServer) Close() {
	s.mu.Lock()
	defer s.mu.Unlock()

	if err := s.Server.Close(); err != nil {
		panic(err.Error())
	}
	if err := os.RemoveAll(s.Config.Meta.Dir); err != nil {
		panic(err.Error())
	}
	if err := os.RemoveAll(s.Config.Data.Dir); err != nil {
		panic(err.Error())
	}

	// Nil the server so our deadlock detector goroutine can determine if we completed writes
	// without timing out
	s.Server = nil
}

// Closed reports whether Close has completed (signalled by the nil Server).
func (s *LocalServer) Closed() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.Server == nil
}

// URL returns the base URL for the httpd endpoint. It scans the running
// services for the httpd.Service and panics if none is registered.
func (s *LocalServer) URL() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, service := range s.Services {
		if service, ok := service.(*httpd.Service); ok {
			return "http://" + service.Addr().String()
		}
	}
	panic("httpd server not found in services")
}
// CreateDatabase creates a database directly through the meta client.
func (s *LocalServer) CreateDatabase(db string) (*meta.DatabaseInfo, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.MetaClient.CreateDatabase(db)
}

// CreateDatabaseAndRetentionPolicy will create the database and retention policy.
func (s *LocalServer) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if _, err := s.MetaClient.CreateDatabase(db); err != nil {
		return err
	} else if _, err := s.MetaClient.CreateRetentionPolicy(db, rp, makeDefault); err != nil {
		return err
	}
	return nil
}

// CreateSubscription registers a subscription through the meta client.
func (s *LocalServer) CreateSubscription(database, rp, name, mode string, destinations []string) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.MetaClient.CreateSubscription(database, rp, name, mode, destinations)
}

// DropDatabase removes a database through the meta client.
func (s *LocalServer) DropDatabase(db string) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.MetaClient.DropDatabase(db)
}

// Reset drops every database known to the meta client.
// NOTE(review): this takes RLock and then calls DropDatabase, which RLocks
// again — re-entrant reads deadlock if a writer is waiting; confirm no
// concurrent Close can run during Reset.
func (s *LocalServer) Reset() error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, db := range s.MetaClient.Databases() {
		if err := s.DropDatabase(db.Name); err != nil {
			return err
		}
	}
	return nil
}

// WritePoints writes points directly through the points writer, bypassing
// the HTTP API.
func (s *LocalServer) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.PointsWriter.WritePoints(database, retentionPolicy, consistencyLevel, user, points)
}
// client abstracts querying and writing to a Server using HTTP.
type client struct {
	// URLFn returns the base URL to target; supplied by the owning server.
	URLFn func() string
}

// URL returns the base URL of the server this client targets.
func (c *client) URL() string {
	return c.URLFn()
}

// Query executes a query against the server and returns the results.
func (s *client) Query(query string) (results string, err error) {
	return s.QueryWithParams(query, nil)
}

// MustQuery executes a query against the server and returns the results.
// Panics on error.
func (s *client) MustQuery(query string) string {
	results, err := s.Query(query)
	if err != nil {
		panic(err)
	}
	return results
}

// QueryWithParams executes a query with extra URL parameters and returns
// the results. The caller's url.Values is re-parsed from its encoding, so
// it is never mutated by the added "q" parameter.
func (s *client) QueryWithParams(query string, values url.Values) (results string, err error) {
	var v url.Values
	if values == nil {
		v = url.Values{}
	} else {
		v, _ = url.ParseQuery(values.Encode())
	}
	v.Set("q", query)
	return s.HTTPPost(s.URL()+"/query?"+v.Encode(), nil)
}

// MustQueryWithParams executes a query against the server and returns the results.
// Panics on error.
func (s *client) MustQueryWithParams(query string, values url.Values) string {
	results, err := s.QueryWithParams(query, values)
	if err != nil {
		panic(err)
	}
	return results
}
// HTTPGet makes an HTTP GET request to the server and returns the response
// body. A 400 is tolerated only when it carries a query-parse error, which
// some tests deliberately provoke.
func (s *client) HTTPGet(url string) (results string, err error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	// Close the body (previously leaked) so the connection can be reused.
	defer resp.Body.Close()

	body := strings.TrimSpace(string(MustReadAll(resp.Body)))
	switch resp.StatusCode {
	case http.StatusBadRequest:
		// Fixed pattern: was ".*error parsing query*." — the transposed "*."
		// made the final 'y' optional and required one arbitrary trailing char.
		if !expectPattern(".*error parsing query.*", body) {
			return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
		}
		return body, nil
	case http.StatusOK:
		return body, nil
	default:
		return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
	}
}
// HTTPPost makes an HTTP POST request to the server and returns the
// response body. Accepts 200/204; a 400 is tolerated only when it carries a
// query-parse error, which some tests deliberately provoke.
func (s *client) HTTPPost(url string, content []byte) (results string, err error) {
	buf := bytes.NewBuffer(content)
	resp, err := http.Post(url, "application/json", buf)
	if err != nil {
		return "", err
	}
	// Close the body (previously leaked) so the connection can be reused.
	defer resp.Body.Close()

	body := strings.TrimSpace(string(MustReadAll(resp.Body)))
	switch resp.StatusCode {
	case http.StatusBadRequest:
		// Fixed pattern: was ".*error parsing query*." — the transposed "*."
		// made the final 'y' optional and required one arbitrary trailing char.
		if !expectPattern(".*error parsing query.*", body) {
			return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
		}
		return body, nil
	case http.StatusOK, http.StatusNoContent:
		return body, nil
	default:
		return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
	}
}
// WriteError captures a non-2xx response to a write: the HTTP status code
// and the raw response body.
type WriteError struct {
	body       string
	statusCode int
}

// StatusCode returns the HTTP status code of the failed write.
func (wr WriteError) StatusCode() int { return wr.statusCode }

// Body returns the raw response body of the failed write.
func (wr WriteError) Body() string { return wr.body }

// Error formats the status code and body into a single message.
func (wr WriteError) Error() string {
	return fmt.Sprintf("invalid status code: code=%d, body=%s", wr.statusCode, wr.body)
}
// Write executes a write against the server and returns the results. The db
// and rp arguments fill in the "db"/"rp" query parameters unless params
// already provides them (note: params is mutated in that case).
func (s *client) Write(db, rp, body string, params url.Values) (results string, err error) {
	if params == nil {
		params = url.Values{}
	}
	if params.Get("db") == "" {
		params.Set("db", db)
	}
	if params.Get("rp") == "" {
		params.Set("rp", rp)
	}

	resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body))
	if err != nil {
		return "", err
	}
	// Close the body (previously leaked) so the connection can be reused.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return "", WriteError{statusCode: resp.StatusCode, body: string(MustReadAll(resp.Body))}
	}
	return string(MustReadAll(resp.Body)), nil
}
// MustWrite executes a write to the server. Panic on error.
func (s *client) MustWrite(db, rp, body string, params url.Values) string {
	res, err := s.Write(db, rp, body, params)
	if err == nil {
		return res
	}
	panic(err)
}
// NewConfig returns the default config with temporary paths. All listeners
// bind to 127.0.0.1:0 so the OS picks free ports; logging follows the
// test -v flag.
func NewConfig() *run.Config {
	c := run.NewConfig()
	c.BindAddress = "127.0.0.1:0"
	c.ReportingDisabled = true

	c.Coordinator.WriteTimeout = toml.Duration(30 * time.Second)
	c.Meta.Dir = MustTempFile()

	if !testing.Verbose() {
		c.Meta.LoggingEnabled = false
	}

	c.Data.Dir = MustTempFile()
	c.Data.WALDir = MustTempFile()

	// Allow the index implementation to be overridden for test matrices.
	indexVersion := os.Getenv("INFLUXDB_DATA_INDEX_VERSION")
	if indexVersion != "" {
		c.Data.Index = indexVersion
	}

	c.HTTPD.Enabled = true
	c.HTTPD.BindAddress = "127.0.0.1:0"
	c.HTTPD.LogEnabled = testing.Verbose()

	c.Monitor.StoreEnabled = false

	return c
}

// newRetentionPolicySpec builds a RetentionPolicySpec with the given name,
// replication factor and duration (0 means infinite retention).
func newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
	return &meta.RetentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration}
}
// maxInt64 returns the decimal string form of the largest int64,
// "9223372036854775807", for use in query-result expectations.
func maxInt64() string {
	// ^uint64(0)>>1 is 0x7FFFFFFFFFFFFFFF == math.MaxInt64. The previous
	// code marshalled ^int64(0), which is -1 — contradicting the name.
	b, _ := json.Marshal(int64(^uint64(0) >> 1))
	return string(b)
}
func now() time.Time {
return time.Now().UTC()
}
func yesterday() time.Time {
return now().Add(-1 * time.Hour * 24)
}
func mustParseTime(layout, value string) time.Time {
tm, err := time.Parse(layout, value)
if err != nil {
panic(err)
}
return tm
}
// mustParseLocation loads an IANA time zone by name, panicking on failure;
// requires tzdata to be available on the host.
func mustParseLocation(tzname string) *time.Location {
	loc, err := time.LoadLocation(tzname)
	if err != nil {
		panic(err)
	}
	return loc
}

// LosAngeles is used by time-zone-sensitive query tests.
var LosAngeles = mustParseLocation("America/Los_Angeles")
// MustReadAll reads r. Panic on error.
func MustReadAll(r io.Reader) []byte {
b, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return b
}
// MustTempFile returns a path to a temporary file.
func MustTempFile() string {
f, err := ioutil.TempFile("", "influxd-")
if err != nil {
panic(err)
}
f.Close()
os.Remove(f.Name())
return f.Name()
}
func RemoteEnabled() bool {
return os.Getenv("URL") != ""
}
// expectPattern reports whether act matches the regular expression exp.
// The pattern is compiled on every call; fine at test volumes.
func expectPattern(exp, act string) bool {
	return regexp.MustCompile(exp).MatchString(act)
}
// Query is one table-driven test case: a command to run against the server
// and the expected response.
type Query struct {
	name     string     // human-readable case name, used in failure output
	command  string     // the InfluxQL statement to execute
	params   url.Values // extra query parameters; nil means none
	exp, act string     // expected and actual response bodies
	pattern  bool       // when true, exp is a regexp rather than an exact string
	skip     bool       // when true, the runner logs and skips this case
	repeat   int        // NOTE(review): not consumed in this file — confirm usage by the runner
	once     bool       // NOTE(review): appears to mark one-shot setup queries — confirm against runner
}

// Execute runs the command and returns an err if it fails
func (q *Query) Execute(s Server) (err error) {
	if q.params == nil {
		q.act, err = s.Query(q.command)
		return
	}
	q.act, err = s.QueryWithParams(q.command, q.params)
	return
}

// success reports whether the actual response matches the expectation —
// as a regexp when pattern is set, otherwise by exact string equality.
func (q *Query) success() bool {
	if q.pattern {
		return expectPattern(q.exp, q.act)
	}
	return q.exp == q.act
}

// Error formats an execution error together with the case name.
func (q *Query) Error(err error) string {
	return fmt.Sprintf("%s: %v", q.name, err)
}

// failureMessage renders a full diagnostic for an expectation mismatch.
func (q *Query) failureMessage() string {
	return fmt.Sprintf("%s: unexpected results\nquery: %s\nparams: %v\nexp: %s\nactual: %s\n", q.name, q.command, q.params, q.exp, q.act)
}
// Write is one batch of line-protocol data destined for a database and
// retention policy.
type Write struct {
	db   string
	rp   string
	data string
}

// duplicate returns an independent copy of the write.
func (w *Write) duplicate() *Write {
	cp := *w
	return &cp
}
// Writes is an ordered collection of write batches.
type Writes []*Write

// duplicate deep-copies the collection so mutations in one test run cannot
// leak into another.
func (a Writes) duplicate() Writes {
	out := make(Writes, 0, len(a))
	for _, w := range a {
		out = append(out, w.duplicate())
	}
	return out
}
// Tests maps a shared test name to its definition.
type Tests map[string]Test

// Test bundles the writes, parameters and query cases that make up one
// integration scenario.
type Test struct {
	initialized bool       // true once init() has written the seed data
	writes      Writes     // seed data written before the queries run
	params      url.Values // extra query parameters applied to the writes
	db          string     // target database; defaults to "db0"
	rp          string     // target retention policy; defaults vary by accessor
	exp         string     // expected response body of each seed write
	queries     []*Query   // the table of query cases
}

// NewTest returns a Test targeting the given database and retention policy.
func NewTest(db, rp string) Test {
	return Test{
		db: db,
		rp: rp,
	}
}
// duplicate returns a deep copy of the test, so one run can mutate its
// writes, params and queries without affecting another.
func (t Test) duplicate() Test {
	test := Test{
		initialized: t.initialized,
		writes:      t.writes.duplicate(),
		db:          t.db,
		rp:          t.rp,
		exp:         t.exp,
		queries:     make([]*Query, len(t.queries)),
	}

	// Deep-copy the params map. The previous code overwrote t.params with an
	// empty map *before* ranging over it, so the loop never ran and the
	// duplicate's params were always nil (and test.params was never
	// initialized, which would have panicked had the loop executed).
	if t.params != nil {
		test.params = url.Values{}
		for k, a := range t.params {
			vals := make([]string, len(a))
			copy(vals, a)
			test.params[k] = vals
		}
	}

	copy(test.queries, t.queries)
	return test
}
// addQueries appends query cases to the test's table.
func (t *Test) addQueries(q ...*Query) {
	t.queries = append(t.queries, q...)
}

// database returns the configured database name, defaulting to "db0".
func (t *Test) database() string {
	if t.db == "" {
		return "db0"
	}
	return t.db
}

// retentionPolicy returns the configured RP name, defaulting to "default".
func (t *Test) retentionPolicy() string {
	if t.rp == "" {
		return "default"
	}
	return t.rp
}
// init writes the test's seed data to the server exactly once. Tests with
// no writes are never marked initialized, keeping repeat calls cheap no-ops.
func (t *Test) init(s Server) error {
	if len(t.writes) == 0 || t.initialized {
		return nil
	}

	// Fall back to the conventional defaults when unset.
	if t.db == "" {
		t.db = "db0"
	}
	if t.rp == "" {
		t.rp = "rp0"
	}

	if err := writeTestData(s, t); err != nil {
		return err
	}
	t.initialized = true
	return nil
}
// writeTestData sends every seed write in t to the server, creating the
// target database/RP first. Each write's response must equal t.exp
// (typically empty for a successful write).
func writeTestData(s Server, t *Test) error {
	for i, w := range t.writes {
		// Inherit the test-level db/rp for writes that don't set their own.
		if w.db == "" {
			w.db = t.database()
		}
		if w.rp == "" {
			w.rp = t.retentionPolicy()
		}

		if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
			return err
		}
		if res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil {
			return fmt.Errorf("write #%d: %s", i, err)
		} else if t.exp != res {
			return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res)
		}
	}
	return nil
}
// configureLogging silences server logs unless `go test -v` was requested.
func configureLogging(s Server) {
	if testing.Verbose() {
		return
	}
	s.SetLogOutput(ioutil.Discard)
}

View File

@@ -0,0 +1,535 @@
package tests
import (
"fmt"
"net/url"
"strings"
"testing"
"time"
)
var tests Tests
// Load all shared tests
// init registers every shared test fixture in the package-level tests
// map. Each fixture pairs optional seed writes with the queries (and
// expected JSON responses) that exercise one server feature area.
func init() {
	tests = make(map[string]Test)

	// Database lifecycle: create/show/drop, invalid names, duration and
	// replication parse errors, and shard-group-duration variants.
	tests["database_commands"] = Test{
		queries: []*Query{
			&Query{
				name:    "create database should succeed",
				command: `CREATE DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "create database with retention duration should succeed",
				command: `CREATE DATABASE db0_r WITH DURATION 24h REPLICATION 2 NAME db0_r_policy`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "create database with retention policy should fail with invalid name",
				command: `CREATE DATABASE db1 WITH NAME "."`,
				exp:     `{"results":[{"statement_id":0,"error":"invalid name"}]}`,
				once:    true,
			},
			&Query{
				name:    "create database should error with some unquoted names",
				command: `CREATE DATABASE 0xdb0`,
				exp:     `{"error":"error parsing query: found 0xdb0, expected identifier at line 1, char 17"}`,
			},
			&Query{
				name:    "create database should error with invalid characters",
				command: `CREATE DATABASE "."`,
				exp:     `{"results":[{"statement_id":0,"error":"invalid name"}]}`,
			},
			&Query{
				name:    "create database with retention duration should error with bad retention duration",
				command: `CREATE DATABASE db0 WITH DURATION xyz`,
				exp:     `{"error":"error parsing query: found xyz, expected duration at line 1, char 35"}`,
			},
			&Query{
				name:    "create database with retention replication should error with bad retention replication number",
				command: `CREATE DATABASE db0 WITH REPLICATION xyz`,
				exp:     `{"error":"error parsing query: found xyz, expected integer at line 1, char 38"}`,
			},
			&Query{
				name:    "create database with retention name should error with missing retention name",
				command: `CREATE DATABASE db0 WITH NAME`,
				exp:     `{"error":"error parsing query: found EOF, expected identifier at line 1, char 31"}`,
			},
			&Query{
				name:    "show database should succeed",
				command: `SHOW DATABASES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"]]}]}]}`,
			},
			&Query{
				name:    "create database should not error with existing database",
				command: `CREATE DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "create database should create non-existing database",
				command: `CREATE DATABASE db1`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "create database with retention duration should error if retention policy is different",
				command: `CREATE DATABASE db1 WITH DURATION 24h`,
				exp:     `{"results":[{"statement_id":0,"error":"retention policy conflicts with an existing policy"}]}`,
			},
			&Query{
				name:    "create database should error with bad retention duration",
				command: `CREATE DATABASE db1 WITH DURATION xyz`,
				exp:     `{"error":"error parsing query: found xyz, expected duration at line 1, char 35"}`,
			},
			&Query{
				name:    "show database should succeed",
				command: `SHOW DATABASES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"],["db1"]]}]}]}`,
			},
			&Query{
				name:    "drop database db0 should succeed",
				command: `DROP DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "drop database db0_r should succeed",
				command: `DROP DATABASE db0_r`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "drop database db1 should succeed",
				command: `DROP DATABASE db1`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "drop database should not error if it does not exists",
				command: `DROP DATABASE db1`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "drop database should not error with non-existing database db1",
				command: `DROP DATABASE db1`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "show database should have no results",
				command: `SHOW DATABASES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"]}]}]}`,
			},
			&Query{
				name:    "create database with shard group duration should succeed",
				command: `CREATE DATABASE db0 WITH SHARD DURATION 61m`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "create database with shard group duration and duration should succeed",
				command: `CREATE DATABASE db1 WITH DURATION 60m SHARD DURATION 30m`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
		},
	}

	// Dropping a database with data, then recreating it and its policy.
	tests["drop_and_recreate_database"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
		},
		queries: []*Query{
			&Query{
				name:    "Drop database after data write",
				command: `DROP DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "Recreate database",
				command: `CREATE DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "Recreate retention policy",
				command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "Show measurements after recreate",
				command: `SHOW MEASUREMENTS`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Query data after recreate",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
		},
	}

	// Dropping one database must not disturb data in another.
	tests["drop_database_isolated"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
		},
		queries: []*Query{
			&Query{
				name:    "Query data from 1st database",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Query data from 1st database with GROUP BY *",
				command: `SELECT * FROM cpu GROUP BY *`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop other database",
				command: `DROP DATABASE db1`,
				once:    true,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "Query data from 1st database and ensure it's still there",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Query data from 1st database and ensure it's still there with GROUP BY *",
				command: `SELECT * FROM cpu GROUP BY *`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
		},
	}

	// DELETE FROM removes points in a time range but keeps the series key
	// and must not touch other databases.
	tests["delete_series"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-02T00:00:00Z").UnixNano())},
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=200 %d`, mustParseTime(time.RFC3339Nano, "2000-01-03T00:00:00Z").UnixNano())},
			&Write{db: "db1", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
		},
		queries: []*Query{
			&Query{
				name:    "Show series is present",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Delete series",
				command: `DELETE FROM cpu WHERE time < '2000-01-03T00:00:00Z'`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
				once:    true,
			},
			&Query{
				name:    "Show series still exists",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Make sure last point still exists",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-03T00:00:00Z","serverA","uswest",200]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Make sure data wasn't deleted from other database.",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db1"}},
			},
		},
	}

	// DROP SERIES removes the series entirely, scoped to one database.
	tests["drop_and_recreate_series"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
			&Write{db: "db1", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
		},
		queries: []*Query{
			&Query{
				name:    "Show series is present",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop series after data write",
				command: `DROP SERIES FROM cpu`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
				once:    true,
			},
			&Query{
				name:    "Show series is gone",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Make sure data wasn't deleted from other database.",
				command: `SELECT * FROM cpu`,
				exp:     `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
				params:  url.Values{"db": []string{"db1"}},
			},
		},
	}

	// A re-written series must show up again after being dropped.
	tests["drop_and_recreate_series_retest"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())},
		},
		queries: []*Query{
			&Query{
				name:    "Show series is present again after re-write",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
		},
	}

	// DROP SERIES with a measurement regex, plus rejection of WHERE
	// clauses on fields or time.
	tests["drop_series_from_regex"] = Test{
		db: "db0",
		rp: "rp0",
		writes: Writes{
			&Write{data: strings.Join([]string{
				fmt.Sprintf(`a,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
				fmt.Sprintf(`aa,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
				fmt.Sprintf(`b,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
				fmt.Sprintf(`c,host=serverA,region=uswest val=30.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
			}, "\n")},
		},
		queries: []*Query{
			&Query{
				name:    "Show series is present",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["a,host=serverA,region=uswest"],["aa,host=serverA,region=uswest"],["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop series after data write",
				command: `DROP SERIES FROM /a.*/`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
				once:    true,
			},
			&Query{
				name:    "Show series is gone",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop series from regex that matches no measurements",
				command: `DROP SERIES FROM /a.*/`,
				exp:     `{"results":[{"statement_id":0}]}`,
				params:  url.Values{"db": []string{"db0"}},
				once:    true,
			},
			&Query{
				name:    "make sure DROP SERIES doesn't delete anything when regex doesn't match",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop series with WHERE field should error",
				command: `DROP SERIES FROM c WHERE val > 50.0`,
				exp:     `{"results":[{"statement_id":0,"error":"shard 1: fields not supported in WHERE clause during deletion"}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "make sure DROP SERIES with field in WHERE didn't delete data",
				command: `SHOW SERIES`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["b,host=serverA,region=uswest"],["c,host=serverA,region=uswest"]]}]}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
			&Query{
				name:    "Drop series with WHERE time should error",
				command: `DROP SERIES FROM c WHERE time > now() - 1d`,
				exp:     `{"results":[{"statement_id":0,"error":"DROP SERIES doesn't support time in WHERE clause"}]}`,
				params:  url.Values{"db": []string{"db0"}},
			},
		},
	}

	// Retention policy lifecycle: create/alter/show/drop, invalid names
	// and durations, and shard-group-duration normalization.
	tests["retention_policy_commands"] = Test{
		db: "db0",
		queries: []*Query{
			&Query{
				name:    "create retention policy with invalid name should return an error",
				command: `CREATE RETENTION POLICY "." ON db0 DURATION 1d REPLICATION 1`,
				exp:     `{"results":[{"statement_id":0,"error":"invalid name"}]}`,
				once:    true,
			},
			&Query{
				name:    "create retention policy should succeed",
				command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy should succeed",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","1h0m0s","1h0m0s",1,false]]}]}]}`,
			},
			&Query{
				name:    "alter retention policy should succeed",
				command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy should have new altered information",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`,
			},
			&Query{
				name:    "show retention policy should still show policy",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`,
			},
			&Query{
				name:    "create a second non-default retention policy",
				command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy should show both",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true],["rp2","1h0m0s","1h0m0s",1,false]]}]}]}`,
			},
			&Query{
				name:    "dropping non-default retention policy succeed",
				command: `DROP RETENTION POLICY rp2 ON db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "create a third non-default retention policy",
				command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 30m`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "create retention policy with default on",
				command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 30m DEFAULT`,
				exp:     `{"results":[{"statement_id":0,"error":"retention policy conflicts with an existing policy"}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy should show both with custom shard",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true],["rp3","1h0m0s","1h0m0s",1,false]]}]}]}`,
			},
			&Query{
				name:    "dropping non-default custom shard retention policy succeed",
				command: `DROP RETENTION POLICY rp3 ON db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy should show just default",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rp0","2h0m0s","1h0m0s",3,true]]}]}]}`,
			},
			&Query{
				name:    "Ensure retention policy with unacceptable retention cannot be created",
				command: `CREATE RETENTION POLICY rp4 ON db0 DURATION 1s REPLICATION 1`,
				exp:     `{"results":[{"statement_id":0,"error":"retention policy duration must be at least 1h0m0s"}]}`,
				once:    true,
			},
			&Query{
				name:    "Check error when deleting retention policy on non-existent database",
				command: `DROP RETENTION POLICY rp1 ON mydatabase`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			&Query{
				name:    "Ensure retention policy for non existing db is not created",
				command: `CREATE RETENTION POLICY rp0 ON nodb DURATION 1h REPLICATION 1`,
				exp:     `{"results":[{"statement_id":0,"error":"database not found: nodb"}]}`,
				once:    true,
			},
			&Query{
				name:    "drop rp0",
				command: `DROP RETENTION POLICY rp0 ON db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
			},
			// INF Shard Group Duration will normalize to the Retention Policy Duration Default
			&Query{
				name:    "create retention policy with inf shard group duration",
				command: `CREATE RETENTION POLICY rpinf ON db0 DURATION INF REPLICATION 1 SHARD DURATION 0s`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			// 0s Shard Group Duration will normalize to the Replication Policy Duration
			&Query{
				name:    "create retention policy with 0s shard group duration",
				command: `CREATE RETENTION POLICY rpzero ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 0s`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			// 1s Shard Group Duration will normalize to the MinDefaultRetentionPolicyDuration
			&Query{
				name:    "create retention policy with 1s shard group duration",
				command: `CREATE RETENTION POLICY rponesecond ON db0 DURATION 2h REPLICATION 1 SHARD DURATION 1s`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policy: validate normalized shard group durations are working",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["rpinf","0s","168h0m0s",1,false],["rpzero","1h0m0s","1h0m0s",1,false],["rponesecond","2h0m0s","1h0m0s",1,false]]}]}]}`,
			},
		},
	}

	// Creating a database should auto-create its default retention policy.
	tests["retention_policy_auto_create"] = Test{
		queries: []*Query{
			&Query{
				name:    "create database should succeed",
				command: `CREATE DATABASE db0`,
				exp:     `{"results":[{"statement_id":0}]}`,
				once:    true,
			},
			&Query{
				name:    "show retention policies should return auto-created policy",
				command: `SHOW RETENTION POLICIES ON db0`,
				exp:     `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["autogen","0s","168h0m0s",1,true]]}]}]}`,
			},
		},
	}
}
// load looks up the named shared test, failing the calling test if it
// is not registered, and returns an independent deep copy that the
// caller may mutate freely.
func (tests Tests) load(t *testing.T, key string) Test {
	tt, ok := tests[key]
	if !ok {
		t.Fatalf("no test %q", key)
	}
	return tt.duplicate()
}

File diff suppressed because it is too large Load Diff