Mirror of https://github.com/Oxalide/vsphere-influxdb-go.git (synced 2023-10-10 11:36:51 +00:00)

Commit: add vendoring with go dep
1161  vendor/github.com/influxdata/influxdb/influxql/README.md  (generated, vendored, normal file)
File diff suppressed because it is too large
5371  vendor/github.com/influxdata/influxdb/influxql/ast.go  (generated, vendored, normal file)
File diff suppressed because it is too large
1861  vendor/github.com/influxdata/influxdb/influxql/ast_test.go  (generated, vendored, normal file)
File diff suppressed because it is too large
1069  vendor/github.com/influxdata/influxdb/influxql/call_iterator.go  (generated, vendored, normal file)
File diff suppressed because it is too large
983  vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go  (generated, vendored, normal file)
@@ -0,0 +1,983 @@
package influxql_test

import (
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/pkg/deep"
)

// Ensure that a float iterator can be created for a count() call.
func TestCallIterator_Count_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`count("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a count() call.
func TestCallIterator_Count_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`count("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a string iterator can be created for a count() call.
func TestCallIterator_Count_String(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&StringIterator{Points: []influxql.StringPoint{
			{Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")},
			{Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")},

			{Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`count("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a count() call.
func TestCallIterator_Count_Boolean(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&BooleanIterator{Points: []influxql.BooleanPoint{
			{Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
			{Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")},
			{Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

			{Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
			{Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`count("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a min() call.
func TestCallIterator_Min_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`min("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}},
		{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a min() call.
func TestCallIterator_Min_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`min("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}},
		{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a min() call.
func TestCallIterator_Min_Boolean(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&BooleanIterator{Points: []influxql.BooleanPoint{
			{Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`min("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a max() call.
func TestCallIterator_Max_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`max("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a max() call.
func TestCallIterator_Max_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`max("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a max() call.
func TestCallIterator_Max_Boolean(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&BooleanIterator{Points: []influxql.BooleanPoint{
			{Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`max("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a sum() call.
func TestCallIterator_Sum_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`sum("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a sum() call.
func TestCallIterator_Sum_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`sum("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a first() call.
func TestCallIterator_First_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`first("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a first() call.
func TestCallIterator_First_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`first("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a string iterator can be created for a first() call.
func TestCallIterator_First_String(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&StringIterator{Points: []influxql.StringPoint{
			{Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`first("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a first() call.
func TestCallIterator_First_Boolean(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&BooleanIterator{Points: []influxql.BooleanPoint{
			{Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`first("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a last() call.
func TestCallIterator_Last_Float(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&FloatIterator{Points: []influxql.FloatPoint{
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`last("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a last() call.
func TestCallIterator_Last_Integer(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&IntegerIterator{Points: []influxql.IntegerPoint{
			{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`last("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a string iterator can be created for a last() call.
func TestCallIterator_Last_String(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&StringIterator{Points: []influxql.StringPoint{
			{Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`last("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a last() call.
func TestCallIterator_Last_Boolean(t *testing.T) {
	itr, _ := influxql.NewCallIterator(
		&BooleanIterator{Points: []influxql.BooleanPoint{
			{Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
			{Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")},
			{Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

			{Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
			{Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
		}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`last("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}},
		{&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}},
		{&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a float iterator can be created for a mode() call.
func TestCallIterator_Mode_Float(t *testing.T) {
	itr, _ := influxql.NewModeIterator(&FloatIterator{Points: []influxql.FloatPoint{
		{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
		{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")},

		{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")},
	}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`mode("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 0}},
		{&influxql.FloatPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA"), Aggregated: 0}},
		{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 0}},
		{&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 0}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that an integer iterator can be created for a mode() call.
func TestCallIterator_Mode_Integer(t *testing.T) {
	itr, _ := influxql.NewModeIterator(&IntegerIterator{Points: []influxql.IntegerPoint{
		{Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")},
		{Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")},
	}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`mode("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}},
		{&influxql.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}},
		{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}},
		{&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a string iterator can be created for a mode() call.
func TestCallIterator_Mode_String(t *testing.T) {
	itr, _ := influxql.NewModeIterator(&StringIterator{Points: []influxql.StringPoint{
		{Time: 0, Value: "15", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 1, Value: "10", Tags: ParseTags("region=us-west,host=hostA")},
		{Time: 2, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 3, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 4, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 6, Value: "20", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")},

		{Time: 1, Value: "11", Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 22, Value: "8", Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 23, Value: "8", Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 24, Value: "25", Tags: ParseTags("region=us-west,host=hostB")},
	}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`mode("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.StringPoint{Time: 0, Value: "10", Tags: ParseTags("host=hostA")}},
		{&influxql.StringPoint{Time: 5, Value: "21", Tags: ParseTags("host=hostA")}},
		{&influxql.StringPoint{Time: 1, Value: "11", Tags: ParseTags("host=hostB")}},
		{&influxql.StringPoint{Time: 20, Value: "8", Tags: ParseTags("host=hostB")}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

// Ensure that a boolean iterator can be created for a mode() call.
func TestCallIterator_Mode_Boolean(t *testing.T) {
	itr, _ := influxql.NewModeIterator(&BooleanIterator{Points: []influxql.BooleanPoint{
		{Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")},
		{Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")},
		{Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")},

		{Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
		{Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")},
	}},
		influxql.IteratorOptions{
			Expr:       MustParseExpr(`mode("value")`),
			Dimensions: []string{"host"},
			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
			Ordered:    true,
			Ascending:  true,
		},
	)

	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}},
		{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}},
		{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}},
		{&influxql.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}},
	}) {
		t.Fatalf("unexpected points: %s", spew.Sdump(a))
	}
}

func TestNewCallIterator_UnsupportedExprName(t *testing.T) {
	_, err := influxql.NewCallIterator(
		&FloatIterator{},
		influxql.IteratorOptions{
			Expr: MustParseExpr(`foobar("value")`),
		},
	)

	if err == nil || err.Error() != "unsupported function call: foobar" {
		t.Errorf("unexpected error: %s", err)
	}
}

func BenchmarkCountIterator_1K(b *testing.B)   { benchmarkCountIterator(b, 1000) }
func BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) }
func BenchmarkCountIterator_1M(b *testing.B)   { benchmarkCountIterator(b, 1000000) }

func benchmarkCountIterator(b *testing.B, pointN int) {
	benchmarkCallIterator(b, influxql.IteratorOptions{
		Expr:      MustParseExpr("count(value)"),
		StartTime: influxql.MinTime,
		EndTime:   influxql.MaxTime,
	}, pointN)
}

func benchmarkCallIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) {
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		// Create a lightweight point generator.
		p := influxql.FloatPoint{Name: "cpu", Value: 100}
		input := FloatPointGenerator{
			N:  pointN,
			Fn: func(i int) *influxql.FloatPoint { return &p },
		}

		// Execute call against input.
		itr, err := influxql.NewCallIterator(&input, opt)
		if err != nil {
			b.Fatal(err)
		}
		influxql.DrainIterator(itr)
	}
}

func BenchmarkSampleIterator_1k(b *testing.B)   { benchmarkSampleIterator(b, 1000) }
func BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) }
func BenchmarkSampleIterator_1M(b *testing.B)   { benchmarkSampleIterator(b, 1000000) }

func benchmarkSampleIterator(b *testing.B, pointN int) {
	b.ReportAllocs()

	// Create a lightweight point generator.
	p := influxql.FloatPoint{Name: "cpu"}
	input := FloatPointGenerator{
		N: pointN,
		Fn: func(i int) *influxql.FloatPoint {
			p.Value = float64(i)
			return &p
		},
	}

	for i := 0; i < b.N; i++ {
		// Execute call against input.
		itr, err := influxql.NewSampleIterator(&input, influxql.IteratorOptions{}, 100)
		if err != nil {
			b.Fatal(err)
		}
		influxql.DrainIterator(itr)
	}
}

func BenchmarkDistinctIterator_1K(b *testing.B)   { benchmarkDistinctIterator(b, 1000) }
func BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) }
func BenchmarkDistinctIterator_1M(b *testing.B)   { benchmarkDistinctIterator(b, 1000000) }

func benchmarkDistinctIterator(b *testing.B, pointN int) {
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		// Create a lightweight point generator.
		p := influxql.FloatPoint{Name: "cpu"}
		input := FloatPointGenerator{
			N: pointN,
			Fn: func(i int) *influxql.FloatPoint {
				p.Value = float64(i % 10)
				return &p
			},
		}

		// Execute call against input.
		itr, err := influxql.NewDistinctIterator(&input, influxql.IteratorOptions{})
		if err != nil {
			b.Fatal(err)
		}
		influxql.DrainIterator(itr)
	}
}

func BenchmarkModeIterator_1K(b *testing.B)   { benchmarkModeIterator(b, 1000) }
func BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) }
func BenchmarkModeIterator_1M(b *testing.B)   { benchmarkModeIterator(b, 1000000) }

func benchmarkModeIterator(b *testing.B, pointN int) {
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		// Create a lightweight point generator.
		p := influxql.FloatPoint{Name: "cpu"}
		input := FloatPointGenerator{
			N: pointN,
			Fn: func(i int) *influxql.FloatPoint {
				p.Value = float64(10)
				return &p
			},
		}

		// Execute call against input.
		itr, err := influxql.NewModeIterator(&input, influxql.IteratorOptions{})
		if err != nil {
			b.Fatal(err)
		}
		influxql.DrainIterator(itr)
	}
}

// FloatPointGenerator is a lightweight iterator that emits N points produced by Fn.
type FloatPointGenerator struct {
	i  int
	N  int
	Fn func(i int) *influxql.FloatPoint
}

func (g *FloatPointGenerator) Close() error                  { return nil }
func (g *FloatPointGenerator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }

func (g *FloatPointGenerator) Next() (*influxql.FloatPoint, error) {
	if g.i == g.N {
		return nil, nil
	}
	p := g.Fn(g.i)
	g.i++
	return p, nil
}

// MustCallIterator returns a call iterator for input, panicking on error.
func MustCallIterator(input influxql.Iterator, opt influxql.IteratorOptions) influxql.Iterator {
	itr, err := influxql.NewCallIterator(input, opt)
	if err != nil {
		panic(err)
	}
	return itr
}
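The benchmarks above all funnel through benchmarkCallIterator, so covering another aggregate is just a matter of swapping the parsed expression. A minimal sketch of what a sum() benchmark could look like under the same helpers (a hypothetical addition, not part of the vendored file):

func BenchmarkSumIterator_1M(b *testing.B) {
	// Reuses the generator-driven harness defined above; only the call differs.
	benchmarkCallIterator(b, influxql.IteratorOptions{
		Expr:      MustParseExpr("sum(value)"),
		StartTime: influxql.MinTime,
		EndTime:   influxql.MaxTime,
	}, 1000000)
}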
41  vendor/github.com/influxdata/influxdb/influxql/cast.go  (generated, vendored, normal file)
@@ -0,0 +1,41 @@
package influxql

func castToFloat(v interface{}) float64 {
	switch v := v.(type) {
	case float64:
		return v
	case int64:
		return float64(v)
	default:
		return float64(0)
	}
}

func castToInteger(v interface{}) int64 {
	switch v := v.(type) {
	case float64:
		return int64(v)
	case int64:
		return v
	default:
		return int64(0)
	}
}

func castToString(v interface{}) string {
	switch v := v.(type) {
	case string:
		return v
	default:
		return ""
	}
}

func castToBoolean(v interface{}) bool {
	switch v := v.(type) {
	case bool:
		return v
	default:
		return false
	}
}
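These helpers coerce a raw interface value to one of the four InfluxQL point types, silently falling back to the type's zero value on a mismatch. A small in-package sketch of the semantics (hypothetical, assuming "fmt" is imported; not part of the vendored file):

func demoCasts() {
	fmt.Println(castToFloat(int64(3)))  // 3: int64 widens to float64
	fmt.Println(castToInteger(2.9))     // 2: float64 truncates toward zero
	fmt.Println(castToString(42))       // "": non-strings fall back to ""
	fmt.Println(castToBoolean("true"))  // false: no string parsing is attempted
}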
12  vendor/github.com/influxdata/influxdb/influxql/doc.go  (generated, vendored, normal file)
@@ -0,0 +1,12 @@
/*
Package influxql implements a parser for the InfluxDB query language.

InfluxQL is a DML and DDL language for the InfluxDB time series database.
It provides the ability to query for aggregate statistics as well as create
and configure the InfluxDB server.

See https://docs.influxdata.com/influxdb/latest/query_language/
for a reference on using InfluxQL.

*/
package influxql
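As a quick orientation for the parser this package comment describes, a statement string round-trips through ParseStatement into a typed AST node. A minimal sketch of a standalone consumer (assuming the vendored import path):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Parse a SELECT into its AST form; other statement kinds come back
	// as their own concrete types.
	stmt, err := influxql.ParseStatement(`SELECT count("value") FROM cpu GROUP BY host`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T: %s\n", stmt, stmt.String())
}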
225  vendor/github.com/influxdata/influxdb/influxql/emitter.go  (generated, vendored, normal file)
@@ -0,0 +1,225 @@
|
||||
package influxql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/models"
|
||||
)
|
||||
|
||||
// Emitter groups values together by name, tags, and time.
|
||||
type Emitter struct {
|
||||
buf []Point
|
||||
itrs []Iterator
|
||||
ascending bool
|
||||
chunkSize int
|
||||
|
||||
tags Tags
|
||||
row *models.Row
|
||||
|
||||
// The columns to attach to each row.
|
||||
Columns []string
|
||||
|
||||
// The time zone location.
|
||||
Location *time.Location
|
||||
|
||||
// Removes the "time" column from output.
|
||||
// Used for meta queries where time does not apply.
|
||||
OmitTime bool
|
||||
}
|
||||
|
||||
// NewEmitter returns a new instance of Emitter that pulls from itrs.
|
||||
func NewEmitter(itrs []Iterator, ascending bool, chunkSize int) *Emitter {
|
||||
return &Emitter{
|
||||
buf: make([]Point, len(itrs)),
|
||||
itrs: itrs,
|
||||
ascending: ascending,
|
||||
chunkSize: chunkSize,
|
||||
Location: time.UTC,
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the underlying iterators.
|
||||
func (e *Emitter) Close() error {
|
||||
return Iterators(e.itrs).Close()
|
||||
}
|
||||
|
||||
// Emit returns the next row from the iterators.
|
||||
func (e *Emitter) Emit() (*models.Row, bool, error) {
|
||||
// Immediately end emission if there are no iterators.
|
||||
if len(e.itrs) == 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// Continually read from iterators until they are exhausted.
|
||||
for {
|
||||
// Fill buffer. Return row if no more points remain.
|
||||
t, name, tags, err := e.loadBuf()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
} else if t == ZeroTime {
|
||||
row := e.row
|
||||
e.row = nil
|
||||
return row, false, nil
|
||||
}
|
||||
|
||||
// Read next set of values from all iterators at a given time/name/tags.
|
||||
// If no values are returned then return row.
|
||||
values := e.readAt(t, name, tags)
|
||||
if values == nil {
|
||||
row := e.row
|
||||
e.row = nil
|
||||
return row, false, nil
|
||||
}
|
||||
|
||||
// If there's no row yet then create one.
|
||||
// If the name and tags match the existing row, append to that row if
|
||||
// the number of values doesn't exceed the chunk size.
|
||||
// Otherwise return existing row and add values to next emitted row.
|
||||
if e.row == nil {
|
||||
e.createRow(name, tags, values)
|
||||
} else if e.row.Name == name && e.tags.Equals(&tags) {
|
||||
if e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize {
|
||||
row := e.row
|
||||
row.Partial = true
|
||||
e.createRow(name, tags, values)
|
||||
return row, true, nil
|
||||
}
|
||||
e.row.Values = append(e.row.Values, values)
|
||||
} else {
|
||||
row := e.row
|
||||
e.createRow(name, tags, values)
|
||||
			return row, true, nil
		}
	}
}

// loadBuf reads in points into empty buffer slots.
// Returns the next time/name/tags to emit for.
func (e *Emitter) loadBuf() (t int64, name string, tags Tags, err error) {
	t = ZeroTime

	for i := range e.itrs {
		// Load buffer, if empty.
		if e.buf[i] == nil {
			e.buf[i], err = e.readIterator(e.itrs[i])
			if err != nil {
				break
			}
		}

		// Skip if buffer is empty.
		p := e.buf[i]
		if p == nil {
			continue
		}
		itrTime, itrName, itrTags := p.time(), p.name(), p.tags()

		// Initialize range values if not set.
		if t == ZeroTime {
			t, name, tags = itrTime, itrName, itrTags
			continue
		}

		// Update range values if lower and emitter is in time ascending order.
		if e.ascending {
			if (itrName < name) || (itrName == name && itrTags.ID() < tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime < t) {
				t, name, tags = itrTime, itrName, itrTags
			}
			continue
		}

		// Update range values if higher and emitter is in time descending order.
		if (itrName > name) || (itrName == name && itrTags.ID() > tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime > t) {
			t, name, tags = itrTime, itrName, itrTags
		}
	}
	return
}

// createRow creates a new row attached to the emitter.
func (e *Emitter) createRow(name string, tags Tags, values []interface{}) {
	e.tags = tags
	e.row = &models.Row{
		Name:    name,
		Tags:    tags.KeyValues(),
		Columns: e.Columns,
		Values:  [][]interface{}{values},
	}
}

// readAt returns the next slice of values from the iterators at time/name/tags.
// Returns nil values once the iterators are exhausted.
func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} {
	offset := 1
	if e.OmitTime {
		offset = 0
	}

	values := make([]interface{}, len(e.itrs)+offset)
	if !e.OmitTime {
		values[0] = time.Unix(0, t).In(e.Location)
	}
	e.readInto(t, name, tags, values[offset:])
	return values
}

func (e *Emitter) readInto(t int64, name string, tags Tags, values []interface{}) {
	for i, p := range e.buf {
		// Skip if buffer is empty.
		if p == nil {
			values[i] = nil
			continue
		}

		// Skip point if it doesn't match time/name/tags.
		pTags := p.tags()
		if p.time() != t || p.name() != name || !pTags.Equals(&tags) {
			values[i] = nil
			continue
		}

		// Read point value.
		values[i] = p.value()

		// Clear buffer.
		e.buf[i] = nil
	}
}

// readIterator reads the next point from itr.
func (e *Emitter) readIterator(itr Iterator) (Point, error) {
	if itr == nil {
		return nil, nil
	}

	switch itr := itr.(type) {
	case FloatIterator:
		if p, err := itr.Next(); err != nil {
			return nil, err
		} else if p != nil {
			return p, nil
		}
	case IntegerIterator:
		if p, err := itr.Next(); err != nil {
			return nil, err
		} else if p != nil {
			return p, nil
		}
	case StringIterator:
		if p, err := itr.Next(); err != nil {
			return nil, err
		} else if p != nil {
			return p, nil
		}
	case BooleanIterator:
		if p, err := itr.Next(); err != nil {
			return nil, err
		} else if p != nil {
			return p, nil
		}
	default:
		panic(fmt.Sprintf("unsupported iterator: %T", itr))
	}
	return nil, nil
}
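The chained comparisons in loadBuf above implement a single lexicographic ordering over (series name, tag set ID, timestamp). A minimal standalone sketch of that selection rule, with a hypothetical head struct standing in for a buffered point (this is illustration, not part of the vendored file):

package main

import "fmt"

type head struct {
	name string
	tags string // stands in for Tags.ID()
	time int64
}

// less reports whether a should be emitted before b in ascending mode,
// mirroring the chained comparisons in Emitter.loadBuf.
func less(a, b head) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.tags != b.tags {
		return a.tags < b.tags
	}
	return a.time < b.time
}

func main() {
	heads := []head{
		{"mem", "region=west", 0},
		{"cpu", "region=west", 5},
		{"cpu", "region=east", 9},
	}
	// Pick the head to emit next, exactly as loadBuf scans its buffers.
	next := heads[0]
	for _, h := range heads[1:] {
		if less(h, next) {
			next = h
		}
	}
	fmt.Println(next) // {cpu region=east 9}
}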
125
vendor/github.com/influxdata/influxdb/influxql/emitter_test.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
package influxql_test

import (
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/pkg/deep"
)

// Ensure the emitter can group iterators together into rows.
func TestEmitter_Emit(t *testing.T) {
	// Build an emitter that pulls from two iterators.
	e := influxql.NewEmitter([]influxql.Iterator{
		&FloatIterator{Points: []influxql.FloatPoint{
			{Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1},
			{Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2},
		}},
		&FloatIterator{Points: []influxql.FloatPoint{
			{Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 4},
			{Name: "cpu", Tags: ParseTags("region=north"), Time: 0, Value: 4},
			{Name: "mem", Time: 4, Value: 5},
		}},
	}, true, 0)
	e.Columns = []string{"col1", "col2"}

	// Verify the cpu region=west is emitted first.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(0): %s", err)
	} else if !deep.Equal(row, &models.Row{
		Name:    "cpu",
		Tags:    map[string]string{"region": "west"},
		Columns: []string{"col1", "col2"},
		Values: [][]interface{}{
			{time.Unix(0, 0).UTC(), float64(1), nil},
			{time.Unix(0, 1).UTC(), float64(2), float64(4)},
		},
	}) {
		t.Fatalf("unexpected row(0): %s", spew.Sdump(row))
	}

	// Verify the cpu region=north is emitted next.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(1): %s", err)
	} else if !deep.Equal(row, &models.Row{
		Name:    "cpu",
		Tags:    map[string]string{"region": "north"},
		Columns: []string{"col1", "col2"},
		Values: [][]interface{}{
			{time.Unix(0, 0).UTC(), nil, float64(4)},
		},
	}) {
		t.Fatalf("unexpected row(1): %s", spew.Sdump(row))
	}

	// Verify the mem series is emitted last.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(2): %s", err)
	} else if !deep.Equal(row, &models.Row{
		Name:    "mem",
		Columns: []string{"col1", "col2"},
		Values: [][]interface{}{
			{time.Unix(0, 4).UTC(), nil, float64(5)},
		},
	}) {
		t.Fatalf("unexpected row(2): %s", spew.Sdump(row))
	}

	// Verify EOF.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(eof): %s", err)
	} else if row != nil {
		t.Fatalf("unexpected eof: %s", spew.Sdump(row))
	}
}

// Ensure the emitter will limit the chunked output from a series.
func TestEmitter_ChunkSize(t *testing.T) {
	// Build an emitter that pulls from one iterator with multiple points in the same series.
	e := influxql.NewEmitter([]influxql.Iterator{
		&FloatIterator{Points: []influxql.FloatPoint{
			{Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1},
			{Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2},
		}},
	}, true, 1)
	e.Columns = []string{"col1"}

	// Verify the first chunk of cpu region=west is emitted first, marked partial.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(0): %s", err)
	} else if !deep.Equal(row, &models.Row{
		Name:    "cpu",
		Tags:    map[string]string{"region": "west"},
		Columns: []string{"col1"},
		Values: [][]interface{}{
			{time.Unix(0, 0).UTC(), float64(1)},
		},
		Partial: true,
	}) {
		t.Fatalf("unexpected row(0): %s", spew.Sdump(row))
	}

	// Verify the remaining chunk of the cpu region=west series is emitted next.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(1): %s", err)
	} else if !deep.Equal(row, &models.Row{
		Name:    "cpu",
		Tags:    map[string]string{"region": "west"},
		Columns: []string{"col1"},
		Values: [][]interface{}{
			{time.Unix(0, 1).UTC(), float64(2)},
		},
	}) {
		t.Fatalf("unexpected row(1): %s", spew.Sdump(row))
	}

	// Verify EOF.
	if row, _, err := e.Emit(); err != nil {
		t.Fatalf("unexpected error(eof): %s", err)
	} else if row != nil {
		t.Fatalf("unexpected eof: %s", spew.Sdump(row))
	}
}
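Pieced together from the two tests above, a hedged sketch of a consumer loop over an Emitter: Emit returns a nil row at EOF, and sets Partial on a row that was truncated by the chunk size, in which case the following call continues the same series. The emit and process parameters here are stand-ins, not the vendored API surface:

package example

import "github.com/influxdata/influxdb/models"

// drain consumes every row an emitter produces. emit stands in for
// (*influxql.Emitter).Emit; process is a hypothetical row handler.
func drain(emit func() (*models.Row, bool, error), process func(*models.Row)) error {
	for {
		row, _, err := emit()
		if err != nil {
			return err
		}
		if row == nil {
			return nil // all iterators exhausted
		}
		// A row with Partial set was cut off by the chunk size; the next
		// call to emit continues the same series (see TestEmitter_ChunkSize).
		process(row)
	}
}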
1669
vendor/github.com/influxdata/influxdb/influxql/functions.gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
219
vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,219 @@
package influxql

import (
	"math/rand"
	"sort"
	"time"
)

{{with $types := .}}{{range $k := $types}}

// {{$k.Name}}PointAggregator aggregates points to produce a single point.
type {{$k.Name}}PointAggregator interface {
	Aggregate{{$k.Name}}(p *{{$k.Name}}Point)
}

// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time.
type {{$k.Name}}BulkPointAggregator interface {
	Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point)
}

// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an
// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will
// use the AggregateBulk method.
func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) {
	switch a := a.(type) {
	case {{$k.Name}}BulkPointAggregator:
		a.Aggregate{{$k.Name}}Bulk(points)
	default:
		for _, p := range points {
			a.Aggregate{{$k.Name}}(&p)
		}
	}
}

// {{$k.Name}}PointEmitter produces a single point from an aggregate.
type {{$k.Name}}PointEmitter interface {
	Emit() []{{$k.Name}}Point
}

{{range $v := $types}}

// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer.
type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{})

// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces
// the passed in points to a single point using a reduce function.
type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {
	prev *{{$v.Name}}Point
	fn   {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func
}

// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer.
func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {
	return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev}
}

// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the
// current and new point to modify the current point.
func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	t, v, aux := r.fn(r.prev, p)
	if r.prev == nil {
		r.prev = &{{$v.Name}}Point{}
	}
	r.prev.Time = t
	r.prev.Value = v
	r.prev.Aux = aux
	if p.Aggregated > 1 {
		r.prev.Aggregated += p.Aggregated
	} else {
		r.prev.Aggregated++
	}
}

// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}.
func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {
	return []{{$v.Name}}Point{*r.prev}
}

// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer.
type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point

// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates
// the passed in points and then invokes the function to reduce the points when they are emitted.
type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {
	points []{{$k.Name}}Point
	fn     {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc
}

// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer.
func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {
	return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn}
}

// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed
// to the reduce function when Emit is called.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.points = append(r.points, *p.Clone())
}

// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice.
// This is a more efficient version of calling Aggregate{{$k.Name}} on each point.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) {
	r.points = append(r.points, points...)
}

// Emit invokes the reduce function on the aggregated points to generate the aggregated points.
// This method does not clear the points from the internal slice.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {
	return r.fn(r.points)
}
{{end}}

// {{$k.Name}}DistinctReducer returns the distinct points in a series.
type {{$k.Name}}DistinctReducer struct {
	m map[{{$k.Type}}]{{$k.Name}}Point
}

// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer.
func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer {
	return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer.
func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	if _, ok := r.m[p.Value]; !ok {
		r.m[p.Value] = *p
	}
}

// Emit emits the distinct points that have been aggregated into the reducer.
func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point {
	points := make([]{{$k.Name}}Point, 0, len(r.m))
	for _, p := range r.m {
		points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value})
	}
	sort.Sort({{$k.name}}Points(points))
	return points
}

// {{$k.Name}}ElapsedReducer calculates the elapsed duration between the aggregated points.
type {{$k.Name}}ElapsedReducer struct {
	unitConversion int64
	prev           {{$k.Name}}Point
	curr           {{$k.Name}}Point
}

// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer.
func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer {
	return &{{$k.Name}}ElapsedReducer{
		unitConversion: int64(interval.Duration),
		prev:           {{$k.Name}}Point{Nil: true},
		curr:           {{$k.Name}}Point{Nil: true},
	}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window.
func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.prev = r.curr
	r.curr = *p
}

// Emit emits the elapsed duration of the reducer at the current point.
func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint {
	if !r.prev.Nil {
		elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion
		return []IntegerPoint{
			{Time: r.curr.Time, Value: elapsed},
		}
	}
	return nil
}

// {{$k.Name}}SampleReducer implements reservoir sampling to calculate a random subset of points.
type {{$k.Name}}SampleReducer struct {
	count int        // how many points we've iterated over
	rng   *rand.Rand // random number generator for each reducer

	points {{$k.name}}Points // the reservoir
}

// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer.
func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer {
	return &{{$k.Name}}SampleReducer{
		rng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/
		points: make({{$k.name}}Points, size),
	}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer.
func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.count++
	// Fill the reservoir with the first n points.
	if r.count-1 < len(r.points) {
		p.CopyTo(&r.points[r.count-1])
		return
	}

	// Generate a random integer between 0 and count-1; if it falls inside
	// the reservoir, replace the point at that index with p. This keeps
	// every point seen so far in the sample with equal probability.
	rnd := r.rng.Intn(r.count)
	if rnd < len(r.points) {
		p.CopyTo(&r.points[rnd])
	}
}

// Emit emits the points currently held in the reservoir sample.
func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point {
	min := len(r.points)
	if r.count < min {
		min = r.count
	}
	pts := r.points[:min]
	sort.Sort(pts)
	return pts
}

{{end}}{{end}}
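To make the templated SampleReducer above easier to follow, here is a minimal concrete instantiation of the same reservoir-sampling logic (Algorithm R) for plain float64 values. This is illustration, not generated output: after the reservoir fills, each incoming value replaces a random slot with probability size/count, which keeps every value seen so far in the sample with equal probability.

package main

import (
	"fmt"
	"math/rand"
)

type reservoir struct {
	count int
	rng   *rand.Rand
	vals  []float64
}

func newReservoir(size int, seed int64) *reservoir {
	return &reservoir{rng: rand.New(rand.NewSource(seed)), vals: make([]float64, size)}
}

func (r *reservoir) add(v float64) {
	r.count++
	// Fill the first size slots directly.
	if r.count-1 < len(r.vals) {
		r.vals[r.count-1] = v
		return
	}
	// Replace a random slot with probability size/count.
	if i := r.rng.Intn(r.count); i < len(r.vals) {
		r.vals[i] = v
	}
}

func main() {
	r := newReservoir(2, 42)
	for v := 1.0; v <= 10; v++ {
		r.add(v)
	}
	n := len(r.vals)
	if r.count < n {
		n = r.count
	}
	fmt.Println(r.vals[:n]) // a uniform 2-element sample of 1..10
}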
1163
vendor/github.com/influxdata/influxdb/influxql/functions.go
generated
vendored
Normal file
File diff suppressed because it is too large
498
vendor/github.com/influxdata/influxdb/influxql/functions_test.go
generated
vendored
Normal file
@@ -0,0 +1,498 @@
package influxql_test

import (
	"math"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/pkg/deep"
)

func almostEqual(got, exp float64) bool {
	return math.Abs(got-exp) < 1e-5 && !math.IsNaN(got)
}

func TestHoltWinters_AusTourists(t *testing.T) {
	hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1)
	// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists
	austourists := []influxql.FloatPoint{
		{Time: 1, Value: 30.052513},
		{Time: 2, Value: 19.148496},
		{Time: 3, Value: 25.317692},
		{Time: 4, Value: 27.591437},
		{Time: 5, Value: 32.076456},
		{Time: 6, Value: 23.487961},
		{Time: 7, Value: 28.47594},
		{Time: 8, Value: 35.123753},
		{Time: 9, Value: 36.838485},
		{Time: 10, Value: 25.007017},
		{Time: 11, Value: 30.72223},
		{Time: 12, Value: 28.693759},
		{Time: 13, Value: 36.640986},
		{Time: 14, Value: 23.824609},
		{Time: 15, Value: 29.311683},
		{Time: 16, Value: 31.770309},
		{Time: 17, Value: 35.177877},
		{Time: 18, Value: 19.775244},
		{Time: 19, Value: 29.60175},
		{Time: 20, Value: 34.538842},
		{Time: 21, Value: 41.273599},
		{Time: 22, Value: 26.655862},
		{Time: 23, Value: 28.279859},
		{Time: 24, Value: 35.191153},
		{Time: 25, Value: 41.727458},
		{Time: 26, Value: 24.04185},
		{Time: 27, Value: 32.328103},
		{Time: 28, Value: 37.328708},
		{Time: 29, Value: 46.213153},
		{Time: 30, Value: 29.346326},
		{Time: 31, Value: 36.48291},
		{Time: 32, Value: 42.977719},
		{Time: 33, Value: 48.901525},
		{Time: 34, Value: 31.180221},
		{Time: 35, Value: 37.717881},
		{Time: 36, Value: 40.420211},
		{Time: 37, Value: 51.206863},
		{Time: 38, Value: 31.887228},
		{Time: 39, Value: 40.978263},
		{Time: 40, Value: 43.772491},
		{Time: 41, Value: 55.558567},
		{Time: 42, Value: 33.850915},
		{Time: 43, Value: 42.076383},
		{Time: 44, Value: 45.642292},
		{Time: 45, Value: 59.76678},
		{Time: 46, Value: 35.191877},
		{Time: 47, Value: 44.319737},
		{Time: 48, Value: 47.913736},
	}

	for _, p := range austourists {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: 49, Value: 51.85064132137853},
		{Time: 50, Value: 43.26055282315273},
		{Time: 51, Value: 41.827258044814464},
		{Time: 52, Value: 54.3990354591749},
		{Time: 53, Value: 54.62334472770803},
		{Time: 54, Value: 45.57155693625209},
		{Time: 55, Value: 44.06051240252263},
		{Time: 56, Value: 57.30029870759433},
		{Time: 57, Value: 57.53591513519172},
		{Time: 58, Value: 47.999008139396096},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}

	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

func TestHoltWinters_AusTourists_Missing(t *testing.T) {
	hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1)
	// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists
	austourists := []influxql.FloatPoint{
		{Time: 1, Value: 30.052513},
		{Time: 3, Value: 25.317692},
		{Time: 4, Value: 27.591437},
		{Time: 5, Value: 32.076456},
		{Time: 6, Value: 23.487961},
		{Time: 7, Value: 28.47594},
		{Time: 9, Value: 36.838485},
		{Time: 10, Value: 25.007017},
		{Time: 11, Value: 30.72223},
		{Time: 12, Value: 28.693759},
		{Time: 13, Value: 36.640986},
		{Time: 14, Value: 23.824609},
		{Time: 15, Value: 29.311683},
		{Time: 16, Value: 31.770309},
		{Time: 17, Value: 35.177877},
		{Time: 19, Value: 29.60175},
		{Time: 20, Value: 34.538842},
		{Time: 21, Value: 41.273599},
		{Time: 22, Value: 26.655862},
		{Time: 23, Value: 28.279859},
		{Time: 24, Value: 35.191153},
		{Time: 25, Value: 41.727458},
		{Time: 26, Value: 24.04185},
		{Time: 27, Value: 32.328103},
		{Time: 28, Value: 37.328708},
		{Time: 30, Value: 29.346326},
		{Time: 31, Value: 36.48291},
		{Time: 32, Value: 42.977719},
		{Time: 34, Value: 31.180221},
		{Time: 35, Value: 37.717881},
		{Time: 36, Value: 40.420211},
		{Time: 37, Value: 51.206863},
		{Time: 38, Value: 31.887228},
		{Time: 41, Value: 55.558567},
		{Time: 42, Value: 33.850915},
		{Time: 43, Value: 42.076383},
		{Time: 44, Value: 45.642292},
		{Time: 45, Value: 59.76678},
		{Time: 46, Value: 35.191877},
		{Time: 47, Value: 44.319737},
		{Time: 48, Value: 47.913736},
	}

	for _, p := range austourists {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: 49, Value: 54.84533610387743},
		{Time: 50, Value: 41.19329421863249},
		{Time: 51, Value: 45.71673175112451},
		{Time: 52, Value: 56.05759298805955},
		{Time: 53, Value: 59.32337460282217},
		{Time: 54, Value: 44.75280096850461},
		{Time: 55, Value: 49.98865098113751},
		{Time: 56, Value: 61.86084934967605},
		{Time: 57, Value: 65.95805633454883},
		{Time: 58, Value: 50.1502170480547},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}

	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

func TestHoltWinters_USPopulation(t *testing.T) {
	series := []influxql.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 5.31},
		{Time: 3, Value: 7.24},
		{Time: 4, Value: 9.64},
		{Time: 5, Value: 12.90},
		{Time: 6, Value: 17.10},
		{Time: 7, Value: 23.20},
		{Time: 8, Value: 31.40},
		{Time: 9, Value: 39.80},
		{Time: 10, Value: 50.20},
		{Time: 11, Value: 62.90},
		{Time: 12, Value: 76.00},
		{Time: 13, Value: 92.00},
		{Time: 14, Value: 105.70},
		{Time: 15, Value: 122.80},
		{Time: 16, Value: 131.70},
		{Time: 17, Value: 151.30},
		{Time: 18, Value: 179.30},
		{Time: 19, Value: 203.20},
	}
	hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1)
	for _, p := range series {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 4.957405463559748},
		{Time: 3, Value: 7.012210102535647},
		{Time: 4, Value: 10.099589257439924},
		{Time: 5, Value: 14.229926188104242},
		{Time: 6, Value: 19.418878968703797},
		{Time: 7, Value: 25.68749172281409},
		{Time: 8, Value: 33.062351305731305},
		{Time: 9, Value: 41.575791076125206},
		{Time: 10, Value: 51.26614395589263},
		{Time: 11, Value: 62.178047564264595},
		{Time: 12, Value: 74.36280483872488},
		{Time: 13, Value: 87.87880423073163},
		{Time: 14, Value: 102.79200429905801},
		{Time: 15, Value: 119.17648832929542},
		{Time: 16, Value: 137.11509549747296},
		{Time: 17, Value: 156.70013608313175},
		{Time: 18, Value: 178.03419933863566},
		{Time: 19, Value: 201.23106385518594},
		{Time: 20, Value: 226.4167216525905},
		{Time: 21, Value: 253.73052878285205},
		{Time: 22, Value: 283.32649700397553},
		{Time: 23, Value: 315.37474308085984},
		{Time: 24, Value: 350.06311454009256},
		{Time: 25, Value: 387.59901328556873},
		{Time: 26, Value: 428.21144141893404},
		{Time: 27, Value: 472.1532969569147},
		{Time: 28, Value: 519.7039509590035},
		{Time: 29, Value: 571.1721419458248},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

func TestHoltWinters_USPopulation_Missing(t *testing.T) {
	series := []influxql.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 5.31},
		{Time: 3, Value: 7.24},
		{Time: 4, Value: 9.64},
		{Time: 5, Value: 12.90},
		{Time: 6, Value: 17.10},
		{Time: 7, Value: 23.20},
		{Time: 8, Value: 31.40},
		{Time: 10, Value: 50.20},
		{Time: 11, Value: 62.90},
		{Time: 12, Value: 76.00},
		{Time: 13, Value: 92.00},
		{Time: 15, Value: 122.80},
		{Time: 16, Value: 131.70},
		{Time: 17, Value: 151.30},
		{Time: 19, Value: 203.20},
	}
	hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1)
	for _, p := range series {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 4.8931364428135105},
		{Time: 3, Value: 6.962653629047061},
		{Time: 4, Value: 10.056207765903274},
		{Time: 5, Value: 14.18435088129532},
		{Time: 6, Value: 19.362939306110846},
		{Time: 7, Value: 25.613247940326584},
		{Time: 8, Value: 32.96213087008264},
		{Time: 9, Value: 41.442230043017204},
		{Time: 10, Value: 51.09223428526052},
		{Time: 11, Value: 61.95719155158485},
		{Time: 12, Value: 74.08887794968567},
		{Time: 13, Value: 87.54622778052787},
		{Time: 14, Value: 102.39582960014131},
		{Time: 15, Value: 118.7124941463221},
		{Time: 16, Value: 136.57990089987464},
		{Time: 17, Value: 156.09133107941278},
		{Time: 18, Value: 177.35049601833734},
		{Time: 19, Value: 200.472471161683},
		{Time: 20, Value: 225.58474737097785},
		{Time: 21, Value: 252.82841286206823},
		{Time: 22, Value: 282.35948095261017},
		{Time: 23, Value: 314.3503808953992},
		{Time: 24, Value: 348.99163145856954},
		{Time: 25, Value: 386.49371962730555},
		{Time: 26, Value: 427.08920989407727},
		{Time: 27, Value: 471.0351131332573},
		{Time: 28, Value: 518.615548088049},
		{Time: 29, Value: 570.1447331101863},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

func TestHoltWinters_RoundTime(t *testing.T) {
	maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano()
	data := []influxql.FloatPoint{
		{Time: maxTime - int64(5*time.Second), Value: 1},
		{Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10},
		{Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2},
		{Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11},
	}
	hw := influxql.NewFloatHoltWintersReducer(2, 2, true, time.Second)
	for _, p := range data {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: maxTime - int64(5*time.Second), Value: 1},
		{Time: maxTime - int64(4*time.Second), Value: 10.006729104838234},
		{Time: maxTime - int64(3*time.Second), Value: 1.998341814469269},
		{Time: maxTime - int64(2*time.Second), Value: 10.997858830631172},
		{Time: maxTime - int64(1*time.Second), Value: 4.085860238030013},
		{Time: maxTime - int64(0*time.Second), Value: 11.35713604403339},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

func TestHoltWinters_MaxTime(t *testing.T) {
	data := []influxql.FloatPoint{
		{Time: influxql.MaxTime - 1, Value: 1},
		{Time: influxql.MaxTime, Value: 2},
	}
	hw := influxql.NewFloatHoltWintersReducer(1, 0, true, 1)
	for _, p := range data {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()

	forecasted := []influxql.FloatPoint{
		{Time: influxql.MaxTime - 1, Value: 1},
		{Time: influxql.MaxTime, Value: 2.001516944066403},
		{Time: influxql.MaxTime + 1, Value: 2.5365248972488343},
	}

	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}

// TestSample_AllSamplesSeen attempts to verify that it is possible
// to get every subsample in a reasonable number of iterations.
//
// The idea here is that 30 iterations should be enough to hit every possible
// sequence at least once.
func TestSample_AllSamplesSeen(t *testing.T) {
	ps := []influxql.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}

	// List of all the possible subsamples
	samples := [][]influxql.FloatPoint{
		{
			{Time: 1, Value: 1},
			{Time: 2, Value: 2},
		},
		{
			{Time: 1, Value: 1},
			{Time: 3, Value: 3},
		},
		{
			{Time: 2, Value: 2},
			{Time: 3, Value: 3},
		},
	}

	// 30 iterations should be sufficient to guarantee that
	// we hit every possible subsample.
	for i := 0; i < 30; i++ {
		s := influxql.NewFloatSampleReducer(2)
		for _, p := range ps {
			s.AggregateFloat(&p)
		}

		points := s.Emit()

		for i, sample := range samples {
			// If we find a sample that matches, remove it from
			// this list of possible samples.
			if deep.Equal(sample, points) {
				samples = append(samples[:i], samples[i+1:]...)
				break
			}
		}

		// If samples is empty we've seen every sample, so we're done.
		if len(samples) == 0 {
			return
		}

		// The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep,
		// this test will fail on machines where UnixNano doesn't return full resolution.
		// Specifically, some Windows machines will only return timestamps accurate to 100ns.
		// While iterating through this test without an explicit sleep,
		// we would only see one or two unique seeds across all the calls to NewFloatSampleReducer.
		time.Sleep(time.Millisecond)
	}

	// If we missed a sample, report the error.
	if len(samples) != 0 {
		t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples)
	}
}

func TestSample_SampleSizeLessThanNumPoints(t *testing.T) {
	s := influxql.NewFloatSampleReducer(2)

	ps := []influxql.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}

	for _, p := range ps {
		s.AggregateFloat(&p)
	}

	points := s.Emit()

	if exp, got := 2, len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
}

func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) {
	s := influxql.NewFloatSampleReducer(4)

	ps := []influxql.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}

	for _, p := range ps {
		s.AggregateFloat(&p)
	}

	points := s.Emit()

	if exp, got := len(ps), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}

	if !deep.Equal(ps, points) {
		t.Fatalf("unexpected points: %s", spew.Sdump(points))
	}
}
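A quick back-of-the-envelope check (not part of the vendored tests) of the claim in TestSample_AllSamplesSeen that 30 iterations suffice: assuming reservoir sampling makes each of the three size-2 subsamples of 3 points equally likely on each iteration, a fixed subsample is missed on one try with probability 2/3, and the union bound over all three subsamples keeps the overall failure chance on the order of 1e-5.

package main

import (
	"fmt"
	"math"
)

func main() {
	// One fixed subsample is never drawn in 30 independent tries.
	missOne := math.Pow(2.0/3.0, 30)
	fmt.Printf("P(miss one fixed subsample) ~= %.2g\n", missOne) // ~5.2e-06
	// Union bound over the three possible subsamples.
	fmt.Printf("P(any subsample unseen)     <= %.2g\n", 3*missOne) // ~1.6e-05
}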
7
vendor/github.com/influxdata/influxdb/influxql/influxql.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package influxql // import "github.com/influxdata/influxdb/influxql"

//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl
//go:generate tmpl -data=@tmpldata point.gen.go.tmpl
//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl

//go:generate protoc --gogo_out=. internal/internal.proto
564
vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go
generated
vendored
Normal file
@@ -0,0 +1,564 @@
// Code generated by protoc-gen-gogo.
// source: internal/internal.proto
// DO NOT EDIT!

/*
Package influxql is a generated protocol buffer package.

It is generated from these files:
	internal/internal.proto

It has these top-level messages:
	Point
	Aux
	IteratorOptions
	Measurements
	Measurement
	Interval
	IteratorStats
	VarRef
*/
package influxql

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type Point struct {
	Name             *string        `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
	Tags             *string        `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"`
	Time             *int64         `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"`
	Nil              *bool          `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"`
	Aux              []*Aux         `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"`
	Aggregated       *uint32        `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"`
	FloatValue       *float64       `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"`
	IntegerValue     *int64         `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"`
	StringValue      *string        `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"`
	BooleanValue     *bool          `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"`
	Stats            *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"`
	XXX_unrecognized []byte         `json:"-"`
}

func (m *Point) Reset()                    { *m = Point{} }
func (m *Point) String() string            { return proto.CompactTextString(m) }
func (*Point) ProtoMessage()               {}
func (*Point) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} }

func (m *Point) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *Point) GetTags() string {
	if m != nil && m.Tags != nil {
		return *m.Tags
	}
	return ""
}

func (m *Point) GetTime() int64 {
	if m != nil && m.Time != nil {
		return *m.Time
	}
	return 0
}

func (m *Point) GetNil() bool {
	if m != nil && m.Nil != nil {
		return *m.Nil
	}
	return false
}

func (m *Point) GetAux() []*Aux {
	if m != nil {
		return m.Aux
	}
	return nil
}

func (m *Point) GetAggregated() uint32 {
	if m != nil && m.Aggregated != nil {
		return *m.Aggregated
	}
	return 0
}

func (m *Point) GetFloatValue() float64 {
	if m != nil && m.FloatValue != nil {
		return *m.FloatValue
	}
	return 0
}

func (m *Point) GetIntegerValue() int64 {
	if m != nil && m.IntegerValue != nil {
		return *m.IntegerValue
	}
	return 0
}

func (m *Point) GetStringValue() string {
	if m != nil && m.StringValue != nil {
		return *m.StringValue
	}
	return ""
}

func (m *Point) GetBooleanValue() bool {
	if m != nil && m.BooleanValue != nil {
		return *m.BooleanValue
	}
	return false
}

func (m *Point) GetStats() *IteratorStats {
	if m != nil {
		return m.Stats
	}
	return nil
}

type Aux struct {
	DataType         *int32   `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"`
	FloatValue       *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"`
	IntegerValue     *int64   `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"`
	StringValue      *string  `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"`
	BooleanValue     *bool    `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Aux) Reset()                    { *m = Aux{} }
func (m *Aux) String() string            { return proto.CompactTextString(m) }
func (*Aux) ProtoMessage()               {}
func (*Aux) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} }

func (m *Aux) GetDataType() int32 {
	if m != nil && m.DataType != nil {
		return *m.DataType
	}
	return 0
}

func (m *Aux) GetFloatValue() float64 {
	if m != nil && m.FloatValue != nil {
		return *m.FloatValue
	}
	return 0
}

func (m *Aux) GetIntegerValue() int64 {
	if m != nil && m.IntegerValue != nil {
		return *m.IntegerValue
	}
	return 0
}

func (m *Aux) GetStringValue() string {
	if m != nil && m.StringValue != nil {
		return *m.StringValue
	}
	return ""
}

func (m *Aux) GetBooleanValue() bool {
	if m != nil && m.BooleanValue != nil {
		return *m.BooleanValue
	}
	return false
}

type IteratorOptions struct {
	Expr             *string        `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"`
	Aux              []string       `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"`
	Fields           []*VarRef      `protobuf:"bytes,17,rep,name=Fields" json:"Fields,omitempty"`
	Sources          []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"`
	Interval         *Interval      `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"`
	Dimensions       []string       `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"`
	GroupBy          []string       `protobuf:"bytes,19,rep,name=GroupBy" json:"GroupBy,omitempty"`
	Fill             *int32         `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"`
	FillValue        *float64       `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"`
	Condition        *string        `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"`
	StartTime        *int64         `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"`
	EndTime          *int64         `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"`
	Location         *string        `protobuf:"bytes,21,opt,name=Location" json:"Location,omitempty"`
	Ascending        *bool          `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"`
	Limit            *int64         `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"`
	Offset           *int64         `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"`
	SLimit           *int64         `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"`
	SOffset          *int64         `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"`
	Dedupe           *bool          `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"`
	MaxSeriesN       *int64         `protobuf:"varint,18,opt,name=MaxSeriesN" json:"MaxSeriesN,omitempty"`
	Ordered          *bool          `protobuf:"varint,20,opt,name=Ordered" json:"Ordered,omitempty"`
	XXX_unrecognized []byte         `json:"-"`
}

func (m *IteratorOptions) Reset()                    { *m = IteratorOptions{} }
func (m *IteratorOptions) String() string            { return proto.CompactTextString(m) }
func (*IteratorOptions) ProtoMessage()               {}
func (*IteratorOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} }

func (m *IteratorOptions) GetExpr() string {
	if m != nil && m.Expr != nil {
		return *m.Expr
	}
	return ""
}

func (m *IteratorOptions) GetAux() []string {
	if m != nil {
		return m.Aux
	}
	return nil
}

func (m *IteratorOptions) GetFields() []*VarRef {
	if m != nil {
		return m.Fields
	}
	return nil
}

func (m *IteratorOptions) GetSources() []*Measurement {
	if m != nil {
		return m.Sources
	}
	return nil
}

func (m *IteratorOptions) GetInterval() *Interval {
	if m != nil {
		return m.Interval
	}
	return nil
}

func (m *IteratorOptions) GetDimensions() []string {
	if m != nil {
		return m.Dimensions
	}
	return nil
}

func (m *IteratorOptions) GetGroupBy() []string {
	if m != nil {
		return m.GroupBy
	}
	return nil
}

func (m *IteratorOptions) GetFill() int32 {
	if m != nil && m.Fill != nil {
		return *m.Fill
	}
	return 0
}

func (m *IteratorOptions) GetFillValue() float64 {
	if m != nil && m.FillValue != nil {
		return *m.FillValue
	}
	return 0
}

func (m *IteratorOptions) GetCondition() string {
	if m != nil && m.Condition != nil {
		return *m.Condition
	}
	return ""
}

func (m *IteratorOptions) GetStartTime() int64 {
	if m != nil && m.StartTime != nil {
		return *m.StartTime
	}
	return 0
}

func (m *IteratorOptions) GetEndTime() int64 {
	if m != nil && m.EndTime != nil {
		return *m.EndTime
	}
	return 0
}

func (m *IteratorOptions) GetLocation() string {
	if m != nil && m.Location != nil {
		return *m.Location
	}
	return ""
}

func (m *IteratorOptions) GetAscending() bool {
	if m != nil && m.Ascending != nil {
		return *m.Ascending
	}
	return false
}

func (m *IteratorOptions) GetLimit() int64 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return 0
}

func (m *IteratorOptions) GetOffset() int64 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return 0
}

func (m *IteratorOptions) GetSLimit() int64 {
	if m != nil && m.SLimit != nil {
		return *m.SLimit
	}
	return 0
}

func (m *IteratorOptions) GetSOffset() int64 {
	if m != nil && m.SOffset != nil {
		return *m.SOffset
	}
	return 0
}

func (m *IteratorOptions) GetDedupe() bool {
	if m != nil && m.Dedupe != nil {
		return *m.Dedupe
	}
	return false
}

func (m *IteratorOptions) GetMaxSeriesN() int64 {
	if m != nil && m.MaxSeriesN != nil {
		return *m.MaxSeriesN
	}
	return 0
}

func (m *IteratorOptions) GetOrdered() bool {
	if m != nil && m.Ordered != nil {
		return *m.Ordered
	}
	return false
}

type Measurements struct {
	Items            []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"`
	XXX_unrecognized []byte         `json:"-"`
}

func (m *Measurements) Reset()                    { *m = Measurements{} }
func (m *Measurements) String() string            { return proto.CompactTextString(m) }
func (*Measurements) ProtoMessage()               {}
func (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} }

func (m *Measurements) GetItems() []*Measurement {
	if m != nil {
		return m.Items
	}
	return nil
}

type Measurement struct {
	Database         *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"`
	RetentionPolicy  *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
	Name             *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"`
	Regex            *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"`
	IsTarget         *bool   `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Measurement) Reset()                    { *m = Measurement{} }
func (m *Measurement) String() string            { return proto.CompactTextString(m) }
func (*Measurement) ProtoMessage()               {}
func (*Measurement) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} }

func (m *Measurement) GetDatabase() string {
	if m != nil && m.Database != nil {
		return *m.Database
	}
	return ""
}

func (m *Measurement) GetRetentionPolicy() string {
	if m != nil && m.RetentionPolicy != nil {
		return *m.RetentionPolicy
	}
	return ""
}

func (m *Measurement) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *Measurement) GetRegex() string {
	if m != nil && m.Regex != nil {
		return *m.Regex
	}
	return ""
}

func (m *Measurement) GetIsTarget() bool {
	if m != nil && m.IsTarget != nil {
		return *m.IsTarget
	}
	return false
}

type Interval struct {
	Duration         *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"`
	Offset           *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *Interval) Reset()                    { *m = Interval{} }
func (m *Interval) String() string            { return proto.CompactTextString(m) }
func (*Interval) ProtoMessage()               {}
func (*Interval) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} }

func (m *Interval) GetDuration() int64 {
	if m != nil && m.Duration != nil {
		return *m.Duration
	}
	return 0
}

func (m *Interval) GetOffset() int64 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return 0
}

type IteratorStats struct {
	SeriesN          *int64 `protobuf:"varint,1,opt,name=SeriesN" json:"SeriesN,omitempty"`
	PointN           *int64 `protobuf:"varint,2,opt,name=PointN" json:"PointN,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *IteratorStats) Reset()                    { *m = IteratorStats{} }
func (m *IteratorStats) String() string            { return proto.CompactTextString(m) }
func (*IteratorStats) ProtoMessage()               {}
func (*IteratorStats) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} }

func (m *IteratorStats) GetSeriesN() int64 {
	if m != nil && m.SeriesN != nil {
		return *m.SeriesN
	}
	return 0
}

func (m *IteratorStats) GetPointN() int64 {
	if m != nil && m.PointN != nil {
		return *m.PointN
	}
	return 0
}

type VarRef struct {
	Val              *string `protobuf:"bytes,1,req,name=Val" json:"Val,omitempty"`
	Type             *int32  `protobuf:"varint,2,opt,name=Type" json:"Type,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *VarRef) Reset()                    { *m = VarRef{} }
func (m *VarRef) String() string            { return proto.CompactTextString(m) }
func (*VarRef) ProtoMessage()               {}
func (*VarRef) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} }

func (m *VarRef) GetVal() string {
	if m != nil && m.Val != nil {
		return *m.Val
	}
	return ""
}

func (m *VarRef) GetType() int32 {
	if m != nil && m.Type != nil {
		return *m.Type
	}
	return 0
}

func init() {
	proto.RegisterType((*Point)(nil), "influxql.Point")
	proto.RegisterType((*Aux)(nil), "influxql.Aux")
	proto.RegisterType((*IteratorOptions)(nil), "influxql.IteratorOptions")
	proto.RegisterType((*Measurements)(nil), "influxql.Measurements")
	proto.RegisterType((*Measurement)(nil), "influxql.Measurement")
	proto.RegisterType((*Interval)(nil), "influxql.Interval")
	proto.RegisterType((*IteratorStats)(nil), "influxql.IteratorStats")
	proto.RegisterType((*VarRef)(nil), "influxql.VarRef")
}

func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) }

var fileDescriptorInternal = []byte{
	// 737 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xdd, 0x6a, 0xdb, 0x4a,
	0x10, 0x46, 0x92, 0xe5, 0x58, 0xeb, 0xf8, 0x24, 0x67, 0x4f, 0x7e, 0x96, 0xc3, 0xe1, 0x54, 0xe8,
	0x4a, 0x50, 0xea, 0x40, 0x6e, 0x0b, 0x05, 0xa7, 0x49, 0x8a, 0x21, 0x71, 0xc2, 0x2a, 0xe4, 0x7e,
	0x6b, 0x8d, 0xc5, 0x82, 0x2c, 0xb9, 0xab, 0x55, 0x71, 0x1e, 0xa5, 0xcf, 0xd0, 0x87, 0xe9, 0xab,
	0xf4, 0x11, 0xca, 0xce, 0x4a, 0x96, 0x92, 0x42, 0x73, 0xa5, 0xf9, 0xbe, 0x99, 0x1d, 0xed, 0xcc,
	0x37, 0x3b, 0xe4, 0x54, 0x16, 0x1a, 0x54, 0x21, 0xf2, 0xb3, 0xd6, 0x98, 0x6e, 0x54, 0xa9, 0x4b,
	0x3a, 0x92, 0xc5, 0x2a, 0xaf, 0xb7, 0x5f, 0xf2, 0xe8, 0x87, 0x4b, 0xfc, 0xfb, 0x52, 0x16, 0x9a,
	0x52, 0x32, 0x58, 0x88, 0x35, 0x30, 0x27, 0x74, 0xe3, 0x80, 0xa3, 0x6d, 0xb8, 0x07, 0x91, 0x55,
	0xcc, 0xb5, 0x9c, 0xb1, 0x91, 0x93, 0x6b, 0x60, 0x5e, 0xe8, 0xc6, 0x1e, 0x47, 0x9b, 0x1e, 0x12,
	0x6f, 0x21, 0x73, 0x36, 0x08, 0xdd, 0x78, 0xc4, 0x8d, 0x49, 0xdf, 0x10, 0x6f, 0x56, 0x6f, 0x99,
	0x1f, 0x7a, 0xf1, 0xf8, 0x7c, 0x32, 0x6d, 0xff, 0x37, 0x9d, 0xd5, 0x5b, 0x6e, 0x3c, 0xf4, 0x7f,
	0x42, 0x66, 0x59, 0xa6, 0x20, 0x13, 0x1a, 0x52, 0x36, 0x0c, 0x9d, 0x78, 0xc2, 0x7b, 0x8c, 0xf1,
	0x5f, 0xe7, 0xa5, 0xd0, 0x8f, 0x22, 0xaf, 0x81, 0xed, 0x85, 0x4e, 0xec, 0xf0, 0x1e, 0x43, 0x23,
	0xb2, 0x3f, 0x2f, 0x34, 0x64, 0xa0, 0x6c, 0xc4, 0x28, 0x74, 0x62, 0x8f, 0x3f, 0xe3, 0x68, 0x48,
	0xc6, 0x89, 0x56, 0xb2, 0xc8, 0x6c, 0x48, 0x10, 0x3a, 0x71, 0xc0, 0xfb, 0x94, 0xc9, 0x72, 0x51,
	0x96, 0x39, 0x88, 0xc2, 0x86, 0x90, 0xd0, 0x89, 0x47, 0xfc, 0x19, 0x47, 0xdf, 0x11, 0x3f, 0xd1,
	0x42, 0x57, 0x6c, 0x1c, 0x3a, 0xf1, 0xf8, 0xfc, 0xb4, 0x2b, 0x66, 0xae, 0x41, 0x09, 0x5d, 0x2a,
	0x74, 0x73, 0x1b, 0x15, 0x7d, 0x77, 0xb0, 0x74, 0xfa, 0x2f, 0x19, 0x5d, 0x0a, 0x2d, 0x1e, 0x9e,
	0x36, 0xb6, 0xa7, 0x3e, 0xdf, 0xe1, 0x17, 0xc5, 0xb9, 0xaf, 0x16, 0xe7, 0xbd, 0x5e, 0xdc, 0xe0,
	0xf5, 0xe2, 0xfc, 0xdf, 0x8b, 0x8b, 0x7e, 0x0e, 0xc8, 0x41, 0x5b, 0xc6, 0xdd, 0x46, 0xcb, 0xb2,
	0x40, 0x85, 0xaf, 0xb6, 0x1b, 0xc5, 0x1c, 0x4c, 0x89, 0xb6, 0x51, 0xd8, 0xe8, 0xe9, 0x86, 0x5e,
	0x1c, 0x58, 0x01, 0x63, 0x32, 0xbc, 0x96, 0x90, 0xa7, 0x15, 0xfb, 0x1b, 0x45, 0x3e, 0xec, 0xfa,
	0xf2, 0x28, 0x14, 0x87, 0x15, 0x6f, 0xfc, 0xf4, 0x8c, 0xec, 0x25, 0x65, 0xad, 0x96, 0x50, 0x31,
	0x0f, 0x43, 0x8f, 0xbb, 0xd0, 0x5b, 0x10, 0x55, 0xad, 0x60, 0x0d, 0x85, 0xe6, 0x6d, 0x14, 0x9d,
	0x92, 0x91, 0x29, 0x55, 0x7d, 0x15, 0x39, 0xd6, 0x35, 0x3e, 0xa7, 0xbd, 0xa6, 0x37, 0x1e, 0xbe,
	0x8b, 0x31, 0xed, 0xbc, 0x94, 0x6b, 0x28, 0x2a, 0x73, 0x7d, 0x9c, 0xb9, 0x80, 0xf7, 0x18, 0xca,
	0xc8, 0xde, 0x27, 0x55, 0xd6, 0x9b, 0x8b, 0x27, 0xf6, 0x0f, 0x3a, 0x5b, 0x68, 0x4a, 0xbd, 0x96,
	0x79, 0x8e, 0xf3, 0xe7, 0x73, 0xb4, 0xe9, 0x7f, 0x24, 0x30, 0xdf, 0xfe, 0xe0, 0x75, 0x84, 0xf1,
	0x7e, 0x2c, 0x8b, 0x54, 0x9a, 0x56, 0xe1, 0xd0, 0x05, 0xbc, 0x23, 0x8c, 0x37, 0xd1, 0x42, 0x69,
	0x7c, 0x21, 0x01, 0xaa, 0xd6, 0x11, 0xe6, 0x1e, 0x57, 0x45, 0x8a, 0x3e, 0x82, 0xbe, 0x16, 0x9a,
	0x61, 0xb9, 0x29, 0x97, 0x02, 0x93, 0x1e, 0x63, 0xd2, 0x1d, 0x36, 0x39, 0x67, 0xd5, 0x12, 0x8a,
	0x54, 0x16, 0x19, 0xce, 0xe0, 0x88, 0x77, 0x04, 0x3d, 0x22, 0xfe, 0x8d, 0x5c, 0x4b, 0xcd, 0xf6,
	0x31, 0xa3, 0x05, 0xf4, 0x84, 0x0c, 0xef, 0x56, 0xab, 0x0a, 0x34, 0x9b, 0x20, 0xdd, 0x20, 0xc3,
	0x27, 0x36, 0xfc, 0x2f, 0xcb, 0x5b, 0x64, 0x6e, 0x96, 0x34, 0x07, 0x0e, 0xec, 0xcd, 0x92, 0xee,
	0xc4, 0x25, 0xa4, 0xf5, 0x06, 0xd8, 0x21, 0xfe, 0xba, 0x41, 0xa6, 0xe7, 0xb7, 0x62, 0x9b, 0x80,
	0x92, 0x50, 0x2d, 0x18, 0xc5, 0x43, 0x3d, 0xc6, 0x64, 0xbc, 0x53, 0x29, 0x28, 0x48, 0xd9, 0x11,
	0x1e, 0x6c, 0x61, 0xf4, 0x9e, 0xec, 0xf7, 0x54, 0xaf, 0xe8, 0x5b, 0xe2, 0xcf, 0x35, 0xac, 0x2b,
	0xe6, 0xfc, 0x69, 0x38, 0x6c, 0x4c, 0xf4, 0xcd, 0x21, 0xe3, 0x1e, 0xdd, 0xbe, 0xb2, 0xcf, 0xa2,
	0x82, 0x66, 0x5e, 0x77, 0x98, 0xc6, 0xe4, 0x80, 0x83, 0x86, 0xc2, 0x74, 0xf1, 0xbe, 0xcc, 0xe5,
	0xf2, 0x09, 0x9f, 0x5a, 0xc0, 0x5f, 0xd2, 0xbb, 0xdd, 0xe7, 0xd9, 0x89, 0xc7, 0xdd, 0x77, 0x44,
	0x7c, 0x0e, 0x19, 0x6c, 0x9b, 0x97, 0x65, 0x81, 0xf9, 0xdf, 0xbc, 0x7a, 0x10, 0x2a, 0x03, 0xdd,
	0xbc, 0xa7, 0x1d, 0x8e, 0x3e, 0x74, 0x63, 0x8b, 0xf7, 0xaa, 0x95, 0x15, 0xd4, 0xc1, 0xe6, 0xec,
	0x70, 0x4f, 0x1c, 0xb7, 0x2f, 0x4e, 0x34, 0x23, 0x93, 0x67, 0x1b, 0x05, 0x55, 0x69, 0x1a, 0xec,
	0x34, 0xaa, 0x34, 0xdd, 0x3d, 0x21, 0x43, 0xdc, 0xda, 0x8b, 0x36, 0x85, 0x45, 0xd1, 0x94, 0x0c,
	0xed, 0xe3, 0x33, 0x0f, 0xf6, 0x51, 0xe4, 0xcd, 0x36, 0x37, 0x26, 0x2e, 0x6e, 0xb3, 0x8c, 0x5c,
	0x3b, 0xeb, 0xc6, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0xca, 0x3e, 0x5e, 0x08, 0x22, 0x06, 0x00,
	0x00,
}
77
vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto
generated
vendored
Normal file
@@ -0,0 +1,77 @@
syntax = "proto2";
package influxql;

message Point {
	required string Name = 1;
	required string Tags = 2;
	required int64  Time = 3;
	required bool   Nil  = 4;
	repeated Aux    Aux  = 5;
	optional uint32 Aggregated = 6;

	optional double FloatValue   = 7;
	optional int64  IntegerValue = 8;
	optional string StringValue  = 9;
	optional bool   BooleanValue = 10;

	optional IteratorStats Stats = 11;
}

message Aux {
	required int32  DataType     = 1;
	optional double FloatValue   = 2;
	optional int64  IntegerValue = 3;
	optional string StringValue  = 4;
	optional bool   BooleanValue = 5;
}

message IteratorOptions {
	optional string Expr = 1;
	repeated string Aux = 2;
	repeated VarRef Fields = 17;
	repeated Measurement Sources = 3;
	optional Interval Interval = 4;
	repeated string Dimensions = 5;
	repeated string GroupBy = 19;
	optional int32 Fill = 6;
	optional double FillValue = 7;
	optional string Condition = 8;
	optional int64 StartTime = 9;
	optional int64 EndTime = 10;
	optional string Location = 21;
	optional bool Ascending = 11;
	optional int64 Limit = 12;
	optional int64 Offset = 13;
	optional int64 SLimit = 14;
	optional int64 SOffset = 15;
	optional bool Dedupe = 16;
	optional int64 MaxSeriesN = 18;
	optional bool Ordered = 20;
}

message Measurements {
	repeated Measurement Items = 1;
}

message Measurement {
	optional string Database = 1;
	optional string RetentionPolicy = 2;
	optional string Name = 3;
	optional string Regex = 4;
	optional bool IsTarget = 5;
}

message Interval {
	optional int64 Duration = 1;
	optional int64 Offset = 2;
}

message IteratorStats {
	optional int64 SeriesN = 1;
	optional int64 PointN = 2;
}

message VarRef {
	required string Val = 1;
	optional int32 Type = 2;
}
11929
vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go
generated
vendored
Normal file
11929
vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1818
vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl
generated
vendored
Normal file
1818
vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1402
vendor/github.com/influxdata/influxdb/influxql/iterator.go
generated
vendored
Normal file
1402
vendor/github.com/influxdata/influxdb/influxql/iterator.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
44
vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go
generated
vendored
Normal file
44
vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
package influxql

import "fmt"

type IteratorMap interface {
	Value(tags Tags, buf []interface{}) interface{}
}

type FieldMap int

func (i FieldMap) Value(tags Tags, buf []interface{}) interface{} { return buf[i] }

type TagMap string

func (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) }

type NullMap struct{}

func (NullMap) Value(tags Tags, buf []interface{}) interface{} { return nil }

func NewIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator {
	if driver != nil {
		switch driver := driver.(type) {
		case FieldMap:
			switch itrs[int(driver)].(type) {
			case FloatIterator:
				return newFloatIteratorMapper(itrs, driver, fields, opt)
			case IntegerIterator:
				return newIntegerIteratorMapper(itrs, driver, fields, opt)
			case StringIterator:
				return newStringIteratorMapper(itrs, driver, fields, opt)
			case BooleanIterator:
				return newBooleanIteratorMapper(itrs, driver, fields, opt)
			default:
				panic(fmt.Sprintf("unable to map iterator type: %T", itrs[int(driver)]))
			}
		case TagMap:
			return newStringIteratorMapper(itrs, driver, fields, opt)
		default:
			panic(fmt.Sprintf("unable to create iterator mapper with driver expression type: %T", driver))
		}
	}
	return newFloatIteratorMapper(itrs, nil, fields, opt)
}
62
vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go
generated
vendored
Normal file
62
vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
package influxql_test

import (
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/pkg/deep"
)

func TestIteratorMapper(t *testing.T) {
	val1itr := &FloatIterator{Points: []influxql.FloatPoint{
		{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1},
		{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: 3},
		{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: 2},
		{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: 8},
	}}

	val2itr := &StringIterator{Points: []influxql.StringPoint{
		{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"},
		{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: "c"},
		{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: "b"},
		{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: "h"},
	}}
	inputs := []influxql.Iterator{val1itr, val2itr}

	opt := influxql.IteratorOptions{
		Ascending: true,
		Aux: []influxql.VarRef{
			{Val: "val1", Type: influxql.Float},
			{Val: "val2", Type: influxql.String},
		},
	}
	itr := influxql.NewIteratorMapper(inputs, nil, []influxql.IteratorMap{
		influxql.FieldMap(0),
		influxql.FieldMap(1),
		influxql.TagMap("host"),
	}, opt)
	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if !deep.Equal(a, [][]influxql.Point{
		{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Aux: []interface{}{float64(1), "a", "A"}}},
		{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Aux: []interface{}{float64(3), "c", "A"}}},
		{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Aux: []interface{}{float64(2), "b", "B"}}},
		{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Aux: []interface{}{float64(8), "h", "B"}}},
	}) {
		t.Errorf("unexpected points: %s", spew.Sdump(a))
	}

	for i, input := range inputs {
		switch input := input.(type) {
		case *FloatIterator:
			if !input.Closed {
				t.Errorf("iterator %d not closed", i)
			}
		case *StringIterator:
			if !input.Closed {
				t.Errorf("iterator %d not closed", i)
			}
		}
	}
}
1532
vendor/github.com/influxdata/influxdb/influxql/iterator_test.go
generated
vendored
Normal file
1532
vendor/github.com/influxdata/influxdb/influxql/iterator_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
21
vendor/github.com/influxdata/influxdb/influxql/linear.go
generated
vendored
Normal file
21
vendor/github.com/influxdata/influxdb/influxql/linear.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package influxql

// linearFloat computes the slope of the line between the points (previousTime, previousValue)
// and (nextTime, nextValue) and returns the value of the point on the line with time windowTime.
// y = mx + b
func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
	m := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line
	x := float64(windowTime - previousTime)                           // how far into the interval we are
	b := previousValue
	return m*x + b
}

// linearInteger computes the slope of the line between the points (previousTime, previousValue)
// and (nextTime, nextValue) and returns the value of the point on the line with time windowTime.
// y = mx + b
func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
	m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line
	x := float64(windowTime - previousTime)                                // how far into the interval we are
	b := float64(previousValue)
	return int64(m*x + b)
}
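These helpers are unexported, but the interpolation is easy to check in isolation. A minimal standalone sketch, with the function body transcribed from linearFloat above and made-up sample times and values:

package main

import "fmt"

// linearFloat is transcribed from the vendored helper above.
func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
	m := (nextValue - previousValue) / float64(nextTime-previousTime) // slope
	x := float64(windowTime - previousTime)                           // distance into the interval
	return m*x + previousValue
}

func main() {
	// Halfway between (t=0, v=10) and (t=10, v=20) the line passes through 15.
	fmt.Println(linearFloat(5, 0, 10, 10, 20)) // 15
}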
23
vendor/github.com/influxdata/influxdb/influxql/monitor.go
generated
vendored
Normal file
23
vendor/github.com/influxdata/influxdb/influxql/monitor.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
package influxql

import "time"

// PointLimitMonitor is a query monitor that exits when the number of points
// emitted exceeds a threshold.
func PointLimitMonitor(itrs Iterators, interval time.Duration, limit int) QueryMonitorFunc {
	return func(closing <-chan struct{}) error {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				stats := itrs.Stats()
				if stats.PointN >= limit {
					return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit)
				}
			case <-closing:
				return nil
			}
		}
	}
}
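PointLimitMonitor is a ticker-plus-cancellation select loop. A self-contained sketch of the same pattern, with a made-up counter function standing in for Iterators.Stats():

package main

import (
	"fmt"
	"time"
)

// monitor polls a counter on each tick and stops when the closing channel
// is closed, mirroring the select loop used by PointLimitMonitor above.
func monitor(count func() int, limit int, interval time.Duration, closing <-chan struct{}) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if n := count(); n >= limit {
				return fmt.Errorf("limit exceeded: %d >= %d", n, limit)
			}
		case <-closing:
			return nil
		}
	}
}

func main() {
	closing := make(chan struct{})
	n := 0
	go func() { time.Sleep(50 * time.Millisecond); close(closing) }()
	err := monitor(func() int { n++; return n }, 1000, 10*time.Millisecond, closing)
	fmt.Println(err) // <nil>: closing fires long before the limit is reached
}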
239
vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go
generated
vendored
Normal file
239
vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
// Package neldermead is an implementation of the Nelder-Mead optimization method.
// Based on work by Michael F. Hutt: http://www.mikehutt.com/neldermead.html
package neldermead

import "math"

const (
	defaultMaxIterations = 1000
	// reflection coefficient
	defaultAlpha = 1.0
	// contraction coefficient
	defaultBeta = 0.5
	// expansion coefficient
	defaultGamma = 2.0
)

// Optimizer represents the parameters to the Nelder-Mead simplex method.
type Optimizer struct {
	// Maximum number of iterations.
	MaxIterations int
	// Reflection coefficient.
	Alpha,
	// Contraction coefficient.
	Beta,
	// Expansion coefficient.
	Gamma float64
}

// New returns a new instance of Optimizer with all values set to the defaults.
func New() *Optimizer {
	return &Optimizer{
		MaxIterations: defaultMaxIterations,
		Alpha:         defaultAlpha,
		Beta:          defaultBeta,
		Gamma:         defaultGamma,
	}
}

// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings.
func (o *Optimizer) Optimize(
	objfunc func([]float64) float64,
	start []float64,
	epsilon,
	scale float64,
) (float64, []float64) {
	n := len(start)

	// holds vertices of simplex
	v := make([][]float64, n+1)
	for i := range v {
		v[i] = make([]float64, n)
	}

	// value of function at each vertex
	f := make([]float64, n+1)

	// reflection - coordinates
	vr := make([]float64, n)

	// expansion - coordinates
	ve := make([]float64, n)

	// contraction - coordinates
	vc := make([]float64, n)

	// centroid - coordinates
	vm := make([]float64, n)

	// create the initial simplex
	// assume one of the vertices is 0,0

	pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2))
	qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2))

	for i := 0; i < n; i++ {
		v[0][i] = start[i]
	}

	for i := 1; i <= n; i++ {
		for j := 0; j < n; j++ {
			if i-1 == j {
				v[i][j] = pn + start[j]
			} else {
				v[i][j] = qn + start[j]
			}
		}
	}

	// find the initial function values
	for j := 0; j <= n; j++ {
		f[j] = objfunc(v[j])
	}

	// begin the main loop of the minimization
	for itr := 1; itr <= o.MaxIterations; itr++ {

		// find the indexes of the largest and smallest values
		vg := 0
		vs := 0
		for i := 0; i <= n; i++ {
			if f[i] > f[vg] {
				vg = i
			}
			if f[i] < f[vs] {
				vs = i
			}
		}
		// find the index of the second largest value
		vh := vs
		for i := 0; i <= n; i++ {
			if f[i] > f[vh] && f[i] < f[vg] {
				vh = i
			}
		}

		// calculate the centroid
		for i := 0; i <= n-1; i++ {
			cent := 0.0
			for m := 0; m <= n; m++ {
				if m != vg {
					cent += v[m][i]
				}
			}
			vm[i] = cent / float64(n)
		}

		// reflect vg to new vertex vr
		for i := 0; i <= n-1; i++ {
			vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i])
		}

		// value of function at reflection point
		fr := objfunc(vr)

		if fr < f[vh] && fr >= f[vs] {
			for i := 0; i <= n-1; i++ {
				v[vg][i] = vr[i]
			}
			f[vg] = fr
		}

		// investigate a step further in this direction
		if fr < f[vs] {
			for i := 0; i <= n-1; i++ {
				ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i])
			}

			// value of function at expansion point
			fe := objfunc(ve)

			// by making fe < fr as opposed to fe < f[vs],
			// Rosenbrock's function takes 63 iterations as opposed
			// to 64 when using double variables.

			if fe < fr {
				for i := 0; i <= n-1; i++ {
					v[vg][i] = ve[i]
				}
				f[vg] = fe
			} else {
				for i := 0; i <= n-1; i++ {
					v[vg][i] = vr[i]
				}
				f[vg] = fr
			}
		}

		// check to see if a contraction is necessary
		if fr >= f[vh] {
			if fr < f[vg] && fr >= f[vh] {
				// perform outside contraction
				for i := 0; i <= n-1; i++ {
					vc[i] = vm[i] + o.Beta*(vr[i]-vm[i])
				}
			} else {
				// perform inside contraction
				for i := 0; i <= n-1; i++ {
					vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i])
				}
			}

			// value of function at contraction point
			fc := objfunc(vc)

			if fc < f[vg] {
				for i := 0; i <= n-1; i++ {
					v[vg][i] = vc[i]
				}
				f[vg] = fc
			} else {
				// at this point the contraction is not successful,
				// we must halve the distance from vs to all the
				// vertices of the simplex and then continue.

				for row := 0; row <= n; row++ {
					if row != vs {
						for i := 0; i <= n-1; i++ {
							v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0
						}
					}
				}
				f[vg] = objfunc(v[vg])
				f[vh] = objfunc(v[vh])
			}
		}

		// test for convergence
		fsum := 0.0
		for i := 0; i <= n; i++ {
			fsum += f[i]
		}
		favg := fsum / float64(n+1)
		s := 0.0
		for i := 0; i <= n; i++ {
			s += math.Pow((f[i]-favg), 2.0) / float64(n)
		}
		s = math.Sqrt(s)
		if s < epsilon {
			break
		}
	}

	// find the index of the smallest value
	vs := 0
	for i := 0; i <= n; i++ {
		if f[i] < f[vs] {
			vs = i
		}
	}

	parameters := make([]float64, n)
	for i := 0; i < n; i++ {
		parameters[i] = v[vs][i]
	}

	min := objfunc(v[vs])

	return min, parameters
}
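Typical use of this package is New() followed by Optimize with an objective function. A short sketch minimizing a simple quadratic with its minimum at (3, -2); the objective, start point, and tolerance here are made up for illustration (the test file that follows exercises the Rosenbrock function the same way):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql/neldermead"
)

func main() {
	// f(x, y) = (x-3)^2 + (y+2)^2, minimized at (3, -2) with value 0.
	f := func(x []float64) float64 {
		return (x[0]-3)*(x[0]-3) + (x[1]+2)*(x[1]+2)
	}

	opt := neldermead.New()
	min, params := opt.Optimize(f, []float64{0, 0}, 1e-9, 1)
	fmt.Printf("min=%g at (%.4f, %.4f)\n", min, params[0], params[1])
}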
64
vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go
generated
vendored
Normal file
64
vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
package neldermead_test

import (
	"math"
	"testing"

	"github.com/influxdata/influxdb/influxql/neldermead"
)

func round(num float64, precision float64) float64 {
	rnum := num * math.Pow(10, precision)
	var tnum float64
	if rnum < 0 {
		tnum = math.Floor(rnum - 0.5)
	} else {
		tnum = math.Floor(rnum + 0.5)
	}
	rnum = tnum / math.Pow(10, precision)
	return rnum
}

func almostEqual(a, b, e float64) bool {
	return math.Abs(a-b) < e
}

func Test_Optimize(t *testing.T) {
	constraints := func(x []float64) {
		for i := range x {
			x[i] = round(x[i], 5)
		}
	}
	// 100*(b-a^2)^2 + (1-a)^2
	//
	// Obvious global minimum at (a,b) = (1,1)
	//
	// Useful visualization:
	// https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2)
	f := func(x []float64) float64 {
		constraints(x)
		// a = x[0]
		// b = x[1]
		return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0])
	}

	start := []float64{-1.2, 1.0}

	opt := neldermead.New()
	epsilon := 1e-5
	min, parameters := opt.Optimize(f, start, epsilon, 1)

	if !almostEqual(min, 0, epsilon) {
		t.Errorf("unexpected min: got %f exp 0", min)
	}

	if !almostEqual(parameters[0], 1, 1e-2) {
		t.Errorf("unexpected parameters[0]: got %f exp 1", parameters[0])
	}

	if !almostEqual(parameters[1], 1, 1e-2) {
		t.Errorf("unexpected parameters[1]: got %f exp 1", parameters[1])
	}
}
3014
vendor/github.com/influxdata/influxdb/influxql/parser.go
generated
vendored
Normal file
3014
vendor/github.com/influxdata/influxdb/influxql/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3552
vendor/github.com/influxdata/influxdb/influxql/parser_test.go
generated
vendored
Normal file
3552
vendor/github.com/influxdata/influxdb/influxql/parser_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
859
vendor/github.com/influxdata/influxdb/influxql/point.gen.go
generated
vendored
Normal file
859
vendor/github.com/influxdata/influxdb/influxql/point.gen.go
generated
vendored
Normal file
@@ -0,0 +1,859 @@
// Generated by tmpl
// https://github.com/benbjohnson/tmpl
//
// DO NOT EDIT!
// Source: point.gen.go.tmpl

package influxql

import (
	"encoding/binary"
	"io"

	"github.com/gogo/protobuf/proto"
	internal "github.com/influxdata/influxdb/influxql/internal"
)

// FloatPoint represents a point with a float64 value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type FloatPoint struct {
	Name string
	Tags Tags

	Time int64
	Nil bool
	Value float64
	Aux []interface{}

	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
}

func (v *FloatPoint) name() string { return v.Name }
func (v *FloatPoint) tags() Tags { return v.Tags }
func (v *FloatPoint) time() int64 { return v.Time }
func (v *FloatPoint) nil() bool { return v.Nil }
func (v *FloatPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *FloatPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
func (v *FloatPoint) Clone() *FloatPoint {
	if v == nil {
		return nil
	}

	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}

	return &other
}

// CopyTo makes a deep copy into the point.
func (v *FloatPoint) CopyTo(other *FloatPoint) {
	*other = *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
}

func encodeFloatPoint(p *FloatPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		FloatValue: proto.Float64(p.Value),
	}
}

func decodeFloatPoint(pb *internal.Point) *FloatPoint {
	return &FloatPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetFloatValue(),
	}
}

// floatPoints represents a slice of points sortable by time and then value.
type floatPoints []FloatPoint

func (a floatPoints) Len() int { return len(a) }
func (a floatPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// floatPointsByValue represents a slice of points sortable by value.
type floatPointsByValue []FloatPoint

func (a floatPointsByValue) Len() int { return len(a) }

func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }

func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// floatPointsByTime represents a slice of points sortable by time.
type floatPointsByTime []FloatPoint

func (a floatPointsByTime) Len() int { return len(a) }
func (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a floatPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// floatPointsByFunc represents a slice of points sortable by a function.
type floatPointsByFunc struct {
	points []FloatPoint
	cmp func(a, b *FloatPoint) bool
}

func (a *floatPointsByFunc) Len() int { return len(a.points) }
func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }

func (a *floatPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(FloatPoint))
}

func (a *floatPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc {
	return &floatPointsByFunc{
		points: points,
		cmp: cmp,
	}
}

// FloatPointEncoder encodes FloatPoint points to a writer.
type FloatPointEncoder struct {
	w io.Writer
}

// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w.
func NewFloatPointEncoder(w io.Writer) *FloatPointEncoder {
	return &FloatPointEncoder{w: w}
}

// EncodeFloatPoint marshals and writes p to the underlying writer.
func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeFloatPoint(p))
	if err != nil {
		return err
	}

	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}

	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// FloatPointDecoder decodes FloatPoint points from a reader.
type FloatPointDecoder struct {
	r io.Reader
	stats IteratorStats
}

// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r.
func NewFloatPointDecoder(r io.Reader) *FloatPointDecoder {
	return &FloatPointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats }

// DecodeFloatPoint reads from the underlying reader and unmarshals into p.
func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		// Decode into point object.
		*p = *decodeFloatPoint(&pb)

		return nil
	}
}

// IntegerPoint represents a point with a int64 value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type IntegerPoint struct {
	Name string
	Tags Tags

	Time int64
	Nil bool
	Value int64
	Aux []interface{}

	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
}

func (v *IntegerPoint) name() string { return v.Name }
func (v *IntegerPoint) tags() Tags { return v.Tags }
func (v *IntegerPoint) time() int64 { return v.Time }
func (v *IntegerPoint) nil() bool { return v.Nil }
func (v *IntegerPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *IntegerPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
func (v *IntegerPoint) Clone() *IntegerPoint {
	if v == nil {
		return nil
	}

	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}

	return &other
}

// CopyTo makes a deep copy into the point.
func (v *IntegerPoint) CopyTo(other *IntegerPoint) {
	*other = *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
}

func encodeIntegerPoint(p *IntegerPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		IntegerValue: proto.Int64(p.Value),
	}
}

func decodeIntegerPoint(pb *internal.Point) *IntegerPoint {
	return &IntegerPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetIntegerValue(),
	}
}

// integerPoints represents a slice of points sortable by time and then value.
type integerPoints []IntegerPoint

func (a integerPoints) Len() int { return len(a) }
func (a integerPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// integerPointsByValue represents a slice of points sortable by value.
type integerPointsByValue []IntegerPoint

func (a integerPointsByValue) Len() int { return len(a) }

func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }

func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// integerPointsByTime represents a slice of points sortable by time.
type integerPointsByTime []IntegerPoint

func (a integerPointsByTime) Len() int { return len(a) }
func (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a integerPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// integerPointsByFunc represents a slice of points sortable by a function.
type integerPointsByFunc struct {
	points []IntegerPoint
	cmp func(a, b *IntegerPoint) bool
}

func (a *integerPointsByFunc) Len() int { return len(a.points) }
func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }

func (a *integerPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(IntegerPoint))
}

func (a *integerPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc {
	return &integerPointsByFunc{
		points: points,
		cmp: cmp,
	}
}

// IntegerPointEncoder encodes IntegerPoint points to a writer.
type IntegerPointEncoder struct {
	w io.Writer
}

// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w.
func NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder {
	return &IntegerPointEncoder{w: w}
}

// EncodeIntegerPoint marshals and writes p to the underlying writer.
func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeIntegerPoint(p))
	if err != nil {
		return err
	}

	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}

	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// IntegerPointDecoder decodes IntegerPoint points from a reader.
type IntegerPointDecoder struct {
	r io.Reader
	stats IteratorStats
}

// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r.
func NewIntegerPointDecoder(r io.Reader) *IntegerPointDecoder {
	return &IntegerPointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats }

// DecodeIntegerPoint reads from the underlying reader and unmarshals into p.
func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		// Decode into point object.
		*p = *decodeIntegerPoint(&pb)

		return nil
	}
}

// StringPoint represents a point with a string value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type StringPoint struct {
	Name string
	Tags Tags

	Time int64
	Nil bool
	Value string
	Aux []interface{}

	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
}

func (v *StringPoint) name() string { return v.Name }
func (v *StringPoint) tags() Tags { return v.Tags }
func (v *StringPoint) time() int64 { return v.Time }
func (v *StringPoint) nil() bool { return v.Nil }
func (v *StringPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *StringPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
func (v *StringPoint) Clone() *StringPoint {
	if v == nil {
		return nil
	}

	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}

	return &other
}

// CopyTo makes a deep copy into the point.
func (v *StringPoint) CopyTo(other *StringPoint) {
	*other = *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
}

func encodeStringPoint(p *StringPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		StringValue: proto.String(p.Value),
	}
}

func decodeStringPoint(pb *internal.Point) *StringPoint {
	return &StringPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetStringValue(),
	}
}

// stringPoints represents a slice of points sortable by time and then value.
type stringPoints []StringPoint

func (a stringPoints) Len() int { return len(a) }
func (a stringPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stringPointsByValue represents a slice of points sortable by value.
type stringPointsByValue []StringPoint

func (a stringPointsByValue) Len() int { return len(a) }

func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }

func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stringPointsByTime represents a slice of points sortable by time.
type stringPointsByTime []StringPoint

func (a stringPointsByTime) Len() int { return len(a) }
func (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a stringPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stringPointsByFunc represents a slice of points sortable by a function.
type stringPointsByFunc struct {
	points []StringPoint
	cmp func(a, b *StringPoint) bool
}

func (a *stringPointsByFunc) Len() int { return len(a.points) }
func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }

func (a *stringPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(StringPoint))
}

func (a *stringPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc {
	return &stringPointsByFunc{
		points: points,
		cmp: cmp,
	}
}

// StringPointEncoder encodes StringPoint points to a writer.
type StringPointEncoder struct {
	w io.Writer
}

// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w.
func NewStringPointEncoder(w io.Writer) *StringPointEncoder {
	return &StringPointEncoder{w: w}
}

// EncodeStringPoint marshals and writes p to the underlying writer.
func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeStringPoint(p))
	if err != nil {
		return err
	}

	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}

	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// StringPointDecoder decodes StringPoint points from a reader.
type StringPointDecoder struct {
	r io.Reader
	stats IteratorStats
}

// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r.
func NewStringPointDecoder(r io.Reader) *StringPointDecoder {
	return &StringPointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats }

// DecodeStringPoint reads from the underlying reader and unmarshals into p.
func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		// Decode into point object.
		*p = *decodeStringPoint(&pb)

		return nil
	}
}

// BooleanPoint represents a point with a bool value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type BooleanPoint struct {
	Name string
	Tags Tags

	Time int64
	Nil bool
	Value bool
	Aux []interface{}

	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
}

func (v *BooleanPoint) name() string { return v.Name }
func (v *BooleanPoint) tags() Tags { return v.Tags }
func (v *BooleanPoint) time() int64 { return v.Time }
func (v *BooleanPoint) nil() bool { return v.Nil }
func (v *BooleanPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *BooleanPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
func (v *BooleanPoint) Clone() *BooleanPoint {
	if v == nil {
		return nil
	}

	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}

	return &other
}

// CopyTo makes a deep copy into the point.
func (v *BooleanPoint) CopyTo(other *BooleanPoint) {
	*other = *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
}

func encodeBooleanPoint(p *BooleanPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		BooleanValue: proto.Bool(p.Value),
	}
}

func decodeBooleanPoint(pb *internal.Point) *BooleanPoint {
	return &BooleanPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetBooleanValue(),
	}
}

// booleanPoints represents a slice of points sortable by time and then value.
type booleanPoints []BooleanPoint

func (a booleanPoints) Len() int { return len(a) }
func (a booleanPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return !a[i].Value
}
func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// booleanPointsByValue represents a slice of points sortable by value.
type booleanPointsByValue []BooleanPoint

func (a booleanPointsByValue) Len() int { return len(a) }

func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value }

func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// booleanPointsByTime represents a slice of points sortable by time.
type booleanPointsByTime []BooleanPoint

func (a booleanPointsByTime) Len() int { return len(a) }
func (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a booleanPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// booleanPointsByFunc represents a slice of points sortable by a function.
type booleanPointsByFunc struct {
	points []BooleanPoint
	cmp func(a, b *BooleanPoint) bool
}

func (a *booleanPointsByFunc) Len() int { return len(a.points) }
func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }

func (a *booleanPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(BooleanPoint))
}

func (a *booleanPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc {
	return &booleanPointsByFunc{
		points: points,
		cmp: cmp,
	}
}

// BooleanPointEncoder encodes BooleanPoint points to a writer.
type BooleanPointEncoder struct {
	w io.Writer
}

// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w.
func NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder {
	return &BooleanPointEncoder{w: w}
}

// EncodeBooleanPoint marshals and writes p to the underlying writer.
func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeBooleanPoint(p))
	if err != nil {
		return err
	}

	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}

	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// BooleanPointDecoder decodes BooleanPoint points from a reader.
type BooleanPointDecoder struct {
	r io.Reader
	stats IteratorStats
}

// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r.
func NewBooleanPointDecoder(r io.Reader) *BooleanPointDecoder {
	return &BooleanPointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats }

// DecodeBooleanPoint reads from the underlying reader and unmarshals into p.
func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		// Decode into point object.
		*p = *decodeBooleanPoint(&pb)

		return nil
	}
}
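The encoder/decoder pairs above frame each protobuf-marshaled point with a big-endian uint32 length prefix. A round-trip sketch through a bytes.Buffer using the exported Float APIs shown above (the point's name, time, and value are made up):

package main

import (
	"bytes"
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	var buf bytes.Buffer

	// Encode a point: length prefix, then the marshaled protobuf bytes.
	enc := influxql.NewFloatPointEncoder(&buf)
	if err := enc.EncodeFloatPoint(&influxql.FloatPoint{Name: "cpu", Time: 1, Value: 3.14}); err != nil {
		panic(err)
	}

	// Decode it back from the same stream.
	var p influxql.FloatPoint
	dec := influxql.NewFloatPointDecoder(&buf)
	if err := dec.DecodeFloatPoint(&p); err != nil {
		panic(err)
	}
	fmt.Printf("%s@%d = %v\n", p.Name, p.Time, p.Value) // cpu@1 = 3.14
}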
235
vendor/github.com/influxdata/influxdb/influxql/point.gen.go.tmpl
generated
vendored
Normal file
235
vendor/github.com/influxdata/influxdb/influxql/point.gen.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,235 @@
package influxql

import (
	"encoding/binary"
	"io"

	"github.com/gogo/protobuf/proto"
	internal "github.com/influxdata/influxdb/influxql/internal"
)

{{range .}}

// {{.Name}}Point represents a point with a {{.Type}} value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type {{.Name}}Point struct {
	Name string
	Tags Tags

	Time int64
	Nil bool
	Value {{.Type}}
	Aux []interface{}

	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
}

func (v *{{.Name}}Point) name() string { return v.Name }
func (v *{{.Name}}Point) tags() Tags { return v.Tags }
func (v *{{.Name}}Point) time() int64 { return v.Time }
func (v *{{.Name}}Point) nil() bool { return v.Nil }
func (v *{{.Name}}Point) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *{{.Name}}Point) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
func (v *{{.Name}}Point) Clone() *{{.Name}}Point {
	if v == nil {
		return nil
	}

	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}

	return &other
}

// CopyTo makes a deep copy into the point.
func (v *{{.Name}}Point) CopyTo(other *{{.Name}}Point) {
	*other = *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
}

func encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		{{if eq .Name "Float"}}
		FloatValue: proto.Float64(p.Value),
		{{else if eq .Name "Integer"}}
		IntegerValue: proto.Int64(p.Value),
		{{else if eq .Name "String"}}
		StringValue: proto.String(p.Value),
		{{else if eq .Name "Boolean"}}
		BooleanValue: proto.Bool(p.Value),
		{{end}}
	}
}

func decode{{.Name}}Point(pb *internal.Point) *{{.Name}}Point {
	return &{{.Name}}Point{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.Get{{.Name}}Value(),
	}
}

// {{.name}}Points represents a slice of points sortable by time and then value.
type {{.name}}Points []{{.Name}}Point

func (a {{.name}}Points) Len() int { return len(a) }
func (a {{.name}}Points) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return {{if ne .Name "Boolean"}}a[i].Value < a[j].Value{{else}}!a[i].Value{{end}}
}
func (a {{.name}}Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// {{.name}}PointsByValue represents a slice of points sortable by value.
type {{.name}}PointsByValue []{{.Name}}Point

func (a {{.name}}PointsByValue) Len() int { return len(a) }
{{if eq .Name "Boolean"}}
func (a {{.name}}PointsByValue) Less(i, j int) bool { return !a[i].Value }
{{else}}
func (a {{.name}}PointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
{{end}}
func (a {{.name}}PointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// {{.name}}PointsByTime represents a slice of points sortable by time.
type {{.name}}PointsByTime []{{.Name}}Point

func (a {{.name}}PointsByTime) Len() int { return len(a) }
func (a {{.name}}PointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a {{.name}}PointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// {{.name}}PointsByFunc represents a slice of points sortable by a function.
type {{.name}}PointsByFunc struct {
	points []{{.Name}}Point
	cmp func(a, b *{{.Name}}Point) bool
}

func (a *{{.name}}PointsByFunc) Len() int { return len(a.points) }
func (a *{{.name}}PointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *{{.name}}PointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }

func (a *{{.name}}PointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.({{.Name}}Point))
}

func (a *{{.name}}PointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

func {{.name}}PointsSortBy(points []{{.Name}}Point, cmp func(a, b *{{.Name}}Point) bool) *{{.name}}PointsByFunc {
	return &{{.name}}PointsByFunc{
		points: points,
		cmp: cmp,
	}
}

// {{.Name}}PointEncoder encodes {{.Name}}Point points to a writer.
type {{.Name}}PointEncoder struct {
	w io.Writer
}

// New{{.Name}}PointEncoder returns a new instance of {{.Name}}PointEncoder that writes to w.
func New{{.Name}}PointEncoder(w io.Writer) *{{.Name}}PointEncoder {
	return &{{.Name}}PointEncoder{w: w}
}

// Encode{{.Name}}Point marshals and writes p to the underlying writer.
func (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encode{{.Name}}Point(p))
	if err != nil {
		return err
	}

	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}

	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// {{.Name}}PointDecoder decodes {{.Name}}Point points from a reader.
type {{.Name}}PointDecoder struct {
	r io.Reader
	stats IteratorStats
}

// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r.
func New{{.Name}}PointDecoder(r io.Reader) *{{.Name}}PointDecoder {
	return &{{.Name}}PointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *{{.Name}}PointDecoder) Stats() IteratorStats { return dec.stats }

// Decode{{.Name}}Point reads from the underlying reader and unmarshals into p.
func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		// Decode into point object.
		*p = *decode{{.Name}}Point(&pb)

		return nil
	}
}

{{end}}
348
vendor/github.com/influxdata/influxdb/influxql/point.go
generated
vendored
Normal file
348
vendor/github.com/influxdata/influxdb/influxql/point.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
package influxql

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"sort"

	"github.com/gogo/protobuf/proto"
	internal "github.com/influxdata/influxdb/influxql/internal"
)

// ZeroTime is the Unix nanosecond timestamp for no time.
// This time is not used by the query engine or the storage engine as a valid time.
const ZeroTime = int64(math.MinInt64)

// Point represents a value in a series that occurred at a given time.
type Point interface {
	// Name and tags uniquely identify the series the value belongs to.
	name() string
	tags() Tags

	// The time that the value occurred at.
	time() int64

	// The value at the given time.
	value() interface{}

	// Auxiliary values passed along with the value.
	aux() []interface{}
}

// Points represents a list of points.
type Points []Point

// Clone returns a deep copy of a.
func (a Points) Clone() []Point {
	other := make([]Point, len(a))
	for i, p := range a {
		if p == nil {
			other[i] = nil
			continue
		}

		switch p := p.(type) {
		case *FloatPoint:
			other[i] = p.Clone()
		case *IntegerPoint:
			other[i] = p.Clone()
		case *StringPoint:
			other[i] = p.Clone()
		case *BooleanPoint:
			other[i] = p.Clone()
		default:
			panic(fmt.Sprintf("unable to clone point: %T", p))
		}
	}
	return other
}

// Tags represents a map of keys and values.
// It memoizes its key so it can be used efficiently during query execution.
type Tags struct {
	id string
	m  map[string]string
}

// NewTags returns a new instance of Tags.
func NewTags(m map[string]string) Tags {
	if len(m) == 0 {
		return Tags{}
	}
	return Tags{
		id: string(encodeTags(m)),
		m:  m,
	}
}

// newTagsID returns a new instance of Tags by parsing the given tag ID.
func newTagsID(id string) Tags {
	m := decodeTags([]byte(id))
	if len(m) == 0 {
		return Tags{}
	}
	return Tags{id: id, m: m}
}

// ID returns the string identifier for the tags.
func (t Tags) ID() string { return t.id }

// KeyValues returns the underlying map for the tags.
func (t Tags) KeyValues() map[string]string { return t.m }

// Keys returns a sorted list of all keys on the tag.
func (t *Tags) Keys() []string {
	if t == nil {
		return nil
	}

	var a []string
	for k := range t.m {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}

// Value returns the value for a given key.
func (t *Tags) Value(k string) string {
	if t == nil {
		return ""
	}
	return t.m[k]
}

// Subset returns a new tags object with a subset of the keys.
func (t *Tags) Subset(keys []string) Tags {
	if len(keys) == 0 {
		return Tags{}
	}

	// If keys match existing keys, simply return this tagset.
	if keysMatch(t.m, keys) {
		return *t
	}

	// Otherwise create new tag set.
	m := make(map[string]string, len(keys))
	for _, k := range keys {
		m[k] = t.m[k]
	}
	return NewTags(m)
}

// Equals returns true if t equals other.
func (t *Tags) Equals(other *Tags) bool {
	if t == nil && other == nil {
		return true
	} else if t == nil || other == nil {
		return false
	}
	return t.id == other.id
}

// keysMatch returns true if m has exactly the same keys as listed in keys.
func keysMatch(m map[string]string, keys []string) bool {
	if len(keys) != len(m) {
		return false
	}

	for _, k := range keys {
		if _, ok := m[k]; !ok {
			return false
		}
	}

	return true
}

// encodeTags converts a map of strings to an identifier.
func encodeTags(m map[string]string) []byte {
	// Empty maps marshal to empty bytes.
	if len(m) == 0 {
		return nil
	}

	// Extract keys and determine final size.
	sz := (len(m) * 2) - 1 // separators
	keys := make([]string, 0, len(m))
	for k, v := range m {
		keys = append(keys, k)
		sz += len(k) + len(v)
	}
	sort.Strings(keys)

	// Generate marshaled bytes.
	b := make([]byte, sz)
	buf := b
	for _, k := range keys {
		copy(buf, k)
		buf[len(k)] = '\x00'
		buf = buf[len(k)+1:]
	}
	for i, k := range keys {
		v := m[k]
		copy(buf, v)
		if i < len(keys)-1 {
			buf[len(v)] = '\x00'
			buf = buf[len(v)+1:]
		}
	}
	return b
}

// decodeTags parses an identifier into a map of tags.
func decodeTags(id []byte) map[string]string {
	a := bytes.Split(id, []byte{'\x00'})

	// There must be an even number of segments.
	if len(a) > 0 && len(a)%2 == 1 {
		a = a[:len(a)-1]
	}

	// Return nil if there are no segments.
	if len(a) == 0 {
		return nil
	}
	mid := len(a) / 2

	// Decode key/value tags.
	m := make(map[string]string)
	for i := 0; i < mid; i++ {
		m[string(a[i])] = string(a[i+mid])
	}
	return m
}
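
// Illustrative sketch (not part of the upstream file) of how the tag
// identifier round-trips through NewTags and decodeTags: all sorted keys are
// written first, then their values in the same order, NUL-separated.
//
//	tags := NewTags(map[string]string{"host": "serverA", "region": "west"})
//	fmt.Printf("%q\n", tags.ID())       // "host\x00region\x00serverA\x00west"
//	m := decodeTags([]byte(tags.ID()))
//	fmt.Println(m["host"], m["region"]) // serverA west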

func encodeAux(aux []interface{}) []*internal.Aux {
	pb := make([]*internal.Aux, len(aux))
	for i := range aux {
		switch v := aux[i].(type) {
		case float64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Float), FloatValue: proto.Float64(v)}
		case *float64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Float)}
		case int64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Integer), IntegerValue: proto.Int64(v)}
		case *int64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Integer)}
		case string:
			pb[i] = &internal.Aux{DataType: proto.Int32(String), StringValue: proto.String(v)}
		case *string:
			pb[i] = &internal.Aux{DataType: proto.Int32(String)}
		case bool:
			pb[i] = &internal.Aux{DataType: proto.Int32(Boolean), BooleanValue: proto.Bool(v)}
		case *bool:
			pb[i] = &internal.Aux{DataType: proto.Int32(Boolean)}
		default:
			pb[i] = &internal.Aux{DataType: proto.Int32(int32(Unknown))}
		}
	}
	return pb
}

func decodeAux(pb []*internal.Aux) []interface{} {
	if len(pb) == 0 {
		return nil
	}

	aux := make([]interface{}, len(pb))
	for i := range pb {
		switch pb[i].GetDataType() {
		case Float:
			if pb[i].FloatValue != nil {
				aux[i] = *pb[i].FloatValue
			} else {
				aux[i] = (*float64)(nil)
			}
		case Integer:
			if pb[i].IntegerValue != nil {
				aux[i] = *pb[i].IntegerValue
			} else {
				aux[i] = (*int64)(nil)
			}
		case String:
			if pb[i].StringValue != nil {
				aux[i] = *pb[i].StringValue
			} else {
				aux[i] = (*string)(nil)
			}
		case Boolean:
			if pb[i].BooleanValue != nil {
				aux[i] = *pb[i].BooleanValue
			} else {
				aux[i] = (*bool)(nil)
			}
		default:
			aux[i] = nil
		}
	}
	return aux
}

func cloneAux(src []interface{}) []interface{} {
	if src == nil {
		return src
	}
	dest := make([]interface{}, len(src))
	copy(dest, src)
	return dest
}

// PointDecoder decodes generic points from a reader.
type PointDecoder struct {
	r     io.Reader
	stats IteratorStats
}

// NewPointDecoder returns a new instance of PointDecoder that reads from r.
func NewPointDecoder(r io.Reader) *PointDecoder {
	return &PointDecoder{r: r}
}

// Stats returns iterator stats embedded within the stream.
func (dec *PointDecoder) Stats() IteratorStats { return dec.stats }

// DecodePoint reads from the underlying reader and unmarshals into p.
func (dec *PointDecoder) DecodePoint(p *Point) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}

		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}

		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}

		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}

		if pb.IntegerValue != nil {
			*p = decodeIntegerPoint(&pb)
		} else if pb.StringValue != nil {
			*p = decodeStringPoint(&pb)
		} else if pb.BooleanValue != nil {
			*p = decodeBooleanPoint(&pb)
		} else {
			*p = decodeFloatPoint(&pb)
		}

		return nil
	}
}
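
// Illustrative sketch (not part of the upstream file): draining a point
// stream with PointDecoder, assuming r is an io.Reader carrying
// length-prefixed protobuf points written by a matching encoder. The
// process function is a hypothetical consumer.
//
//	dec := NewPointDecoder(r)
//	for {
//		var p Point
//		if err := dec.DecodePoint(&p); err == io.EOF {
//			break // end of stream
//		} else if err != nil {
//			return err
//		}
//		process(p)
//	}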
187
vendor/github.com/influxdata/influxdb/influxql/point_test.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
package influxql_test

import (
	"reflect"
	"strings"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/pkg/deep"
)

func TestPoint_Clone_Float(t *testing.T) {
	p := &influxql.FloatPoint{
		Name:  "cpu",
		Tags:  ParseTags("host=server01"),
		Time:  5,
		Value: 2,
		Aux:   []interface{}{float64(45)},
	}
	c := p.Clone()
	if p == c {
		t.Errorf("clone has the same address as the original: %v == %v", p, c)
	}
	if !deep.Equal(p, c) {
		t.Errorf("mismatched point: %s", spew.Sdump(c))
	}
	if &p.Aux[0] == &c.Aux[0] {
		t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux)
	} else if !deep.Equal(p.Aux, c.Aux) {
		t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux)
	}
}

func TestPoint_Clone_Integer(t *testing.T) {
	p := &influxql.IntegerPoint{
		Name:  "cpu",
		Tags:  ParseTags("host=server01"),
		Time:  5,
		Value: 2,
		Aux:   []interface{}{float64(45)},
	}
	c := p.Clone()
	if p == c {
		t.Errorf("clone has the same address as the original: %v == %v", p, c)
	}
	if !deep.Equal(p, c) {
		t.Errorf("mismatched point: %s", spew.Sdump(c))
	}
	if &p.Aux[0] == &c.Aux[0] {
		t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux)
	} else if !deep.Equal(p.Aux, c.Aux) {
		t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux)
	}
}

func TestPoint_Clone_String(t *testing.T) {
	p := &influxql.StringPoint{
		Name:  "cpu",
		Tags:  ParseTags("host=server01"),
		Time:  5,
		Value: "clone",
		Aux:   []interface{}{float64(45)},
	}
	c := p.Clone()
	if p == c {
		t.Errorf("clone has the same address as the original: %v == %v", p, c)
	}
	if !deep.Equal(p, c) {
		t.Errorf("mismatched point: %s", spew.Sdump(c))
	}
	if &p.Aux[0] == &c.Aux[0] {
		t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux)
	} else if !deep.Equal(p.Aux, c.Aux) {
		t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux)
	}
}

func TestPoint_Clone_Boolean(t *testing.T) {
	p := &influxql.BooleanPoint{
		Name:  "cpu",
		Tags:  ParseTags("host=server01"),
		Time:  5,
		Value: true,
		Aux:   []interface{}{float64(45)},
	}
	c := p.Clone()
	if p == c {
		t.Errorf("clone has the same address as the original: %v == %v", p, c)
	}
	if !deep.Equal(p, c) {
		t.Errorf("mismatched point: %s", spew.Sdump(c))
	}
	if &p.Aux[0] == &c.Aux[0] {
		t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux)
	} else if !deep.Equal(p.Aux, c.Aux) {
		t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux)
	}
}

func TestPoint_Clone_Nil(t *testing.T) {
	var fp *influxql.FloatPoint
	if p := fp.Clone(); p != nil {
		t.Errorf("expected nil, got %v", p)
	}

	var ip *influxql.IntegerPoint
	if p := ip.Clone(); p != nil {
		t.Errorf("expected nil, got %v", p)
	}

	var sp *influxql.StringPoint
	if p := sp.Clone(); p != nil {
		t.Errorf("expected nil, got %v", p)
	}

	var bp *influxql.BooleanPoint
	if p := bp.Clone(); p != nil {
		t.Errorf("expected nil, got %v", p)
	}
}

// TestPoint_Fields ensures that no additional fields are added to the point structs.
// These structs are very sensitive and can affect performance unless handled carefully.
// To avoid the struct becoming a dumping ground for every function that needs to store
// miscellaneous information, this test is meant to ensure that new fields don't slip
// into the struct.
func TestPoint_Fields(t *testing.T) {
	allowedFields := map[string]bool{
		"Name":       true,
		"Tags":       true,
		"Time":       true,
		"Nil":        true,
		"Value":      true,
		"Aux":        true,
		"Aggregated": true,
	}

	for _, typ := range []reflect.Type{
		reflect.TypeOf(influxql.FloatPoint{}),
		reflect.TypeOf(influxql.IntegerPoint{}),
		reflect.TypeOf(influxql.StringPoint{}),
		reflect.TypeOf(influxql.BooleanPoint{}),
	} {
		f, ok := typ.FieldByNameFunc(func(name string) bool {
			return !allowedFields[name]
		})
		if ok {
			t.Errorf("found an unallowed field in %s: %s %s", typ, f.Name, f.Type)
		}
	}
}

// Ensure that tags can return a unique id.
func TestTags_ID(t *testing.T) {
	tags := influxql.NewTags(map[string]string{"foo": "bar", "baz": "bat"})
	if id := tags.ID(); id != "baz\x00foo\x00bat\x00bar" {
		t.Fatalf("unexpected id: %q", id)
	}
}

// Ensure that a subset can be created from a tag set.
func TestTags_Subset(t *testing.T) {
	tags := influxql.NewTags(map[string]string{"a": "0", "b": "1", "c": "2"})
	subset := tags.Subset([]string{"b", "c", "d"})
	if keys := subset.Keys(); !reflect.DeepEqual(keys, []string{"b", "c", "d"}) {
		t.Fatalf("unexpected keys: %+v", keys)
	} else if v := subset.Value("a"); v != "" {
		t.Fatalf("unexpected 'a' value: %s", v)
	} else if v := subset.Value("b"); v != "1" {
		t.Fatalf("unexpected 'b' value: %s", v)
	} else if v := subset.Value("c"); v != "2" {
		t.Fatalf("unexpected 'c' value: %s", v)
	} else if v := subset.Value("d"); v != "" {
		t.Fatalf("unexpected 'd' value: %s", v)
	}
}

// ParseTags returns an instance of Tags for a comma-delimited list of key/values.
func ParseTags(s string) influxql.Tags {
	m := make(map[string]string)
	for _, kv := range strings.Split(s, ",") {
		a := strings.Split(kv, "=")
		m[a[0]] = a[1]
	}
	return influxql.NewTags(m)
}
451
vendor/github.com/influxdata/influxdb/influxql/query_executor.go
generated
vendored
Normal file
@@ -0,0 +1,451 @@
package influxql

import (
	"errors"
	"fmt"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"time"

	"github.com/influxdata/influxdb/models"
	"github.com/uber-go/zap"
)

var (
	// ErrInvalidQuery is returned when executing an unknown query type.
	ErrInvalidQuery = errors.New("invalid query")

	// ErrNotExecuted is returned when a statement is not executed in a query.
	// This can occur when a previous statement in the same query has errored.
	ErrNotExecuted = errors.New("not executed")

	// ErrQueryInterrupted is an error returned when the query is interrupted.
	ErrQueryInterrupted = errors.New("query interrupted")

	// ErrQueryAborted is an error returned when the query is aborted.
	ErrQueryAborted = errors.New("query aborted")

	// ErrQueryEngineShutdown is an error sent when the query cannot be
	// created because the query engine was shutdown.
	ErrQueryEngineShutdown = errors.New("query engine shutdown")

	// ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run.
	ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded")
)

// Statistics for the QueryExecutor.
const (
	statQueriesActive          = "queriesActive"   // Number of queries currently being executed.
	statQueriesExecuted        = "queriesExecuted" // Number of queries that have been executed (started).
	statQueriesFinished        = "queriesFinished" // Number of queries that have finished.
	statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries.
)

// ErrDatabaseNotFound returns a database not found error for the given database name.
func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) }

// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points.
func ErrMaxSelectPointsLimitExceeded(n, limit int) error {
	return fmt.Errorf("max-select-point limit exceeded: (%d/%d)", n, limit)
}

// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run
// because the maximum number of queries has been reached.
func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error {
	return fmt.Errorf("max-concurrent-queries limit exceeded (%d, %d)", n, limit)
}

// Authorizer reports whether certain operations are authorized.
type Authorizer interface {
	// AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name.
	AuthorizeDatabase(p Privilege, name string) bool

	// AuthorizeQuery returns an error if the query cannot be executed.
	AuthorizeQuery(database string, query *Query) error

	// AuthorizeSeriesRead determines if a series is authorized for reading.
	AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool

	// AuthorizeSeriesWrite determines if a series is authorized for writing.
	AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool
}

// OpenAuthorizer is the Authorizer used when authorization is disabled.
// It allows all operations.
type OpenAuthorizer struct{}

var _ Authorizer = OpenAuthorizer{}

// AuthorizeDatabase returns true to allow any operation on a database.
func (_ OpenAuthorizer) AuthorizeDatabase(Privilege, string) bool { return true }

// AuthorizeSeriesRead allows any series to be read.
func (_ OpenAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {
	return true
}

// AuthorizeSeriesWrite allows any series to be written.
func (_ OpenAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {
	return true
}

// AuthorizeQuery allows any query to be executed.
func (_ OpenAuthorizer) AuthorizeQuery(_ string, _ *Query) error { return nil }

// ExecutionOptions contains the options for executing a query.
type ExecutionOptions struct {
	// The database the query is running against.
	Database string

	// How to determine whether the query is allowed to execute,
	// what resources can be returned in SHOW queries, etc.
	Authorizer Authorizer

	// The requested maximum number of points to return in each result.
	ChunkSize int

	// If this query is being executed in a read-only context.
	ReadOnly bool

	// Node to execute on.
	NodeID uint64

	// Quiet suppresses non-essential output from the query executor.
	Quiet bool

	// AbortCh is a channel that signals when results are no longer desired by the caller.
	AbortCh <-chan struct{}
}

// ExecutionContext contains state that the query is currently executing with.
type ExecutionContext struct {
	// The statement ID of the executing query.
	StatementID int

	// The query ID of the executing query.
	QueryID uint64

	// The query task information available to the StatementExecutor.
	Query *QueryTask

	// Output channel where results and errors should be sent.
	Results chan *Result

	// Hold the query executor's logger.
	Log zap.Logger

	// A channel that is closed when the query is interrupted.
	InterruptCh <-chan struct{}

	// Options used to start this query.
	ExecutionOptions
}

// send sends a Result to the Results channel and will exit if the query has
// been aborted.
func (ctx *ExecutionContext) send(result *Result) error {
	select {
	case <-ctx.AbortCh:
		return ErrQueryAborted
	case ctx.Results <- result:
	}
	return nil
}

// Send sends a Result to the Results channel and will exit if the query has
// been interrupted or aborted.
func (ctx *ExecutionContext) Send(result *Result) error {
	select {
	case <-ctx.InterruptCh:
		return ErrQueryInterrupted
	case <-ctx.AbortCh:
		return ErrQueryAborted
	case ctx.Results <- result:
	}
	return nil
}

// StatementExecutor executes a statement within the QueryExecutor.
type StatementExecutor interface {
	// ExecuteStatement executes a statement. Results should be sent to the
	// results channel in the ExecutionContext.
	ExecuteStatement(stmt Statement, ctx ExecutionContext) error
}

// StatementNormalizer normalizes a statement before it is executed.
type StatementNormalizer interface {
	// NormalizeStatement adds a default database and policy to the
	// measurements in the statement.
	NormalizeStatement(stmt Statement, database string) error
}

// QueryExecutor executes every statement in a Query.
type QueryExecutor struct {
	// Used for executing a statement in the query.
	StatementExecutor StatementExecutor

	// Used for tracking running queries.
	TaskManager *TaskManager

	// Logger to use for all logging.
	// Defaults to discarding all log output.
	Logger zap.Logger

	// expvar-based stats.
	stats *QueryStatistics
}

// NewQueryExecutor returns a new instance of QueryExecutor.
func NewQueryExecutor() *QueryExecutor {
	return &QueryExecutor{
		TaskManager: NewTaskManager(),
		Logger:      zap.New(zap.NullEncoder()),
		stats:       &QueryStatistics{},
	}
}

// QueryStatistics keeps statistics related to the QueryExecutor.
type QueryStatistics struct {
	ActiveQueries          int64
	ExecutedQueries        int64
	FinishedQueries        int64
	QueryExecutionDuration int64
}

// Statistics returns statistics for periodic monitoring.
func (e *QueryExecutor) Statistics(tags map[string]string) []models.Statistic {
	return []models.Statistic{{
		Name: "queryExecutor",
		Tags: tags,
		Values: map[string]interface{}{
			statQueriesActive:          atomic.LoadInt64(&e.stats.ActiveQueries),
			statQueriesExecuted:        atomic.LoadInt64(&e.stats.ExecutedQueries),
			statQueriesFinished:        atomic.LoadInt64(&e.stats.FinishedQueries),
			statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration),
		},
	}}
}

// Close kills all running queries and prevents new queries from being attached.
func (e *QueryExecutor) Close() error {
	return e.TaskManager.Close()
}

// WithLogger sets the logger on the query executor. It must not be
// called after Open is called.
func (e *QueryExecutor) WithLogger(log zap.Logger) {
	e.Logger = log.With(zap.String("service", "query"))
	e.TaskManager.Logger = e.Logger
}

// ExecuteQuery executes each statement within a query.
func (e *QueryExecutor) ExecuteQuery(query *Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result {
	results := make(chan *Result)
	go e.executeQuery(query, opt, closing, results)
	return results
}

func (e *QueryExecutor) executeQuery(query *Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) {
	defer close(results)
	defer e.recover(query, results)

	atomic.AddInt64(&e.stats.ActiveQueries, 1)
	atomic.AddInt64(&e.stats.ExecutedQueries, 1)
	defer func(start time.Time) {
		atomic.AddInt64(&e.stats.ActiveQueries, -1)
		atomic.AddInt64(&e.stats.FinishedQueries, 1)
		atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds())
	}(time.Now())

	qid, task, err := e.TaskManager.AttachQuery(query, opt.Database, closing)
	if err != nil {
		select {
		case results <- &Result{Err: err}:
		case <-opt.AbortCh:
		}
		return
	}
	defer e.TaskManager.KillQuery(qid)

	// Setup the execution context that will be used when executing statements.
	ctx := ExecutionContext{
		QueryID:          qid,
		Query:            task,
		Results:          results,
		Log:              e.Logger,
		InterruptCh:      task.closing,
		ExecutionOptions: opt,
	}

	var i int
LOOP:
	for ; i < len(query.Statements); i++ {
		ctx.StatementID = i
		stmt := query.Statements[i]

		// If a default database wasn't passed in by the caller, check the statement.
		defaultDB := opt.Database
		if defaultDB == "" {
			if s, ok := stmt.(HasDefaultDatabase); ok {
				defaultDB = s.DefaultDatabase()
			}
		}

		// Do not let queries manually use the system measurements. If we find
		// one, return an error. This prevents a person from using the
		// measurement incorrectly and causing a panic.
		if stmt, ok := stmt.(*SelectStatement); ok {
			for _, s := range stmt.Sources {
				switch s := s.(type) {
				case *Measurement:
					if IsSystemName(s.Name) {
						command := "the appropriate meta command"
						switch s.Name {
						case "_fieldKeys":
							command = "SHOW FIELD KEYS"
						case "_measurements":
							command = "SHOW MEASUREMENTS"
						case "_series":
							command = "SHOW SERIES"
						case "_tagKeys":
							command = "SHOW TAG KEYS"
						case "_tags":
							command = "SHOW TAG VALUES"
						}
						results <- &Result{
							Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command),
						}
						break LOOP
					}
				}
			}
		}

		// Rewrite statements, if necessary.
		// This can occur on meta read statements which convert to SELECT statements.
		newStmt, err := RewriteStatement(stmt)
		if err != nil {
			results <- &Result{Err: err}
			break
		}
		stmt = newStmt

		// Normalize each statement if possible.
		if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok {
			if err := normalizer.NormalizeStatement(stmt, defaultDB); err != nil {
				if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted {
					return
				}
				break
			}
		}

		// Log each normalized statement.
		if !ctx.Quiet {
			e.Logger.Info(stmt.String())
		}

		// Send any other statements to the underlying statement executor.
		err = e.StatementExecutor.ExecuteStatement(stmt, ctx)
		if err == ErrQueryInterrupted {
			// Query was interrupted so retrieve the real interrupt error from
			// the query task if there is one.
			if qerr := task.Error(); qerr != nil {
				err = qerr
			}
		}

		// Send an error for this result if it failed for some reason.
		if err != nil {
			if err := ctx.send(&Result{
				StatementID: i,
				Err:         err,
			}); err == ErrQueryAborted {
				return
			}
			// Stop after the first error.
			break
		}

		// Check if the query was interrupted during an uninterruptible statement.
		interrupted := false
		if ctx.InterruptCh != nil {
			select {
			case <-ctx.InterruptCh:
				interrupted = true
			default:
				// Query has not been interrupted.
			}
		}

		if interrupted {
			break
		}
	}

	// Send error results for any statements which were not executed.
	for ; i < len(query.Statements)-1; i++ {
		if err := ctx.send(&Result{
			StatementID: i,
			Err:         ErrNotExecuted,
		}); err == ErrQueryAborted {
			return
		}
	}
}

func (e *QueryExecutor) recover(query *Query, results chan *Result) {
	if err := recover(); err != nil {
		e.Logger.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack()))
		results <- &Result{
			StatementID: -1,
			Err:         fmt.Errorf("%s [panic:%s]", query.String(), err),
		}
	}
}

// QueryMonitorFunc is a function that will be called to check if a query
// is currently healthy. If the query needs to be interrupted for some reason,
// the error should be returned by this function.
type QueryMonitorFunc func(<-chan struct{}) error

// QueryTask is the internal data structure for managing queries.
// For the public use data structure that gets returned, see QueryTaskInfo.
type QueryTask struct {
	query     string
	database  string
	startTime time.Time
	closing   chan struct{}
	monitorCh chan error
	err       error
	mu        sync.Mutex
}

// Monitor starts a new goroutine that will monitor a query. The function
// will be passed in a channel to signal when the query has been finished
// normally. If the function returns with an error and the query is still
// running, the query will be terminated.
func (q *QueryTask) Monitor(fn QueryMonitorFunc) {
	go q.monitor(fn)
}

// Error returns any asynchronous error that may have occurred while executing
// the query.
func (q *QueryTask) Error() error {
	q.mu.Lock()
	defer q.mu.Unlock()
	return q.err
}

func (q *QueryTask) setError(err error) {
	q.mu.Lock()
	q.err = err
	q.mu.Unlock()
}

func (q *QueryTask) monitor(fn QueryMonitorFunc) {
	if err := fn(q.closing); err != nil {
		select {
		case <-q.closing:
		case q.monitorCh <- err:
		}
	}
}
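
// Illustrative sketch (not part of the upstream file): wiring a QueryExecutor
// to a custom StatementExecutor and draining the results channel. The
// myStatementExecutor type is hypothetical; the channel is closed by the
// executor once all statements have run.
//
//	e := NewQueryExecutor()
//	e.StatementExecutor = &myStatementExecutor{}
//	q, err := ParseQuery(`SELECT count(value) FROM cpu`)
//	if err != nil {
//		return err
//	}
//	for result := range e.ExecuteQuery(q, ExecutionOptions{Database: "db0"}, nil) {
//		if result.Err != nil {
//			return result.Err
//		}
//		// consume result.Series ...
//	}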
367
vendor/github.com/influxdata/influxdb/influxql/query_executor_test.go
generated
vendored
Normal file
@@ -0,0 +1,367 @@
package influxql_test

import (
	"errors"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/influxdata/influxdb/influxql"
)

var errUnexpected = errors.New("unexpected error")

type StatementExecutor struct {
	ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error
}

func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
	return e.ExecuteStatementFn(stmt, ctx)
}

func NewQueryExecutor() *influxql.QueryExecutor {
	return influxql.NewQueryExecutor()
}

func TestQueryExecutor_AttachQuery(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			if ctx.QueryID != 1 {
				t.Errorf("incorrect query id: exp=1 got=%d", ctx.QueryID)
			}
			return nil
		},
	}

	discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))
}

func TestQueryExecutor_KillQuery(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	qid := make(chan uint64)

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			switch stmt.(type) {
			case *influxql.KillQueryStatement:
				return e.TaskManager.ExecuteStatement(stmt, ctx)
			}

			qid <- ctx.QueryID
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			case <-time.After(100 * time.Millisecond):
				t.Error("killing the query did not close the channel after 100 milliseconds")
				return errUnexpected
			}
		},
	}

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid))
	if err != nil {
		t.Fatal(err)
	}
	discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))

	result := <-results
	if result.Err != influxql.ErrQueryInterrupted {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_Interrupt(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			case <-time.After(100 * time.Millisecond):
				t.Error("killing the query did not close the channel after 100 milliseconds")
				return errUnexpected
			}
		},
	}

	closing := make(chan struct{})
	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, closing)
	close(closing)
	result := <-results
	if result.Err != influxql.ErrQueryInterrupted {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_Abort(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	ch1 := make(chan struct{})
	ch2 := make(chan struct{})

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			<-ch1
			if err := ctx.Send(&influxql.Result{Err: errUnexpected}); err != influxql.ErrQueryAborted {
				t.Errorf("unexpected error: %v", err)
			}
			close(ch2)
			return nil
		},
	}

	done := make(chan struct{})
	close(done)

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{AbortCh: done}, nil)
	close(ch1)

	<-ch2
	discardOutput(results)
}

func TestQueryExecutor_ShowQueries(t *testing.T) {
	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			switch stmt.(type) {
			case *influxql.ShowQueriesStatement:
				return e.TaskManager.ExecuteStatement(stmt, ctx)
			}

			t.Errorf("unexpected statement: %s", stmt)
			return errUnexpected
		},
	}

	q, err := influxql.ParseQuery(`SHOW QUERIES`)
	if err != nil {
		t.Fatal(err)
	}

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	result := <-results
	if len(result.Series) != 1 {
		t.Errorf("expected %d rows, got %d", 1, len(result.Series))
	}
	if result.Err != nil {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_Limit_Timeout(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			case <-time.After(time.Second):
				t.Errorf("timeout has not killed the query")
				return errUnexpected
			}
		},
	}
	e.TaskManager.QueryTimeout = time.Nanosecond

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	result := <-results
	if result.Err == nil || !strings.Contains(result.Err.Error(), "query-timeout") {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	qid := make(chan uint64)

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			qid <- ctx.QueryID
			<-ctx.InterruptCh
			return influxql.ErrQueryInterrupted
		},
	}
	e.TaskManager.MaxConcurrentQueries = 1
	defer e.Close()

	// Start first query and wait for it to be executing.
	go discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))
	<-qid

	// Start second query and expect for it to fail.
	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)

	select {
	case result := <-results:
		if len(result.Series) != 0 {
			t.Errorf("expected %d rows, got %d", 0, len(result.Series))
		}
		if result.Err == nil || !strings.Contains(result.Err.Error(), "max-concurrent-queries") {
			t.Errorf("unexpected error: %s", result.Err)
		}
	case <-qid:
		t.Errorf("unexpected statement execution for the second query")
	}
}

func TestQueryExecutor_Close(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	ch1 := make(chan struct{})
	ch2 := make(chan struct{})

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			close(ch1)
			<-ctx.InterruptCh
			return influxql.ErrQueryInterrupted
		},
	}

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	go func(results <-chan *influxql.Result) {
		result := <-results
		if result.Err != influxql.ErrQueryEngineShutdown {
			t.Errorf("unexpected error: %s", result.Err)
		}
		close(ch2)
	}(results)

	// Wait for the statement to start executing.
	<-ch1

	// Close the query executor.
	e.Close()

	// Check that the statement gets interrupted and finishes.
	select {
	case <-ch2:
	case <-time.After(100 * time.Millisecond):
		t.Fatal("closing the query manager did not kill the query after 100 milliseconds")
	}

	results = e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	result := <-results
	if len(result.Series) != 0 {
		t.Errorf("expected %d rows, got %d", 0, len(result.Series))
	}
	if result.Err != influxql.ErrQueryEngineShutdown {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_Panic(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			panic("test error")
		},
	}

	results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
	result := <-results
	if len(result.Series) != 0 {
		t.Errorf("expected %d rows, got %d", 0, len(result.Series))
	}
	if result.Err == nil || result.Err.Error() != "SELECT count(value) FROM cpu [panic:test error]" {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

func TestQueryExecutor_InvalidSource(t *testing.T) {
	e := NewQueryExecutor()
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			return errors.New("statement executed unexpectedly")
		},
	}

	for i, tt := range []struct {
		q   string
		err string
	}{
		{
			q:   `SELECT fieldKey, fieldType FROM _fieldKeys`,
			err: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`,
		},
		{
			q:   `SELECT "name" FROM _measurements`,
			err: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`,
		},
		{
			q:   `SELECT "key" FROM _series`,
			err: `unable to use system source '_series': use SHOW SERIES instead`,
		},
		{
			q:   `SELECT tagKey FROM _tagKeys`,
			err: `unable to use system source '_tagKeys': use SHOW TAG KEYS instead`,
		},
		{
			q:   `SELECT "key", value FROM _tags`,
			err: `unable to use system source '_tags': use SHOW TAG VALUES instead`,
		},
	} {
		q, err := influxql.ParseQuery(tt.q)
		if err != nil {
			t.Errorf("%d. unable to parse: %s", i, tt.q)
			continue
		}

		results := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)
		result := <-results
		if len(result.Series) != 0 {
			t.Errorf("%d. expected %d rows, got %d", i, 0, len(result.Series))
		}
		if result.Err == nil || result.Err.Error() != tt.err {
			t.Errorf("%d. unexpected error: %s", i, result.Err)
		}
	}
}

func discardOutput(results <-chan *influxql.Result) {
	for range results {
		// Read all results and discard.
	}
}
121
vendor/github.com/influxdata/influxdb/influxql/result.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
package influxql

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/influxdata/influxdb/models"
)

const (
	// WarningLevel is the message level for a warning.
	WarningLevel = "warning"
)

// TagSet is a fundamental concept within the query system. It represents a composite series,
// composed of multiple individual series that share a set of tag attributes.
type TagSet struct {
	Tags       map[string]string
	Filters    []Expr
	SeriesKeys []string
	Key        []byte
}

// AddFilter adds a series-level filter to the TagSet.
func (t *TagSet) AddFilter(key string, filter Expr) {
	t.SeriesKeys = append(t.SeriesKeys, key)
	t.Filters = append(t.Filters, filter)
}

// Len implements sort.Interface.
func (t *TagSet) Len() int { return len(t.SeriesKeys) }

// Less implements sort.Interface.
func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] }

// Swap implements sort.Interface, keeping each filter aligned with its series key.
func (t *TagSet) Swap(i, j int) {
	t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i]
	t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i]
}

// Reverse reverses the order of series keys and filters in the TagSet.
func (t *TagSet) Reverse() {
	for i, j := 0, len(t.Filters)-1; i < j; i, j = i+1, j-1 {
		t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i]
		t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i]
	}
}
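
// Illustrative sketch (not part of the upstream file): because TagSet
// implements sort.Interface, series keys can be ordered while their
// per-series filters stay aligned:
//
//	ts := &TagSet{}
//	ts.AddFilter("cpu,host=b", nil)
//	ts.AddFilter("cpu,host=a", nil)
//	sort.Sort(ts)
//	// ts.SeriesKeys is now ["cpu,host=a", "cpu,host=b"], filters reordered to match.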

// Message represents a user-facing message to be included with the result.
type Message struct {
	Level string `json:"level"`
	Text  string `json:"text"`
}

// ReadOnlyWarning generates a warning message that tells the user the command
// they are using is being used for writing in a read only context.
//
// This is a temporary method to be used while transitioning to read only
// operations for issue #6290.
func ReadOnlyWarning(stmt string) *Message {
	return &Message{
		Level: WarningLevel,
		Text:  fmt.Sprintf("deprecated use of '%s' in a read only context, please use a POST request instead", stmt),
	}
}

// Result represents a resultset returned from a single statement.
// Rows represents a list of rows that can be sorted consistently by name/tag.
type Result struct {
	// StatementID is just the statement's position in the query. It's used
	// to combine statement results if they're being buffered in memory.
	StatementID int
	Series      models.Rows
	Messages    []*Message
	Partial     bool
	Err         error
}

// MarshalJSON encodes the result into JSON.
func (r *Result) MarshalJSON() ([]byte, error) {
	// Define a struct that outputs "error" as a string.
	var o struct {
		StatementID int           `json:"statement_id"`
		Series      []*models.Row `json:"series,omitempty"`
		Messages    []*Message    `json:"messages,omitempty"`
		Partial     bool          `json:"partial,omitempty"`
		Err         string        `json:"error,omitempty"`
	}

	// Copy fields to output struct.
	o.StatementID = r.StatementID
	o.Series = r.Series
	o.Messages = r.Messages
	o.Partial = r.Partial
	if r.Err != nil {
		o.Err = r.Err.Error()
	}

	return json.Marshal(&o)
}

// UnmarshalJSON decodes the data into the Result struct.
func (r *Result) UnmarshalJSON(b []byte) error {
	var o struct {
		StatementID int           `json:"statement_id"`
		Series      []*models.Row `json:"series,omitempty"`
		Messages    []*Message    `json:"messages,omitempty"`
		Partial     bool          `json:"partial,omitempty"`
		Err         string        `json:"error,omitempty"`
	}

	err := json.Unmarshal(b, &o)
	if err != nil {
		return err
	}
	r.StatementID = o.StatementID
	r.Series = o.Series
	r.Messages = o.Messages
	r.Partial = o.Partial
	if o.Err != "" {
		r.Err = errors.New(o.Err)
	}
	return nil
}
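
// Illustrative sketch (not part of the upstream file): the JSON shape
// produced by Result.MarshalJSON for a failed statement, with the omitempty
// fields dropped:
//
//	r := &Result{StatementID: 0, Err: errors.New("database not found: db0")}
//	b, _ := json.Marshal(r)
//	fmt.Println(string(b)) // {"statement_id":0,"error":"database not found: db0"}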
47
vendor/github.com/influxdata/influxdb/influxql/sanitize.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
package influxql

import (
	"bytes"
	"regexp"
)

var (
	sanitizeSetPassword = regexp.MustCompile(`(?i)password\s+for[^=]*=\s+(["']?[^\s"]+["']?)`)

	sanitizeCreatePassword = regexp.MustCompile(`(?i)with\s+password\s+(["']?[^\s"]+["']?)`)
)

// Sanitize attempts to sanitize passwords out of a raw query.
// It looks for patterns that may be related to the SET PASSWORD and CREATE USER
// statements and will redact the password that should be there. It will attempt
// to redact information from common invalid queries too, but it's not guaranteed
// to succeed on improper queries.
//
// This function works on the raw query and attempts to retain the original input
// as much as possible.
func Sanitize(query string) string {
	if matches := sanitizeSetPassword.FindAllStringSubmatchIndex(query, -1); matches != nil {
		var buf bytes.Buffer
		i := 0
		for _, match := range matches {
			buf.WriteString(query[i:match[2]])
			buf.WriteString("[REDACTED]")
			i = match[3]
		}
		buf.WriteString(query[i:])
		query = buf.String()
	}

	if matches := sanitizeCreatePassword.FindAllStringSubmatchIndex(query, -1); matches != nil {
		var buf bytes.Buffer
		i := 0
		for _, match := range matches {
			buf.WriteString(query[i:match[2]])
			buf.WriteString("[REDACTED]")
			i = match[3]
		}
		buf.WriteString(query[i:])
		query = buf.String()
	}
	return query
}
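
// Illustrative sketch (not part of the upstream file): redacting credentials
// before a query string is logged. match[2] and match[3] bound the first
// capture group, i.e. the password literal that gets replaced:
//
//	q := `CREATE USER "admin" WITH PASSWORD 'secret'`
//	fmt.Println(Sanitize(q)) // CREATE USER "admin" WITH PASSWORD [REDACTED]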
49
vendor/github.com/influxdata/influxdb/influxql/sanitize_test.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
package influxql_test

import (
	"testing"

	"github.com/influxdata/influxdb/influxql"
)

func TestSanitize(t *testing.T) {
	var tests = []struct {
		s    string
		stmt string
	}{
		// Proper statements that should be redacted.
		{
			s:    `create user "admin" with password 'admin'`,
			stmt: `create user "admin" with password [REDACTED]`,
		},
		{
			s:    `set password for "admin" = 'admin'`,
			stmt: `set password for "admin" = [REDACTED]`,
		},

		// Common invalid statements that should still be redacted.
		{
			s:    `create user "admin" with password "admin"`,
			stmt: `create user "admin" with password [REDACTED]`,
		},
		{
			s:    `set password for "admin" = "admin"`,
			stmt: `set password for "admin" = [REDACTED]`,
		},
	}

	for i, tt := range tests {
		stmt := influxql.Sanitize(tt.s)
		if tt.stmt != stmt {
			t.Errorf("%d. %q\n\nsanitize mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
		}
	}
}

func BenchmarkSanitize(b *testing.B) {
	b.ReportAllocs()
	q := `create user "admin" with password 'admin'; set password for "admin" = 'admin'`
	for i := 0; i < b.N; i++ {
		influxql.Sanitize(q)
	}
}
617
vendor/github.com/influxdata/influxdb/influxql/scanner.go
generated
vendored
Normal file
@@ -0,0 +1,617 @@
package influxql

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

// Scanner represents a lexical scanner for InfluxQL.
type Scanner struct {
	r *reader
}

// NewScanner returns a new instance of Scanner.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: &reader{r: bufio.NewReader(r)}}
}

// Scan returns the next token and position from the underlying reader.
// Also returns the literal text read for strings, numbers, and duration tokens
// since these token types can have different literal representations.
func (s *Scanner) Scan() (tok Token, pos Pos, lit string) {
	// Read next code point.
	ch0, pos := s.r.read()

	// If we see whitespace then consume all contiguous whitespace.
	// If we see a letter, or certain acceptable special characters, then consume
	// as an ident or reserved word.
	if isWhitespace(ch0) {
		return s.scanWhitespace()
	} else if isLetter(ch0) || ch0 == '_' {
		s.r.unread()
		return s.scanIdent(true)
	} else if isDigit(ch0) {
		return s.scanNumber()
	}

	// Otherwise parse individual characters.
	switch ch0 {
	case eof:
		return EOF, pos, ""
	case '"':
		s.r.unread()
		return s.scanIdent(true)
	case '\'':
		return s.scanString()
	case '.':
		ch1, _ := s.r.read()
		s.r.unread()
		if isDigit(ch1) {
			return s.scanNumber()
		}
		return DOT, pos, ""
	case '$':
		tok, _, lit = s.scanIdent(false)
		if tok != IDENT {
			return tok, pos, "$" + lit
		}
		return BOUNDPARAM, pos, "$" + lit
	case '+':
		return ADD, pos, ""
	case '-':
		ch1, _ := s.r.read()
		if ch1 == '-' {
			s.skipUntilNewline()
			return COMMENT, pos, ""
		}
		s.r.unread()
		return SUB, pos, ""
	case '*':
		return MUL, pos, ""
	case '/':
		ch1, _ := s.r.read()
		if ch1 == '*' {
			if err := s.skipUntilEndComment(); err != nil {
				return ILLEGAL, pos, ""
			}
			return COMMENT, pos, ""
		} else {
			s.r.unread()
		}
		return DIV, pos, ""
	case '%':
		return MOD, pos, ""
	case '&':
		return BITWISE_AND, pos, ""
	case '|':
		return BITWISE_OR, pos, ""
	case '^':
		return BITWISE_XOR, pos, ""
	case '=':
		if ch1, _ := s.r.read(); ch1 == '~' {
			return EQREGEX, pos, ""
		}
		s.r.unread()
		return EQ, pos, ""
	case '!':
		if ch1, _ := s.r.read(); ch1 == '=' {
			return NEQ, pos, ""
		} else if ch1 == '~' {
			return NEQREGEX, pos, ""
		}
		s.r.unread()
	case '>':
		if ch1, _ := s.r.read(); ch1 == '=' {
			return GTE, pos, ""
		}
		s.r.unread()
		return GT, pos, ""
	case '<':
		if ch1, _ := s.r.read(); ch1 == '=' {
			return LTE, pos, ""
		} else if ch1 == '>' {
			return NEQ, pos, ""
		}
		s.r.unread()
		return LT, pos, ""
	case '(':
		return LPAREN, pos, ""
	case ')':
		return RPAREN, pos, ""
	case ',':
		return COMMA, pos, ""
	case ';':
		return SEMICOLON, pos, ""
	case ':':
		if ch1, _ := s.r.read(); ch1 == ':' {
			return DOUBLECOLON, pos, ""
		}
		s.r.unread()
		return COLON, pos, ""
	}

	return ILLEGAL, pos, string(ch0)
}
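
// Illustrative sketch (not part of the upstream file): tokenizing a query
// string with the scanner until EOF:
//
//	s := NewScanner(strings.NewReader(`SELECT value FROM cpu`))
//	for {
//		tok, pos, lit := s.Scan()
//		if tok == EOF {
//			break
//		}
//		fmt.Println(pos, tok, lit)
//	}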
|
||||
// scanWhitespace consumes the current rune and all contiguous whitespace.
|
||||
func (s *Scanner) scanWhitespace() (tok Token, pos Pos, lit string) {
|
||||
// Create a buffer and read the current character into it.
|
||||
var buf bytes.Buffer
|
||||
ch, pos := s.r.curr()
|
||||
_, _ = buf.WriteRune(ch)
|
||||
|
||||
// Read every subsequent whitespace character into the buffer.
|
||||
// Non-whitespace characters and EOF will cause the loop to exit.
|
||||
for {
|
||||
ch, _ = s.r.read()
|
||||
if ch == eof {
|
||||
break
|
||||
} else if !isWhitespace(ch) {
|
||||
s.r.unread()
|
||||
break
|
||||
} else {
|
||||
_, _ = buf.WriteRune(ch)
|
||||
}
|
||||
}
|
||||
|
||||
return WS, pos, buf.String()
|
||||
}
|
||||
|
||||
// skipUntilNewline skips characters until it reaches a newline.
|
||||
func (s *Scanner) skipUntilNewline() {
|
||||
for {
|
||||
if ch, _ := s.r.read(); ch == '\n' || ch == eof {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// skipUntilEndComment skips characters until it reaches a '*/' symbol.
|
||||
func (s *Scanner) skipUntilEndComment() error {
|
||||
for {
|
||||
if ch1, _ := s.r.read(); ch1 == '*' {
|
||||
// We might be at the end.
|
||||
star:
|
||||
ch2, _ := s.r.read()
|
||||
if ch2 == '/' {
|
||||
return nil
|
||||
} else if ch2 == '*' {
|
||||
// We are back in the state machine since we see a star.
|
||||
goto star
|
||||
} else if ch2 == eof {
|
||||
return io.EOF
|
||||
}
|
||||
} else if ch1 == eof {
|
||||
return io.EOF
|
||||
}
|
||||
}
|
||||
}

func (s *Scanner) scanIdent(lookup bool) (tok Token, pos Pos, lit string) {
	// Save the starting position of the identifier.
	_, pos = s.r.read()
	s.r.unread()

	var buf bytes.Buffer
	for {
		if ch, _ := s.r.read(); ch == eof {
			break
		} else if ch == '"' {
			tok0, pos0, lit0 := s.scanString()
			if tok0 == BADSTRING || tok0 == BADESCAPE {
				return tok0, pos0, lit0
			}
			return IDENT, pos, lit0
		} else if isIdentChar(ch) {
			s.r.unread()
			buf.WriteString(ScanBareIdent(s.r))
		} else {
			s.r.unread()
			break
		}
	}
	lit = buf.String()

	// If the literal matches a keyword then return that keyword.
	if lookup {
		if tok = Lookup(lit); tok != IDENT {
			return tok, pos, ""
		}
	}
	return IDENT, pos, lit
}

// scanString consumes a contiguous string of non-quote characters.
// Quote characters can be consumed if they're first escaped with a backslash.
func (s *Scanner) scanString() (tok Token, pos Pos, lit string) {
	s.r.unread()
	_, pos = s.r.curr()

	var err error
	lit, err = ScanString(s.r)
	if err == errBadString {
		return BADSTRING, pos, lit
	} else if err == errBadEscape {
		_, pos = s.r.curr()
		return BADESCAPE, pos, lit
	}
	return STRING, pos, lit
}

// ScanRegex consumes a regex token from the scanner's reader, handling
// escaped '/' characters.
func (s *Scanner) ScanRegex() (tok Token, pos Pos, lit string) {
	_, pos = s.r.curr()

	// Start & end sentinels.
	start, end := '/', '/'
	// Valid escape chars.
	escapes := map[rune]rune{'/': '/'}

	b, err := ScanDelimited(s.r, start, end, escapes, true)

	if err == errBadEscape {
		_, pos = s.r.curr()
		return BADESCAPE, pos, lit
	} else if err != nil {
		return BADREGEX, pos, lit
	}
	return REGEX, pos, string(b)
}

// scanNumber consumes anything that looks like the start of a number.
func (s *Scanner) scanNumber() (tok Token, pos Pos, lit string) {
	var buf bytes.Buffer

	// Check if the initial rune is a ".".
	ch, pos := s.r.curr()
	if ch == '.' {
		// Peek and see if the next rune is a digit.
		ch1, _ := s.r.read()
		s.r.unread()
		if !isDigit(ch1) {
			return ILLEGAL, pos, "."
		}

		// Unread the full stop so we can read it later.
		s.r.unread()
	} else {
		s.r.unread()
	}

	// Read as many digits as possible.
	_, _ = buf.WriteString(s.scanDigits())

	// If next code points are a full stop and digit then consume them.
	isDecimal := false
	if ch0, _ := s.r.read(); ch0 == '.' {
		isDecimal = true
		if ch1, _ := s.r.read(); isDigit(ch1) {
			_, _ = buf.WriteRune(ch0)
			_, _ = buf.WriteRune(ch1)
			_, _ = buf.WriteString(s.scanDigits())
		} else {
			s.r.unread()
		}
	} else {
		s.r.unread()
	}

	// Read as a duration or integer if it doesn't have a fractional part.
	if !isDecimal {
		// If the next rune is a letter then this is a duration token.
		if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' {
			_, _ = buf.WriteRune(ch0)
			for {
				ch1, _ := s.r.read()
				if !isLetter(ch1) && ch1 != 'µ' {
					s.r.unread()
					break
				}
				_, _ = buf.WriteRune(ch1)
			}

			// Continue reading digits and letters as part of this token.
			for {
				if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' || isDigit(ch0) {
					_, _ = buf.WriteRune(ch0)
				} else {
					s.r.unread()
					break
				}
			}
			return DURATIONVAL, pos, buf.String()
		} else {
			s.r.unread()
			return INTEGER, pos, buf.String()
		}
	}
	return NUMBER, pos, buf.String()
}
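
// Sketch of the resulting token kinds (mirroring the scanner tests below):
// `100` scans as INTEGER, `100.23` and `.23` as NUMBER, and `10m` as
// DURATIONVAL. A decimal with a trailing unit such as `10.3s` yields
// NUMBER "10.3" and leaves the 's' for the next scan.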

// scanDigits consumes a contiguous series of digits.
func (s *Scanner) scanDigits() string {
	var buf bytes.Buffer
	for {
		ch, _ := s.r.read()
		if !isDigit(ch) {
			s.r.unread()
			break
		}
		_, _ = buf.WriteRune(ch)
	}
	return buf.String()
}

// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }

// isLetter returns true if the rune is a letter.
func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }

// isDigit returns true if the rune is a digit.
func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }

// isIdentChar returns true if the rune can be used in an unquoted identifier.
func isIdentChar(ch rune) bool { return isLetter(ch) || isDigit(ch) || ch == '_' }

// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier.
func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }

// bufScanner represents a wrapper for scanner to add a buffer.
// It provides a fixed-length circular buffer that can be unread.
type bufScanner struct {
	s   *Scanner
	i   int // buffer index
	n   int // buffer size
	buf [3]struct {
		tok Token
		pos Pos
		lit string
	}
}

// newBufScanner returns a new buffered scanner for a reader.
func newBufScanner(r io.Reader) *bufScanner {
	return &bufScanner{s: NewScanner(r)}
}

// Scan reads the next token from the scanner.
func (s *bufScanner) Scan() (tok Token, pos Pos, lit string) {
	return s.scanFunc(s.s.Scan)
}

// ScanRegex reads a regex token from the scanner.
func (s *bufScanner) ScanRegex() (tok Token, pos Pos, lit string) {
	return s.scanFunc(s.s.ScanRegex)
}

// scanFunc uses the provided function to scan the next token.
func (s *bufScanner) scanFunc(scan func() (Token, Pos, string)) (tok Token, pos Pos, lit string) {
	// If we have unread tokens then read them off the buffer first.
	if s.n > 0 {
		s.n--
		return s.curr()
	}

	// Move buffer position forward and save the token.
	s.i = (s.i + 1) % len(s.buf)
	buf := &s.buf[s.i]
	buf.tok, buf.pos, buf.lit = scan()

	return s.curr()
}

// Unscan pushes the previously read token back onto the buffer.
func (s *bufScanner) Unscan() { s.n++ }

// curr returns the last read token.
func (s *bufScanner) curr() (tok Token, pos Pos, lit string) {
	buf := &s.buf[(s.i-s.n+len(s.buf))%len(s.buf)]
	return buf.tok, buf.pos, buf.lit
}
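
// For instance, after Unscan (n=1) the next call to scanFunc decrements n
// back to 0 and curr returns buf[i] again, re-delivering the token that
// was just unread instead of scanning new input.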

// reader represents a buffered rune reader used by the scanner.
// It provides a fixed-length circular buffer that can be unread.
type reader struct {
	r   io.RuneScanner
	i   int // buffer index
	n   int // buffer char count
	pos Pos // last read rune position
	buf [3]struct {
		ch  rune
		pos Pos
	}
	eof bool // true if reader has ever seen eof.
}

// ReadRune reads the next rune from the reader.
// This is a wrapper function to implement the io.RuneReader interface.
// Note that this function does not return size.
func (r *reader) ReadRune() (ch rune, size int, err error) {
	ch, _ = r.read()
	if ch == eof {
		err = io.EOF
	}
	return
}

// UnreadRune pushes the previously read rune back onto the buffer.
// This is a wrapper function to implement the io.RuneScanner interface.
func (r *reader) UnreadRune() error {
	r.unread()
	return nil
}

// read reads the next rune from the reader.
func (r *reader) read() (ch rune, pos Pos) {
	// If we have unread characters then read them off the buffer first.
	if r.n > 0 {
		r.n--
		return r.curr()
	}

	// Read next rune from underlying reader.
	// Any error (including io.EOF) should return as EOF.
	ch, _, err := r.r.ReadRune()
	if err != nil {
		ch = eof
	} else if ch == '\r' {
		if ch, _, err := r.r.ReadRune(); err != nil {
			// nop
		} else if ch != '\n' {
			_ = r.r.UnreadRune()
		}
		ch = '\n'
	}

	// Save character and position to the buffer.
	r.i = (r.i + 1) % len(r.buf)
	buf := &r.buf[r.i]
	buf.ch, buf.pos = ch, r.pos

	// Update position.
	// Only count EOF once.
	if ch == '\n' {
		r.pos.Line++
		r.pos.Char = 0
	} else if !r.eof {
		r.pos.Char++
	}

	// Mark the reader as EOF.
	// This is used so we don't double count EOF characters.
	if ch == eof {
		r.eof = true
	}

	return r.curr()
}
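
// Note that read normalizes line endings: a lone '\r' and a '\r\n' pair
// both come back as a single '\n', which is why the scanner tests expect
// "\r\n" to produce the whitespace literal "\n".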

// unread pushes the previously read rune back onto the buffer.
func (r *reader) unread() {
	r.n++
}

// curr returns the last read character and position.
func (r *reader) curr() (ch rune, pos Pos) {
	i := (r.i - r.n + len(r.buf)) % len(r.buf)
	buf := &r.buf[i]
	return buf.ch, buf.pos
}

// eof is a marker code point to signify that the reader can't read any more.
const eof = rune(0)

// ScanDelimited reads a delimited set of runes.
func ScanDelimited(r io.RuneScanner, start, end rune, escapes map[rune]rune, escapesPassThru bool) ([]byte, error) {
	// Scan start delimiter.
	if ch, _, err := r.ReadRune(); err != nil {
		return nil, err
	} else if ch != start {
		return nil, fmt.Errorf("expected %s; found %s", string(start), string(ch))
	}

	var buf bytes.Buffer
	for {
		ch0, _, err := r.ReadRune()
		if ch0 == end {
			return buf.Bytes(), nil
		} else if err != nil {
			return buf.Bytes(), err
		} else if ch0 == '\n' {
			return nil, errors.New("delimited text contains new line")
		} else if ch0 == '\\' {
			// If the next character is an escape then write the escaped char.
			// If it's not a valid escape then return an error.
			ch1, _, err := r.ReadRune()
			if err != nil {
				return nil, err
			}

			c, ok := escapes[ch1]
			if !ok {
				if escapesPassThru {
					// Unread ch1 (char after the \)
					_ = r.UnreadRune()
					// Write ch0 (\) to the output buffer.
					_, _ = buf.WriteRune(ch0)
					continue
				} else {
					buf.Reset()
					_, _ = buf.WriteRune(ch0)
					_, _ = buf.WriteRune(ch1)
					return buf.Bytes(), errBadEscape
				}
			}

			_, _ = buf.WriteRune(c)
		} else {
			_, _ = buf.WriteRune(ch0)
		}
	}
}
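
// A minimal usage sketch (hypothetical caller; this mirrors how ScanRegex
// drives it, per the regex tests below):
//
//	b, err := ScanDelimited(strings.NewReader(`/foo\/bar/`),
//		'/', '/', map[rune]rune{'/': '/'}, true)
//	// b == []byte("foo/bar"), err == nil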

// ScanString reads a quoted string from a rune reader.
func ScanString(r io.RuneScanner) (string, error) {
	ending, _, err := r.ReadRune()
	if err != nil {
		return "", errBadString
	}

	var buf bytes.Buffer
	for {
		ch0, _, err := r.ReadRune()
		if ch0 == ending {
			return buf.String(), nil
		} else if err != nil || ch0 == '\n' {
			return buf.String(), errBadString
		} else if ch0 == '\\' {
			// If the next character is an escape then write the escaped char.
			// If it's not a valid escape then return an error.
			ch1, _, _ := r.ReadRune()
			if ch1 == 'n' {
				_, _ = buf.WriteRune('\n')
			} else if ch1 == '\\' {
				_, _ = buf.WriteRune('\\')
			} else if ch1 == '"' {
				_, _ = buf.WriteRune('"')
			} else if ch1 == '\'' {
				_, _ = buf.WriteRune('\'')
			} else {
				return string(ch0) + string(ch1), errBadEscape
			}
		} else {
			_, _ = buf.WriteRune(ch0)
		}
	}
}
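
// A minimal usage sketch (hypothetical caller):
//
//	lit, err := ScanString(strings.NewReader(`'foo\nbar'`))
//	// lit == "foo\nbar", err == nil
//
// Per the escape handling above, only \n, \\, \" and \' are accepted;
// anything else (e.g. `\x`) returns errBadEscape.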

var errBadString = errors.New("bad string")
var errBadEscape = errors.New("bad escape")

// ScanBareIdent reads a bare identifier from a rune reader.
func ScanBareIdent(r io.RuneScanner) string {
	// Read every ident character into the buffer.
	// Non-ident characters and EOF will cause the loop to exit.
	var buf bytes.Buffer
	for {
		ch, _, err := r.ReadRune()
		if err != nil {
			break
		} else if !isIdentChar(ch) {
			r.UnreadRune()
			break
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	return buf.String()
}
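
// For example, ScanBareIdent(strings.NewReader("cpu,host")) returns "cpu"
// and leaves the ',' unread for the caller's next scan.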

// IsRegexOp returns true if the operator accepts a regex operand.
func IsRegexOp(t Token) bool {
	return (t == EQREGEX || t == NEQREGEX)
}
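
// End-to-end sketch of driving the scanner from an importing package
// (a hypothetical example; the tests below exercise the same API):
//
//	s := influxql.NewScanner(strings.NewReader(`SELECT value FROM cpu`))
//	for {
//		tok, pos, lit := s.Scan()
//		if tok == influxql.EOF {
//			break
//		}
//		fmt.Printf("%d:%d %s %q\n", pos.Line, pos.Char, tok, lit)
//	}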
290
vendor/github.com/influxdata/influxdb/influxql/scanner_test.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
package influxql_test

import (
	"reflect"
	"strings"
	"testing"

	"github.com/influxdata/influxdb/influxql"
)

// Ensure the scanner can scan tokens correctly.
func TestScanner_Scan(t *testing.T) {
	var tests = []struct {
		s   string
		tok influxql.Token
		lit string
		pos influxql.Pos
	}{
		// Special tokens (EOF, ILLEGAL, WS)
		{s: ``, tok: influxql.EOF},
		{s: `#`, tok: influxql.ILLEGAL, lit: `#`},
		{s: ` `, tok: influxql.WS, lit: " "},
		{s: "\t", tok: influxql.WS, lit: "\t"},
		{s: "\n", tok: influxql.WS, lit: "\n"},
		{s: "\r", tok: influxql.WS, lit: "\n"},
		{s: "\r\n", tok: influxql.WS, lit: "\n"},
		{s: "\rX", tok: influxql.WS, lit: "\n"},
		{s: "\n\r", tok: influxql.WS, lit: "\n\n"},
		{s: " \n\t \r\n\t", tok: influxql.WS, lit: " \n\t \n\t"},
		{s: " foo", tok: influxql.WS, lit: " "},

		// Numeric operators
		{s: `+`, tok: influxql.ADD},
		{s: `-`, tok: influxql.SUB},
		{s: `*`, tok: influxql.MUL},
		{s: `/`, tok: influxql.DIV},
		{s: `%`, tok: influxql.MOD},

		// Logical operators
		{s: `AND`, tok: influxql.AND},
		{s: `and`, tok: influxql.AND},
		{s: `OR`, tok: influxql.OR},
		{s: `or`, tok: influxql.OR},

		{s: `=`, tok: influxql.EQ},
		{s: `<>`, tok: influxql.NEQ},
		{s: `! `, tok: influxql.ILLEGAL, lit: "!"},
		{s: `<`, tok: influxql.LT},
		{s: `<=`, tok: influxql.LTE},
		{s: `>`, tok: influxql.GT},
		{s: `>=`, tok: influxql.GTE},

		// Misc tokens
		{s: `(`, tok: influxql.LPAREN},
		{s: `)`, tok: influxql.RPAREN},
		{s: `,`, tok: influxql.COMMA},
		{s: `;`, tok: influxql.SEMICOLON},
		{s: `.`, tok: influxql.DOT},
		{s: `=~`, tok: influxql.EQREGEX},
		{s: `!~`, tok: influxql.NEQREGEX},
		{s: `:`, tok: influxql.COLON},
		{s: `::`, tok: influxql.DOUBLECOLON},

		// Identifiers
		{s: `foo`, tok: influxql.IDENT, lit: `foo`},
		{s: `_foo`, tok: influxql.IDENT, lit: `_foo`},
		{s: `Zx12_3U_-`, tok: influxql.IDENT, lit: `Zx12_3U_`},
		{s: `"foo"`, tok: influxql.IDENT, lit: `foo`},
		{s: `"foo\\bar"`, tok: influxql.IDENT, lit: `foo\bar`},
		{s: `"foo\bar"`, tok: influxql.BADESCAPE, lit: `\b`, pos: influxql.Pos{Line: 0, Char: 5}},
		{s: `"foo\"bar\""`, tok: influxql.IDENT, lit: `foo"bar"`},
		{s: `test"`, tok: influxql.BADSTRING, lit: "", pos: influxql.Pos{Line: 0, Char: 3}},
		{s: `"test`, tok: influxql.BADSTRING, lit: `test`},
		{s: `$host`, tok: influxql.BOUNDPARAM, lit: `$host`},
		{s: `$"host param"`, tok: influxql.BOUNDPARAM, lit: `$host param`},

		{s: `true`, tok: influxql.TRUE},
		{s: `false`, tok: influxql.FALSE},

		// Strings
		{s: `'testing 123!'`, tok: influxql.STRING, lit: `testing 123!`},
		{s: `'foo\nbar'`, tok: influxql.STRING, lit: "foo\nbar"},
		{s: `'foo\\bar'`, tok: influxql.STRING, lit: "foo\\bar"},
		{s: `'test`, tok: influxql.BADSTRING, lit: `test`},
		{s: "'test\nfoo", tok: influxql.BADSTRING, lit: `test`},
		{s: `'test\g'`, tok: influxql.BADESCAPE, lit: `\g`, pos: influxql.Pos{Line: 0, Char: 6}},

		// Numbers
		{s: `100`, tok: influxql.INTEGER, lit: `100`},
		{s: `100.23`, tok: influxql.NUMBER, lit: `100.23`},
		{s: `.23`, tok: influxql.NUMBER, lit: `.23`},
		//{s: `.`, tok: influxql.ILLEGAL, lit: `.`},
		{s: `10.3s`, tok: influxql.NUMBER, lit: `10.3`},

		// Durations
		{s: `10u`, tok: influxql.DURATIONVAL, lit: `10u`},
		{s: `10µ`, tok: influxql.DURATIONVAL, lit: `10µ`},
		{s: `10ms`, tok: influxql.DURATIONVAL, lit: `10ms`},
		{s: `1s`, tok: influxql.DURATIONVAL, lit: `1s`},
		{s: `10m`, tok: influxql.DURATIONVAL, lit: `10m`},
		{s: `10h`, tok: influxql.DURATIONVAL, lit: `10h`},
		{s: `10d`, tok: influxql.DURATIONVAL, lit: `10d`},
		{s: `10w`, tok: influxql.DURATIONVAL, lit: `10w`},
		{s: `10x`, tok: influxql.DURATIONVAL, lit: `10x`}, // non-duration unit, but scanned as a duration value

		// Keywords
		{s: `ALL`, tok: influxql.ALL},
		{s: `ALTER`, tok: influxql.ALTER},
		{s: `AS`, tok: influxql.AS},
		{s: `ASC`, tok: influxql.ASC},
		{s: `BEGIN`, tok: influxql.BEGIN},
		{s: `BY`, tok: influxql.BY},
		{s: `CREATE`, tok: influxql.CREATE},
		{s: `CONTINUOUS`, tok: influxql.CONTINUOUS},
		{s: `DATABASE`, tok: influxql.DATABASE},
		{s: `DATABASES`, tok: influxql.DATABASES},
		{s: `DEFAULT`, tok: influxql.DEFAULT},
		{s: `DELETE`, tok: influxql.DELETE},
		{s: `DESC`, tok: influxql.DESC},
		{s: `DROP`, tok: influxql.DROP},
		{s: `DURATION`, tok: influxql.DURATION},
		{s: `END`, tok: influxql.END},
		{s: `EVERY`, tok: influxql.EVERY},
		{s: `EXPLAIN`, tok: influxql.EXPLAIN},
		{s: `FIELD`, tok: influxql.FIELD},
		{s: `FROM`, tok: influxql.FROM},
		{s: `GRANT`, tok: influxql.GRANT},
		{s: `GROUP`, tok: influxql.GROUP},
		{s: `GROUPS`, tok: influxql.GROUPS},
		{s: `INSERT`, tok: influxql.INSERT},
		{s: `INTO`, tok: influxql.INTO},
		{s: `KEY`, tok: influxql.KEY},
		{s: `KEYS`, tok: influxql.KEYS},
		{s: `KILL`, tok: influxql.KILL},
		{s: `LIMIT`, tok: influxql.LIMIT},
		{s: `SHOW`, tok: influxql.SHOW},
		{s: `SHARD`, tok: influxql.SHARD},
		{s: `SHARDS`, tok: influxql.SHARDS},
		{s: `MEASUREMENT`, tok: influxql.MEASUREMENT},
		{s: `MEASUREMENTS`, tok: influxql.MEASUREMENTS},
		{s: `OFFSET`, tok: influxql.OFFSET},
		{s: `ON`, tok: influxql.ON},
		{s: `ORDER`, tok: influxql.ORDER},
		{s: `PASSWORD`, tok: influxql.PASSWORD},
		{s: `POLICY`, tok: influxql.POLICY},
		{s: `POLICIES`, tok: influxql.POLICIES},
		{s: `PRIVILEGES`, tok: influxql.PRIVILEGES},
		{s: `QUERIES`, tok: influxql.QUERIES},
		{s: `QUERY`, tok: influxql.QUERY},
		{s: `READ`, tok: influxql.READ},
		{s: `REPLICATION`, tok: influxql.REPLICATION},
		{s: `RESAMPLE`, tok: influxql.RESAMPLE},
		{s: `RETENTION`, tok: influxql.RETENTION},
		{s: `REVOKE`, tok: influxql.REVOKE},
		{s: `SELECT`, tok: influxql.SELECT},
		{s: `SERIES`, tok: influxql.SERIES},
		{s: `TAG`, tok: influxql.TAG},
		{s: `TO`, tok: influxql.TO},
		{s: `USER`, tok: influxql.USER},
		{s: `USERS`, tok: influxql.USERS},
		{s: `VALUES`, tok: influxql.VALUES},
		{s: `WHERE`, tok: influxql.WHERE},
		{s: `WITH`, tok: influxql.WITH},
		{s: `WRITE`, tok: influxql.WRITE},
		{s: `explain`, tok: influxql.EXPLAIN}, // case insensitive
		{s: `seLECT`, tok: influxql.SELECT},   // case insensitive
	}

	for i, tt := range tests {
		s := influxql.NewScanner(strings.NewReader(tt.s))
		tok, pos, lit := s.Scan()
		if tt.tok != tok {
			t.Errorf("%d. %q token mismatch: exp=%q got=%q <%q>", i, tt.s, tt.tok, tok, lit)
		} else if tt.pos.Line != pos.Line || tt.pos.Char != pos.Char {
			t.Errorf("%d. %q pos mismatch: exp=%#v got=%#v", i, tt.s, tt.pos, pos)
		} else if tt.lit != lit {
			t.Errorf("%d. %q literal mismatch: exp=%q got=%q", i, tt.s, tt.lit, lit)
		}
	}
}

// Ensure the scanner can scan a series of tokens correctly.
func TestScanner_Scan_Multi(t *testing.T) {
	type result struct {
		tok influxql.Token
		pos influxql.Pos
		lit string
	}
	exp := []result{
		{tok: influxql.SELECT, pos: influxql.Pos{Line: 0, Char: 0}, lit: ""},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 6}, lit: " "},
		{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 7}, lit: "value"},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 12}, lit: " "},
		{tok: influxql.FROM, pos: influxql.Pos{Line: 0, Char: 13}, lit: ""},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 17}, lit: " "},
		{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 18}, lit: "myseries"},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 26}, lit: " "},
		{tok: influxql.WHERE, pos: influxql.Pos{Line: 0, Char: 27}, lit: ""},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 32}, lit: " "},
		{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 33}, lit: "a"},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 34}, lit: " "},
		{tok: influxql.EQ, pos: influxql.Pos{Line: 0, Char: 35}, lit: ""},
		{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 36}, lit: " "},
		{tok: influxql.STRING, pos: influxql.Pos{Line: 0, Char: 36}, lit: "b"},
		{tok: influxql.EOF, pos: influxql.Pos{Line: 0, Char: 40}, lit: ""},
	}

	// Create a scanner.
	v := `SELECT value from myseries WHERE a = 'b'`
	s := influxql.NewScanner(strings.NewReader(v))

	// Continually scan until we reach the end.
	var act []result
	for {
		tok, pos, lit := s.Scan()
		act = append(act, result{tok, pos, lit})
		if tok == influxql.EOF {
			break
		}
	}

	// Verify the token counts match.
	if len(exp) != len(act) {
		t.Fatalf("token count mismatch: exp=%d, got=%d", len(exp), len(act))
	}

	// Verify each token matches.
	for i := range exp {
		if !reflect.DeepEqual(exp[i], act[i]) {
			t.Fatalf("%d. token mismatch:\n\nexp=%#v\n\ngot=%#v", i, exp[i], act[i])
		}
	}
}

// Ensure the library can correctly scan strings.
func TestScanString(t *testing.T) {
	var tests = []struct {
		in  string
		out string
		err string
	}{
		{in: `""`, out: ``},
		{in: `"foo bar"`, out: `foo bar`},
		{in: `'foo bar'`, out: `foo bar`},
		{in: `"foo\nbar"`, out: "foo\nbar"},
		{in: `"foo\\bar"`, out: `foo\bar`},
		{in: `"foo\"bar"`, out: `foo"bar`},
		{in: `'foo\'bar'`, out: `foo'bar`},

		{in: `"foo` + "\n", out: `foo`, err: "bad string"}, // newline in string
		{in: `"foo`, out: `foo`, err: "bad string"},        // unclosed quotes
		{in: `"foo\xbar"`, out: `\x`, err: "bad escape"},   // invalid escape
	}

	for i, tt := range tests {
		out, err := influxql.ScanString(strings.NewReader(tt.in))
		if tt.err != errstring(err) {
			t.Errorf("%d. %s: error: exp=%s, got=%s", i, tt.in, tt.err, err)
		} else if tt.out != out {
			t.Errorf("%d. %s: out: exp=%s, got=%s", i, tt.in, tt.out, out)
		}
	}
}

// Test scanning regex
func TestScanRegex(t *testing.T) {
	var tests = []struct {
		in  string
		tok influxql.Token
		lit string
		err string
	}{
		{in: `/^payments\./`, tok: influxql.REGEX, lit: `^payments\.`},
		{in: `/foo\/bar/`, tok: influxql.REGEX, lit: `foo/bar`},
		{in: `/foo\\/bar/`, tok: influxql.REGEX, lit: `foo\/bar`},
		{in: `/foo\\bar/`, tok: influxql.REGEX, lit: `foo\\bar`},
		{in: `/http\:\/\/www\.example\.com/`, tok: influxql.REGEX, lit: `http\://www\.example\.com`},
	}

	for i, tt := range tests {
		s := influxql.NewScanner(strings.NewReader(tt.in))
		tok, _, lit := s.ScanRegex()
		if tok != tt.tok {
			t.Errorf("%d. %s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.tok.String(), tok.String())
		}
		if lit != tt.lit {
			t.Errorf("%d. %s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.lit, lit)
		}
	}
}
1417
vendor/github.com/influxdata/influxdb/influxql/select.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3919
vendor/github.com/influxdata/influxdb/influxql/select_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
233
vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
package influxql

import "errors"

// RewriteStatement rewrites stmt into a new statement, if applicable.
func RewriteStatement(stmt Statement) (Statement, error) {
	switch stmt := stmt.(type) {
	case *ShowFieldKeysStatement:
		return rewriteShowFieldKeysStatement(stmt)
	case *ShowMeasurementsStatement:
		return rewriteShowMeasurementsStatement(stmt)
	case *ShowSeriesStatement:
		return rewriteShowSeriesStatement(stmt)
	case *ShowTagKeysStatement:
		return rewriteShowTagKeysStatement(stmt)
	case *ShowTagValuesStatement:
		return rewriteShowTagValuesStatement(stmt)
	default:
		return stmt, nil
	}
}
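
// Sketch of the rewrite round-trip (the tests below assert exactly this):
//
//	stmt, _ := ParseStatement(`SHOW TAG KEYS FROM cpu`)
//	stmt, _ = RewriteStatement(stmt)
//	// stmt.String() == `SELECT tagKey FROM _tagKeys WHERE _name = 'cpu'`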

func rewriteShowFieldKeysStatement(stmt *ShowFieldKeysStatement) (Statement, error) {
	return &SelectStatement{
		Fields: Fields([]*Field{
			{Expr: &VarRef{Val: "fieldKey"}},
			{Expr: &VarRef{Val: "fieldType"}},
		}),
		Sources:    rewriteSources(stmt.Sources, "_fieldKeys", stmt.Database),
		Condition:  rewriteSourcesCondition(stmt.Sources, nil),
		Offset:     stmt.Offset,
		Limit:      stmt.Limit,
		SortFields: stmt.SortFields,
		OmitTime:   true,
		Dedupe:     true,
		IsRawQuery: true,
	}, nil
}

func rewriteShowMeasurementsStatement(stmt *ShowMeasurementsStatement) (Statement, error) {
	// Check for time in WHERE clause (not supported).
	if HasTimeExpr(stmt.Condition) {
		return nil, errors.New("SHOW MEASUREMENTS doesn't support time in WHERE clause")
	}

	condition := stmt.Condition
	if stmt.Source != nil {
		condition = rewriteSourcesCondition(Sources([]Source{stmt.Source}), stmt.Condition)
	}
	return &ShowMeasurementsStatement{
		Database:   stmt.Database,
		Condition:  condition,
		Limit:      stmt.Limit,
		Offset:     stmt.Offset,
		SortFields: stmt.SortFields,
	}, nil
}

func rewriteShowSeriesStatement(stmt *ShowSeriesStatement) (Statement, error) {
	// Check for time in WHERE clause (not supported).
	if HasTimeExpr(stmt.Condition) {
		return nil, errors.New("SHOW SERIES doesn't support time in WHERE clause")
	}

	return &SelectStatement{
		Fields: []*Field{
			{Expr: &VarRef{Val: "key"}},
		},
		Sources:    rewriteSources(stmt.Sources, "_series", stmt.Database),
		Condition:  rewriteSourcesCondition(stmt.Sources, stmt.Condition),
		Offset:     stmt.Offset,
		Limit:      stmt.Limit,
		SortFields: stmt.SortFields,
		OmitTime:   true,
		Dedupe:     true,
		IsRawQuery: true,
	}, nil
}

func rewriteShowTagValuesStatement(stmt *ShowTagValuesStatement) (Statement, error) {
	// Check for time in WHERE clause (not supported).
	if HasTimeExpr(stmt.Condition) {
		return nil, errors.New("SHOW TAG VALUES doesn't support time in WHERE clause")
	}

	condition := stmt.Condition
	var expr Expr
	if list, ok := stmt.TagKeyExpr.(*ListLiteral); ok {
		for _, tagKey := range list.Vals {
			tagExpr := &BinaryExpr{
				Op:  EQ,
				LHS: &VarRef{Val: "_tagKey"},
				RHS: &StringLiteral{Val: tagKey},
			}

			if expr != nil {
				expr = &BinaryExpr{
					Op:  OR,
					LHS: expr,
					RHS: tagExpr,
				}
			} else {
				expr = tagExpr
			}
		}
	} else {
		expr = &BinaryExpr{
			Op:  stmt.Op,
			LHS: &VarRef{Val: "_tagKey"},
			RHS: stmt.TagKeyExpr,
		}
	}

	// Set condition or "AND" together.
	if condition == nil {
		condition = expr
	} else {
		condition = &BinaryExpr{
			Op:  AND,
			LHS: &ParenExpr{Expr: condition},
			RHS: &ParenExpr{Expr: expr},
		}
	}
	condition = rewriteSourcesCondition(stmt.Sources, condition)

	return &ShowTagValuesStatement{
		Database:   stmt.Database,
		Op:         stmt.Op,
		TagKeyExpr: stmt.TagKeyExpr,
		Condition:  condition,
		SortFields: stmt.SortFields,
		Limit:      stmt.Limit,
		Offset:     stmt.Offset,
	}, nil
}
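
// For example, a tag key list of (host, region) is folded left-to-right
// into `_tagKey = 'host' OR _tagKey = 'region'` before being ANDed with
// any user-supplied condition.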

func rewriteShowTagKeysStatement(stmt *ShowTagKeysStatement) (Statement, error) {
	// Check for time in WHERE clause (not supported).
	if HasTimeExpr(stmt.Condition) {
		return nil, errors.New("SHOW TAG KEYS doesn't support time in WHERE clause")
	}

	return &SelectStatement{
		Fields: []*Field{
			{Expr: &VarRef{Val: "tagKey"}},
		},
		Sources:    rewriteSources(stmt.Sources, "_tagKeys", stmt.Database),
		Condition:  rewriteSourcesCondition(stmt.Sources, stmt.Condition),
		Offset:     stmt.Offset,
		Limit:      stmt.Limit,
		SortFields: stmt.SortFields,
		OmitTime:   true,
		Dedupe:     true,
		IsRawQuery: true,
	}, nil
}

// rewriteSources rewrites sources to use the provided measurement name,
// keeping each source's retention policy and falling back to the default
// database when one isn't set.
func rewriteSources(sources Sources, measurementName, defaultDatabase string) Sources {
	newSources := Sources{}
	for _, src := range sources {
		if src == nil {
			continue
		}
		mm := src.(*Measurement)
		database := mm.Database
		if database == "" {
			database = defaultDatabase
		}
		newSources = append(newSources,
			&Measurement{
				Database:        database,
				RetentionPolicy: mm.RetentionPolicy,
				Name:            measurementName,
			})
	}
	if len(newSources) <= 0 {
		return append(newSources, &Measurement{
			Database: defaultDatabase,
			Name:     measurementName,
		})
	}
	return newSources
}

// rewriteSourcesCondition rewrites sources into `name` expressions.
// Merges with cond and returns a new condition.
func rewriteSourcesCondition(sources Sources, cond Expr) Expr {
	if len(sources) == 0 {
		return cond
	}

	// Generate an OR'd set of filters on source name.
	var scond Expr
	for _, source := range sources {
		mm := source.(*Measurement)

		// Generate a filtering expression on the measurement name.
		var expr Expr
		if mm.Regex != nil {
			expr = &BinaryExpr{
				Op:  EQREGEX,
				LHS: &VarRef{Val: "_name"},
				RHS: &RegexLiteral{Val: mm.Regex.Val},
			}
		} else if mm.Name != "" {
			expr = &BinaryExpr{
				Op:  EQ,
				LHS: &VarRef{Val: "_name"},
				RHS: &StringLiteral{Val: mm.Name},
			}
		}

		if scond == nil {
			scond = expr
		} else {
			scond = &BinaryExpr{
				Op:  OR,
				LHS: scond,
				RHS: expr,
			}
		}
	}

	if cond != nil {
		return &BinaryExpr{
			Op:  AND,
			LHS: &ParenExpr{Expr: scond},
			RHS: &ParenExpr{Expr: cond},
		}
	}
	return scond
}
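
// For example, sources [mydb.myrp1.cpu] merged with the condition
// `region = 'uswest'` produce `(_name = 'cpu') AND (region = 'uswest')`,
// as asserted by the statement rewriter tests below.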
161
vendor/github.com/influxdata/influxdb/influxql/statement_rewriter_test.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
package influxql_test

import (
	"testing"

	"github.com/influxdata/influxdb/influxql"
)

func TestRewriteStatement(t *testing.T) {
	tests := []struct {
		stmt string
		s    string
	}{
		{
			stmt: `SHOW FIELD KEYS`,
			s:    `SELECT fieldKey, fieldType FROM _fieldKeys`,
		},
		{
			stmt: `SHOW FIELD KEYS ON db0`,
			s:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys`,
		},
		{
			stmt: `SHOW FIELD KEYS FROM cpu`,
			s:    `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW FIELD KEYS ON db0 FROM cpu`,
			s:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW FIELD KEYS FROM /c.*/`,
			s:    `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW FIELD KEYS ON db0 FROM /c.*/`,
			s:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW FIELD KEYS FROM mydb.myrp2.cpu`,
			s:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2.cpu`,
			s:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW FIELD KEYS FROM mydb.myrp2./c.*/`,
			s:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2./c.*/`,
			s:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW SERIES`,
			s:    `SELECT "key" FROM _series`,
		},
		{
			stmt: `SHOW SERIES ON db0`,
			s:    `SELECT "key" FROM db0.._series`,
		},
		{
			stmt: `SHOW SERIES FROM cpu`,
			s:    `SELECT "key" FROM _series WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW SERIES ON db0 FROM cpu`,
			s:    `SELECT "key" FROM db0.._series WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW SERIES FROM mydb.myrp1.cpu`,
			s:    `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu`,
			s:    `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW SERIES FROM mydb.myrp1./c.*/`,
			s:    `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/`,
			s:    `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW TAG KEYS`,
			s:    `SELECT tagKey FROM _tagKeys`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0`,
			s:    `SELECT tagKey FROM db0.._tagKeys`,
		},
		{
			stmt: `SHOW TAG KEYS FROM cpu`,
			s:    `SELECT tagKey FROM _tagKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM cpu`,
			s:    `SELECT tagKey FROM db0.._tagKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW TAG KEYS FROM /c.*/`,
			s:    `SELECT tagKey FROM _tagKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM /c.*/`,
			s:    `SELECT tagKey FROM db0.._tagKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`,
			s:    `SELECT tagKey FROM _tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest'`,
			s:    `SELECT tagKey FROM db0.._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,
		},
		{
			stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 'cpu'`,
		},
		{
			stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`,
		},
		{
			stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest'`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,
		},
		{
			stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest'`,
			s:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,
		},
		{
			stmt: `SELECT value FROM cpu`,
			s:    `SELECT value FROM cpu`,
		},
	}

	for _, test := range tests {
		stmt, err := influxql.ParseStatement(test.stmt)
		if err != nil {
			t.Errorf("error parsing statement: %s", err)
		} else {
			stmt, err = influxql.RewriteStatement(stmt)
			if err != nil {
				t.Errorf("error rewriting statement: %s", err)
			} else if s := stmt.String(); s != test.s {
				t.Errorf("error rendering string. expected %s, actual: %s", test.s, s)
			}
		}
	}
}
129
vendor/github.com/influxdata/influxdb/influxql/subquery.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
package influxql

type subqueryBuilder struct {
	ic   IteratorCreator
	stmt *SelectStatement
}

// buildAuxIterator constructs an auxiliary Iterator from a subquery.
func (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error) {
	// Retrieve a list of fields needed for conditions.
	auxFields := opt.Aux
	conds := ExprNames(opt.Condition)
	if len(conds) > 0 {
		auxFields = make([]VarRef, len(opt.Aux)+len(conds))
		copy(auxFields, opt.Aux)
		copy(auxFields[len(opt.Aux):], conds)
	}

	// Map the desired auxiliary fields from the substatement.
	indexes := b.mapAuxFields(auxFields)
	subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
	if err != nil {
		return nil, err
	}
	subOpt.Aux = auxFields

	itrs, err := buildIterators(b.stmt, b.ic, subOpt)
	if err != nil {
		return nil, err
	}

	// Construct the iterators for the subquery.
	input := NewIteratorMapper(itrs, nil, indexes, subOpt)
	// If there is a condition, filter it now.
	if opt.Condition != nil {
		input = NewFilterIterator(input, opt.Condition, subOpt)
	}
	return input, nil
}

func (b *subqueryBuilder) mapAuxFields(auxFields []VarRef) []IteratorMap {
	indexes := make([]IteratorMap, len(auxFields))
	for i, name := range auxFields {
		m := b.mapAuxField(&name)
		if m == nil {
			// If this field doesn't map to anything, use the NullMap so it
			// shows up as null.
			m = NullMap{}
		}
		indexes[i] = m
	}
	return indexes
}

func (b *subqueryBuilder) mapAuxField(name *VarRef) IteratorMap {
	offset := 0
	for i, f := range b.stmt.Fields {
		if f.Name() == name.Val {
			return FieldMap(i + offset)
		} else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") {
			// We may match one of the arguments in "top" or "bottom".
			if len(call.Args) > 2 {
				for j, arg := range call.Args[1 : len(call.Args)-1] {
					if arg, ok := arg.(*VarRef); ok && arg.Val == name.Val {
						return FieldMap(i + j + 1)
					}
				}
				// Increment the offset so we have the correct index for later fields.
				offset += len(call.Args) - 2
			}
		}
	}

	// Unable to find this in the list of fields.
	// Look within the dimensions and create a field if we find it.
	for _, d := range b.stmt.Dimensions {
		if d, ok := d.Expr.(*VarRef); ok && name.Val == d.Val {
			return TagMap(d.Val)
		}
	}

	// Unable to find any matches.
	return nil
}
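
// A sketch of the offset handling above: for fields [top(value, host, 5)],
// a reference to "host" resolves to FieldMap(0 + 0 + 1) = FieldMap(1),
// since a call's tag arguments occupy the columns after its own value.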

func (b *subqueryBuilder) buildVarRefIterator(expr *VarRef, opt IteratorOptions) (Iterator, error) {
	// Look for the field or tag that is driving this query.
	driver := b.mapAuxField(expr)
	if driver == nil {
		// Exit immediately if there is no driver. If there is no driver, there
		// are no results. Period.
		return nil, nil
	}

	// Determine necessary auxiliary fields for this query.
	auxFields := opt.Aux
	conds := ExprNames(opt.Condition)
	if len(conds) > 0 && len(opt.Aux) > 0 {
		// Combine the auxiliary fields requested with the ones in the condition.
		auxFields = make([]VarRef, len(opt.Aux)+len(conds))
		copy(auxFields, opt.Aux)
		copy(auxFields[len(opt.Aux):], conds)
	} else if len(conds) > 0 {
		// Set the auxiliary fields to what is in the condition since we have
		// requested none in the query itself.
		auxFields = conds
	}

	// Map the auxiliary fields to their index in the subquery.
	indexes := b.mapAuxFields(auxFields)
	subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
	if err != nil {
		return nil, err
	}
	subOpt.Aux = auxFields

	itrs, err := buildIterators(b.stmt, b.ic, subOpt)
	if err != nil {
		return nil, err
	}

	// Construct the iterators for the subquery.
	input := NewIteratorMapper(itrs, driver, indexes, subOpt)
	// If there is a condition, filter it now.
	if opt.Condition != nil {
		input = NewFilterIterator(input, opt.Condition, subOpt)
	}
	return input, nil
}
262
vendor/github.com/influxdata/influxdb/influxql/task_manager.go
generated
vendored
Normal file
@@ -0,0 +1,262 @@
package influxql

import (
	"fmt"
	"sync"
	"time"

	"github.com/influxdata/influxdb/models"
	"github.com/uber-go/zap"
)

const (
	// DefaultQueryTimeout is the default timeout for executing a query.
	// A value of zero will have no query timeout.
	DefaultQueryTimeout = time.Duration(0)
)

// TaskManager takes care of all aspects related to managing running queries.
type TaskManager struct {
	// Query execution timeout.
	QueryTimeout time.Duration

	// Log queries if they are slower than this time.
	// If zero, slow queries will never be logged.
	LogQueriesAfter time.Duration

	// Maximum number of concurrent queries.
	MaxConcurrentQueries int

	// Logger to use for all logging.
	// Defaults to discarding all log output.
	Logger zap.Logger

	// Used for managing and tracking running queries.
	queries  map[uint64]*QueryTask
	nextID   uint64
	mu       sync.RWMutex
	shutdown bool
}

// NewTaskManager creates a new TaskManager.
func NewTaskManager() *TaskManager {
	return &TaskManager{
		QueryTimeout: DefaultQueryTimeout,
		Logger:       zap.New(zap.NullEncoder()),
		queries:      make(map[uint64]*QueryTask),
		nextID:       1,
	}
}

// ExecuteStatement executes a statement containing one of the task management queries.
func (t *TaskManager) ExecuteStatement(stmt Statement, ctx ExecutionContext) error {
	switch stmt := stmt.(type) {
	case *ShowQueriesStatement:
		rows, err := t.executeShowQueriesStatement(stmt)
		if err != nil {
			return err
		}

		ctx.Results <- &Result{
			StatementID: ctx.StatementID,
			Series:      rows,
		}
	case *KillQueryStatement:
		var messages []*Message
		if ctx.ReadOnly {
			messages = append(messages, ReadOnlyWarning(stmt.String()))
		}

		if err := t.executeKillQueryStatement(stmt); err != nil {
			return err
		}
		ctx.Results <- &Result{
			StatementID: ctx.StatementID,
			Messages:    messages,
		}
	default:
		return ErrInvalidQuery
	}
	return nil
}

func (t *TaskManager) executeKillQueryStatement(stmt *KillQueryStatement) error {
	return t.KillQuery(stmt.QueryID)
}

func (t *TaskManager) executeShowQueriesStatement(q *ShowQueriesStatement) (models.Rows, error) {
	t.mu.RLock()
	defer t.mu.RUnlock()

	now := time.Now()

	values := make([][]interface{}, 0, len(t.queries))
	for id, qi := range t.queries {
		d := now.Sub(qi.startTime)

		switch {
		case d >= time.Second:
			d = d - (d % time.Second)
		case d >= time.Millisecond:
			d = d - (d % time.Millisecond)
		case d >= time.Microsecond:
			d = d - (d % time.Microsecond)
		}

		values = append(values, []interface{}{id, qi.query, qi.database, d.String()})
	}

	return []*models.Row{{
		Columns: []string{"qid", "query", "database", "duration"},
		Values:  values,
	}}, nil
}

func (t *TaskManager) query(qid uint64) (*QueryTask, bool) {
	t.mu.RLock()
	query, ok := t.queries[qid]
	t.mu.RUnlock()
	return query, ok
}

// AttachQuery attaches a running query to be managed by the TaskManager.
// Returns the query id of the newly attached query or an error if it was
// unable to assign a query id or attach the query to the TaskManager.
// This function also returns a channel that will be closed when this
// query finishes running.
//
// After a query finishes running, the system is free to reuse a query id.
func (t *TaskManager) AttachQuery(q *Query, database string, interrupt <-chan struct{}) (uint64, *QueryTask, error) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.shutdown {
		return 0, nil, ErrQueryEngineShutdown
	}

	if t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries {
		return 0, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries)
	}

	qid := t.nextID
	query := &QueryTask{
		query:     q.String(),
		database:  database,
		startTime: time.Now(),
		closing:   make(chan struct{}),
		monitorCh: make(chan error),
	}
	t.queries[qid] = query

	go t.waitForQuery(qid, query.closing, interrupt, query.monitorCh)
	if t.LogQueriesAfter != 0 {
		go query.monitor(func(closing <-chan struct{}) error {
			timer := time.NewTimer(t.LogQueriesAfter)
			defer timer.Stop()

			select {
			case <-timer.C:
				t.Logger.Warn(fmt.Sprintf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)",
					query.query, qid, query.database, t.LogQueriesAfter))
			case <-closing:
			}
			return nil
		})
	}
	t.nextID++
	return qid, query, nil
}

// KillQuery stops and removes a query from the TaskManager.
// This method can be used to forcefully terminate a running query.
func (t *TaskManager) KillQuery(qid uint64) error {
	t.mu.Lock()
	defer t.mu.Unlock()

	query, ok := t.queries[qid]
	if !ok {
		return fmt.Errorf("no such query id: %d", qid)
	}

	close(query.closing)
	delete(t.queries, qid)
	return nil
}
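
// Minimal lifecycle sketch (hypothetical caller; q is an already-parsed
// *Query, and a nil interrupt channel simply never fires):
//
//	tm := NewTaskManager()
//	qid, task, err := tm.AttachQuery(q, "db0", nil)
//	if err != nil {
//		return err
//	}
//	defer tm.KillQuery(qid)
//	_ = task // waitForQuery reports timeouts/interrupts via setError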

// QueryInfo represents the information for a query.
type QueryInfo struct {
	ID       uint64        `json:"id"`
	Query    string        `json:"query"`
	Database string        `json:"database"`
	Duration time.Duration `json:"duration"`
}

// Queries returns a list of all running queries with information about them.
func (t *TaskManager) Queries() []QueryInfo {
	t.mu.RLock()
	defer t.mu.RUnlock()

	now := time.Now()
	queries := make([]QueryInfo, 0, len(t.queries))
	for id, qi := range t.queries {
		queries = append(queries, QueryInfo{
			ID:       id,
			Query:    qi.query,
			Database: qi.database,
			Duration: now.Sub(qi.startTime),
		})
	}
	return queries
}

func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) {
	var timerCh <-chan time.Time
	if t.QueryTimeout != 0 {
		timer := time.NewTimer(t.QueryTimeout)
		timerCh = timer.C
		defer timer.Stop()
	}

	select {
	case <-closing:
		query, ok := t.query(qid)
		if !ok {
			break
		}
		query.setError(ErrQueryInterrupted)
	case err := <-monitorCh:
		if err == nil {
			break
		}

		query, ok := t.query(qid)
		if !ok {
			break
		}
		query.setError(err)
	case <-timerCh:
		query, ok := t.query(qid)
		if !ok {
			break
		}
		query.setError(ErrQueryTimeoutLimitExceeded)
	case <-interrupt:
		// Query was manually closed so exit the select.
		return
	}
	t.KillQuery(qid)
}

// Close kills all running queries and prevents new queries from being attached.
func (t *TaskManager) Close() error {
	t.mu.Lock()
	defer t.mu.Unlock()

	t.shutdown = true
	for _, query := range t.queries {
		query.setError(ErrQueryEngineShutdown)
		close(query.closing)
	}
	t.queries = nil
	return nil
}
30
vendor/github.com/influxdata/influxdb/influxql/tmpldata
generated
vendored
Normal file
@@ -0,0 +1,30 @@
[
	{
		"Name":"Float",
		"name":"float",
		"Type":"float64",
		"Nil":"0",
		"Zero":"float64(0)"
	},
	{
		"Name":"Integer",
		"name":"integer",
		"Type":"int64",
		"Nil":"0",
		"Zero":"int64(0)"
	},
	{
		"Name":"String",
		"name":"string",
		"Type":"string",
		"Nil":"\"\"",
		"Zero":"\"\""
	},
	{
		"Name":"Boolean",
		"name":"boolean",
		"Type":"bool",
		"Nil":"false",
		"Zero":"false"
	}
]
326
vendor/github.com/influxdata/influxdb/influxql/token.go
generated
vendored
Normal file
@@ -0,0 +1,326 @@
package influxql

import (
	"strings"
)

// Token is a lexical token of the InfluxQL language.
type Token int

// These are a comprehensive list of InfluxQL language tokens.
const (
	// ILLEGAL Token, EOF, WS are Special InfluxQL tokens.
	ILLEGAL Token = iota
	EOF
	WS
	COMMENT

	literalBeg
	// IDENT and the following are InfluxQL literal tokens.
	IDENT       // main
	BOUNDPARAM  // $param
	NUMBER      // 12345.67
	INTEGER     // 12345
	DURATIONVAL // 13h
	STRING      // "abc"
	BADSTRING   // "abc
	BADESCAPE   // \q
	TRUE        // true
	FALSE       // false
	REGEX       // Regular expressions
	BADREGEX    // `.*
	literalEnd

	operatorBeg
	// ADD and the following are InfluxQL Operators
	ADD         // +
	SUB         // -
	MUL         // *
	DIV         // /
	MOD         // %
	BITWISE_AND // &
	BITWISE_OR  // |
	BITWISE_XOR // ^

	AND // AND
	OR  // OR

	EQ       // =
	NEQ      // !=
	EQREGEX  // =~
	NEQREGEX // !~
	LT       // <
	LTE      // <=
	GT       // >
	GTE      // >=
	operatorEnd

	LPAREN      // (
	RPAREN      // )
	COMMA       // ,
	COLON       // :
	DOUBLECOLON // ::
	SEMICOLON   // ;
	DOT         // .

	keywordBeg
	// ALL and the following are InfluxQL Keywords
	ALL
	ALTER
	ANY
	AS
	ASC
	BEGIN
	BY
	CREATE
	CONTINUOUS
	DATABASE
	DATABASES
	DEFAULT
	DELETE
	DESC
	DESTINATIONS
	DIAGNOSTICS
	DISTINCT
	DROP
	DURATION
	END
	EVERY
	EXPLAIN
	FIELD
	FOR
	FROM
	GRANT
	GRANTS
	GROUP
	GROUPS
	IN
	INF
	INSERT
	INTO
	KEY
	KEYS
	KILL
	LIMIT
	MEASUREMENT
	MEASUREMENTS
	NAME
	OFFSET
	ON
	ORDER
	PASSWORD
	POLICY
	POLICIES
	PRIVILEGES
	QUERIES
	QUERY
	READ
	REPLICATION
	RESAMPLE
	RETENTION
	REVOKE
	SELECT
	SERIES
	SET
	SHOW
	SHARD
	SHARDS
	SLIMIT
	SOFFSET
	STATS
	SUBSCRIPTION
	SUBSCRIPTIONS
	TAG
	TO
	USER
	USERS
	VALUES
	WHERE
	WITH
	WRITE
	keywordEnd
)

var tokens = [...]string{
	ILLEGAL: "ILLEGAL",
	EOF:     "EOF",
	WS:      "WS",

	IDENT:       "IDENT",
	NUMBER:      "NUMBER",
	DURATIONVAL: "DURATIONVAL",
	STRING:      "STRING",
	BADSTRING:   "BADSTRING",
	BADESCAPE:   "BADESCAPE",
	TRUE:        "TRUE",
	FALSE:       "FALSE",
	REGEX:       "REGEX",

	ADD:         "+",
	SUB:         "-",
	MUL:         "*",
	DIV:         "/",
	MOD:         "%",
	BITWISE_AND: "&",
	BITWISE_OR:  "|",
	BITWISE_XOR: "^",

	AND: "AND",
	OR:  "OR",

	EQ:       "=",
	NEQ:      "!=",
	EQREGEX:  "=~",
	NEQREGEX: "!~",
	LT:       "<",
	LTE:      "<=",
	GT:       ">",
	GTE:      ">=",

	LPAREN:      "(",
	RPAREN:      ")",
	COMMA:       ",",
	COLON:       ":",
	DOUBLECOLON: "::",
	SEMICOLON:   ";",
	DOT:         ".",

	ALL:           "ALL",
	ALTER:         "ALTER",
	ANY:           "ANY",
	AS:            "AS",
	ASC:           "ASC",
	BEGIN:         "BEGIN",
	BY:            "BY",
	CREATE:        "CREATE",
	CONTINUOUS:    "CONTINUOUS",
	DATABASE:      "DATABASE",
	DATABASES:     "DATABASES",
	DEFAULT:       "DEFAULT",
	DELETE:        "DELETE",
	DESC:          "DESC",
	DESTINATIONS:  "DESTINATIONS",
	DIAGNOSTICS:   "DIAGNOSTICS",
	DISTINCT:      "DISTINCT",
	DROP:          "DROP",
	DURATION:      "DURATION",
	END:           "END",
	EVERY:         "EVERY",
	EXPLAIN:       "EXPLAIN",
	FIELD:         "FIELD",
	FOR:           "FOR",
	FROM:          "FROM",
	GRANT:         "GRANT",
	GRANTS:        "GRANTS",
	GROUP:         "GROUP",
	GROUPS:        "GROUPS",
	IN:            "IN",
	INF:           "INF",
	INSERT:        "INSERT",
	INTO:          "INTO",
	KEY:           "KEY",
	KEYS:          "KEYS",
	KILL:          "KILL",
	LIMIT:         "LIMIT",
	MEASUREMENT:   "MEASUREMENT",
	MEASUREMENTS:  "MEASUREMENTS",
	NAME:          "NAME",
	OFFSET:        "OFFSET",
	ON:            "ON",
	ORDER:         "ORDER",
	PASSWORD:      "PASSWORD",
	POLICY:        "POLICY",
	POLICIES:      "POLICIES",
	PRIVILEGES:    "PRIVILEGES",
	QUERIES:       "QUERIES",
	QUERY:         "QUERY",
	READ:          "READ",
	REPLICATION:   "REPLICATION",
	RESAMPLE:      "RESAMPLE",
	RETENTION:     "RETENTION",
	REVOKE:        "REVOKE",
	SELECT:        "SELECT",
	SERIES:        "SERIES",
	SET:           "SET",
	SHOW:          "SHOW",
	SHARD:         "SHARD",
	SHARDS:        "SHARDS",
	SLIMIT:        "SLIMIT",
	SOFFSET:       "SOFFSET",
	STATS:         "STATS",
	SUBSCRIPTION:  "SUBSCRIPTION",
	SUBSCRIPTIONS: "SUBSCRIPTIONS",
	TAG:           "TAG",
	TO:            "TO",
	USER:          "USER",
	USERS:         "USERS",
	VALUES:        "VALUES",
	WHERE:         "WHERE",
	WITH:          "WITH",
	WRITE:         "WRITE",
}

var keywords map[string]Token

func init() {
	keywords = make(map[string]Token)
	for tok := keywordBeg + 1; tok < keywordEnd; tok++ {
		keywords[strings.ToLower(tokens[tok])] = tok
	}
	for _, tok := range []Token{AND, OR} {
		keywords[strings.ToLower(tokens[tok])] = tok
	}
	keywords["true"] = TRUE
	keywords["false"] = FALSE
}

// String returns the string representation of the token.
func (tok Token) String() string {
	if tok >= 0 && tok < Token(len(tokens)) {
		return tokens[tok]
	}
	return ""
}

// Precedence returns the operator precedence of the binary operator token.
func (tok Token) Precedence() int {
	switch tok {
	case OR:
		return 1
	case AND:
		return 2
	case EQ, NEQ, EQREGEX, NEQREGEX, LT, LTE, GT, GTE:
		return 3
	case ADD, SUB, BITWISE_OR, BITWISE_XOR:
		return 4
	case MUL, DIV, MOD, BITWISE_AND:
		return 5
	}
	return 0
}
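
// For example, in `a + b * c` the MUL token (precedence 5) binds tighter
// than ADD (precedence 4), so the expression parses as a + (b * c), while
// AND/OR sit lowest and group whole comparison clauses.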

// isOperator returns true for operator tokens.
func (tok Token) isOperator() bool { return tok > operatorBeg && tok < operatorEnd }

// tokstr returns a literal if provided, otherwise returns the token string.
func tokstr(tok Token, lit string) string {
	if lit != "" {
		return lit
	}
	return tok.String()
}

// Lookup returns the token associated with a given string.
func Lookup(ident string) Token {
	if tok, ok := keywords[strings.ToLower(ident)]; ok {
		return tok
	}
	return IDENT
}
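
// For example, Lookup("seLECT") returns SELECT (keywords are matched
// case-insensitively via strings.ToLower), while Lookup("foo") returns IDENT.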

// Pos specifies the line and character position of a token.
// The Char and Line are both zero-based indexes.
type Pos struct {
	Line int
	Char int
}