diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..25ebf91 --- /dev/null +++ b/.gitignore @@ -0,0 +1,16 @@ +### Basic ignore file + +# Binaries for programs and plugins +vsphere-influxdb + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Configuration file +vsphere-influxdb.json + +# Vim swap files +*.swp diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..1e73910 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,22 @@ +language: go +sudo: required +go: + - 1.9 +env: + - PATH=/home/travis/gopath/bin:$PATH +before_install: + - sudo apt-get -qq update + - sudo apt-get install -y ruby ruby-dev build-essential rpm + - go get -u github.com/golang/dep/cmd/dep + - go get -u github.com/alecthomas/gometalinter +install: + - dep ensure +before_script: + - gometalinter --install + # - gometalinter --vendor ./... +script: + - git status +after_success: +# - gem install --no-ri --no-rdoc fpm + - test -n "$TRAVIS_TAG" && curl -sL https://git.io/goreleaser | bash + diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..dd861c2 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,66 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:ec837623c12abf4f44837163ad32274dd0c9dfa29f0d65068c60897bce34bb75" + name = "github.com/influxdata/influxdb" + packages = [ + "client/v2", + "models", + "pkg/escape", + ] + pruneopts = "" + revision = "a7c3d3c0f7b74f71919047adbc4933460967a576" + version = "v1.3.6" + +[[projects]] + digest = "1:c5342f3ddefc1644d1a3ed028e739bfabe5b457978084b9a28d42ed235c8bb4d" + name = "github.com/vmware/govmomi" + packages = [ + ".", + "property", + "session", + "vim25", + "vim25/debug", + "vim25/methods", + "vim25/mo", + "vim25/progress", + "vim25/soap", + "vim25/types", + "vim25/xml", + ] + pruneopts = "" + revision = "b63044e5f833781eb7b305bc035392480ee06a82" + version = "v0.15.0" + +[[projects]] + branch = "master" + digest = "1:70ca15641aa31be55859a7f75ddef3ae384ae18068deab8274668a1a77d1e84a" + name = "golang.org/x/net" + packages = ["context"] + pruneopts = "" + revision = "4b14673ba32bee7f5ac0f990a48f033919fd418b" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/davecgh/go-spew/spew", + "github.com/influxdata/influxdb/client/v2", + "github.com/vmware/govmomi", + "github.com/vmware/govmomi/property", + "github.com/vmware/govmomi/vim25/methods", + "github.com/vmware/govmomi/vim25/mo", + "github.com/vmware/govmomi/vim25/types", + "golang.org/x/net/context", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..2ba4bdb --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,38 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/davecgh/go-spew"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/influxdata/influxdb"
+ version = "1.3.6"
+
+[[constraint]]
+ name = "github.com/vmware/govmomi"
+ version = "0.15.0"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/net"
diff --git a/README.md b/README.md
index 45f16c2..225187f 100644
--- a/README.md
+++ b/README.md
@@ -1,64 +1,100 @@
-# Collect VMware vSphere, vCenter and ESXi performance metrics and send them to InfluxDB
+[![Releases](https://img.shields.io/github/release/Oxalide/vsphere-influxdb-go.svg?style=flat-square)](https://github.com/Oxalide/vsphere-influxdb-go/releases/latest) [![Build Status](https://travis-ci.org/Oxalide/vsphere-influxdb-go.svg?branch=master)](https://travis-ci.org/Oxalide/vsphere-influxdb-go) [![Go Report Card](https://goreportcard.com/badge/Oxalide/vsphere-influxdb-go)](https://goreportcard.com/report/github.com/Oxalide/vsphere-influxdb-go)

-# Extenal dependencies
+# Collect VMware vCenter and ESXi performance metrics and send them to InfluxDB

-* [govmomi](https://github.com/vmware/govmomi)
-* [influxDB go client](https://github.com/influxdata/influxdb/tree/master/client/v2)
+# Screenshots of Grafana dashboards
+![screenshot](https://grafana.com/api/dashboards/3556/images/2224/image)
+![screenshot](https://grafana.com/api/dashboards/3556/images/2227/image)
+![screenshot](https://grafana.com/api/dashboards/3556/images/2230/image)
+![screenshot](https://grafana.com/api/dashboards/3571/images/2245/image)
+![screenshot](https://grafana.com/api/dashboards/3571/images/2251/image)
+![screenshot](https://grafana.com/api/dashboards/3571/images/2254/image)

-You'll need to go get them both for the script to work:
+# Description and Features
+This is a tool written in Go that helps you do your own custom-tailored monitoring, capacity planning and performance debugging of VMware-based infrastructures. It collects all possible metrics from vCenters and ESXi hypervisors about hosts, clusters, resource pools, datastores and virtual machines and sends them to an [InfluxDB database](https://github.com/influxdata/influxdb) (a popular open source time series database written in Go). You can then visualise the data in Grafana (links to sample dashboards [below](#example-dashboards)) or Chronograf, and use Grafana, Kapacitor or custom scripts for alerting based on your needs, KPIs and capacity plans/expectations.
+
+# Install
+Grab the [latest release](https://github.com/Oxalide/vsphere-influxdb-go/releases/latest) for your OS (deb, rpm packages, exes, archives for Linux, Darwin, Windows, FreeBSD on amd64, arm6, arm7, arm64 are available) and install it.
+
+For Debian/Ubuntu on amd64:
```
-go get github.com/vmware/govmomi
-go get github.com/influxdata/influxdb/client/v2
-
+curl -L -O $(curl -s https://api.github.com/repos/Oxalide/vsphere-influxdb-go/releases | grep browser_download_url | grep '64[.]deb' | head -n 1 | cut -d '"' -f 4)
+dpkg -i vsphere-influxdb-go*.deb
```
-# Run
+CentOS/Red Hat on amd64:
+```
+curl -L -O $(curl -s https://api.github.com/repos/Oxalide/vsphere-influxdb-go/releases | grep browser_download_url | grep '64[.]rpm' | head -n 1 | cut -d '"' -f 4)
+rpm -i vsphere-influxdb-go*.rpm
+```
+This will install vsphere-influxdb-go in /usr/local/bin/vsphere-influxdb-go and an example configuration file in /etc/vsphere-influxdb-go.json that needs to be edited.
+
+
+# Configure
+
+The JSON configuration file in /etc/vsphere-influxdb-go.json contains all your vCenters/ESXi to connect to, the InfluxDB connection details (url, username/password, database to use), and the metrics to collect (full list [here](http://www.virten.net/2015/05/vsphere-6-0-performance-counter-description/)).
+
+**Note: Not all metrics are available directly, you might need to change your metric collection level.**
+A table with the level needed for each metric is available [here](http://www.virten.net/2015/05/which-performance-counters-are-available-in-each-statistic-level/), and you can find a Python script to change the collection level in the [tools folder of the project](./tools/).
+
+Additionally, you can provide a vCenter/ESXi server and InfluxDB connection details via environment variables, which is extremely helpful when running inside a container:
+
+For InfluxDB:
+* INFLUX\_HOSTNAME
+* INFLUX\_USERNAME
+* INFLUX\_PASSWORD
+* INFLUX\_DATABASE
+
+For vSphere:
+* VSPHERE\_HOSTNAME
+* VSPHERE\_USERNAME
+* VSPHERE\_PASSWORD
+
+Keep in mind that currently only one vCenter/ESXi can be added via environment variables (an example invocation is sketched below, after the compile-from-source section).
+
+If you set a domain, it will be automatically removed from the names of the found objects.
+
+Metrics collected are defined by associating ObjectType groups with Metric groups.
+
+There have been reports of the script not working correctly when the time is incorrect on the vSphere host or vCenter. Make sure that the time is correct or activate the NTP service on the machine.
+
+# Run as a service
+
+Create a crontab to run it every X minutes (one minute is fine - in our case, ~30 vCenters, ~100 ESXi and ~1400 VMs take approximately 25s to collect all metrics - rather impressive, I might add).
+```
+* * * * * /usr/local/bin/vsphere-influxdb-go
+```
+
+# Example dashboards
+* https://grafana.com/dashboards/1299 (thanks to @exbane)
+* https://grafana.com/dashboards/3556 (VMware cloud overview, mostly provisioning/global cloud usage stats)
+* https://grafana.com/dashboards/3571 (VMware performance, mostly VM-oriented performance stats)
+
+Contributions welcome!
+
+
+# Compile from source
```
go get github.com/oxalide/vsphere-influxdb-go
```
-This will install the project in your $GOBIN($GOPATH/bin). If you have appended $GOBIN to your $PATH, you will be able to call it directly. Otherwise, you'll have to call it with its full path.
+This will install the project in your $GOBIN ($GOPATH/bin). If you have appended $GOBIN to your $PATH, you will be able to call it directly. Otherwise, you'll have to call it with its full path.
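To illustrate the environment-variable configuration described in the Configure section above, here is a minimal sketch of an ad-hoc (e.g. containerized) run. The variable names come from this README; the host names, credentials and database name are placeholders to replace with your own values:
```
# Placeholder connection details - substitute your own InfluxDB and vCenter/ESXi values.
export INFLUX_HOSTNAME=influxdb.example.com
export INFLUX_USERNAME=vsphere
export INFLUX_PASSWORD=changeme
export INFLUX_DATABASE=vmware_performance
export VSPHERE_HOSTNAME=vcenter.example.com
export VSPHERE_USERNAME=readonly@vsphere.local
export VSPHERE_PASSWORD=changeme
vsphere-influxdb-go
```
For a plain run using the JSON configuration file instead, the binary can simply be invoked directly.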
Example: ``` -vsphere-influxdb-go +vsphere-influxdb-go ``` or : ``` $GOBIN/vsphere-influxdb-go ``` - -# Configure - -You'll need a JSON file with all your vCenters/ESXi to connect to, the InfluxDB connection details(url, username/password, database to use), and the metrics to collect. - -If you set a domain, it will be automaticaly removed from the names of the found objects. - -Metrics collected are defined by associating ObjectType groups with Metric groups. -To see all available metrics, check out [this](http://www.virten.net/2015/05/vsphere-6-0-performance-counter-description/) page. - -Note: Not all metrics are available directly, you might need to change your metric collection level. -A table with the level needed for each metric is availble [here](http://www.virten.net/2015/05/which-performance-counters-are-available-in-each-statistic-level/), and you can find a PowerCLI script that changes the collect level [here](http://www.valcolabs.com/2012/02/06/modify-historical-statistics-level-using-powercli/) - -An example of configuration file is [here](./vsphere-influxdb-go.json). - -You need to place it at /etc/*binaryname*.json (/etc/vsphere-influxdb-go.json by default) - - -# Run as a service - -Create a crontab to run it every X minutes(one minute is fine - in our case, ~30 vCenters, ~100 ESXi and ~1400 VMs take approximately 25s to collect all metrics - rather impressive, i might add). -``` -* * * * * $HOME/work/go/bin/vsphere-influxdb-go -``` - - -# TODO +# TODO before v1.0 * Add service discovery(or probably something like [Viper](https://github.com/spf13/viper) for easier and more flexible configuration with multiple backends) -* Add extra tags(cluster for the hosts, etc.) +* Daemonize +* Provide a ready to use Dockerfile # Contributing You are welcome to contribute! diff --git a/goreleaser.yml b/goreleaser.yml new file mode 100644 index 0000000..5a7b836 --- /dev/null +++ b/goreleaser.yml @@ -0,0 +1,47 @@ +project_name: vsphere-influxdb-go +builds: + - binary: vsphere-influxdb-go + goos: + - windows + - darwin + - linux + - freebsd + goarch: + - amd64 + - arm + - arm64 + goarm: + - 6 + - 7 + +archive: + format: tar.gz + files: + - LICENSE.txt + - README.md +nfpm: + # Your app's vendor. + # Default is empty. + vendor: Oxalide + # Your app's homepage. + homepage: https://github.com/Oxalide/vsphere-influxdb-go + + # Your app's maintainer (probably you). + maintainer: Adrian Todorov + + # Your app's description. + description: Collect VMware vSphere, vCenter and ESXi performance metrics and send them to InfluxDB + + # Your app's license. + license: GPL 3.0 + + # Formats to be generated. + formats: + - deb + - rpm + # Files or directories to add to your package (beyond the binary). + # Keys are source paths to get the files from. + # Values are the destination locations of the files in the package. 
+ files: + "vsphere-influxdb.json.sample": "/etc/vsphere-influxdb-go.json" + diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 0000000..78c797a --- /dev/null +++ b/tools/README.md @@ -0,0 +1,7 @@ +# Change vCenter metric collection level + +``` +git clone https://github.com/Oxalide/vsphere-influxdb-go.git +pip install -r tools/requirements.txt +./tools/change_metric_collection_level.py +``` diff --git a/tools/change_metric_collection_level.py b/tools/change_metric_collection_level.py new file mode 100644 index 0000000..f9694ed --- /dev/null +++ b/tools/change_metric_collection_level.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +#============================================ +# Script: change_metric_collection_level.py +# Description: Change the metric collection level of an interval in a vCenter +# Copyright 2017 Adrian Todorov, Oxalide ato@oxalide.com +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +#============================================ + +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim +import atexit +import sys +import requests +import argparse +import getpass +import linecache + +requests.packages.urllib3.disable_warnings() + +def PrintException(): + exc_type, exc_obj, tb = sys.exc_info() + f = tb.tb_frame + lineno = tb.tb_lineno + filename = f.f_code.co_filename + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj) + + +def get_args(): + parser = argparse.ArgumentParser(description='Arguments for talking to vCenter and modifying a PerfManager collection interval') + + parser.add_argument('-s', '--host', required=True,action='store',help='vSpehre service to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', help='User name to use') + parser.add_argument('-p', '--password', required=False, action='store', help='Password to use') + parser.add_argument('--interval-name', required=False, action='store', dest='intervalName', help='The name of the interval to modify') + parser.add_argument('--interval-key', required=False, action='store', dest='intervalKey', help='The key of the interval to modify') + parser.add_argument('--interval-level', type=int, required=True, default=4, action='store', dest='intervalLevel', help='The collection level wanted for the interval') + + args = parser.parse_args() + + if not args.password: + args.password = getpass.getpass(prompt='Enter password:\n') + if not args.intervalName and not args.intervalKey: + print "An interval name or key is needed" + exit(2) + + return args + +def change_level(host, user, pwd, port, level, key, name): + try: + print user + print pwd + print host + serviceInstance = SmartConnect(host=host,user=user,pwd=pwd,port=port) + atexit.register(Disconnect, serviceInstance) + 
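+        # RetrieveContent() returns the ServiceInstance content; content.perfManager is the
+        # vim.PerformanceManager, whose historicalInterval entries are the collection intervals
+        # updated below with UpdatePerfInterval().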
content = serviceInstance.RetrieveContent() + pm = content.perfManager + + for hi in pm.historicalInterval: + if (key and int(hi.key) == int(key)) or (name and str(hi.name) == str(name)): + print "Changing interval '" + str(hi.name) + "'" + newobj = hi + newobj.level = level + pm.UpdatePerfInterval(newobj) + + print "Intervals are now configured as follows: " + print "Name | Level" + pm2 = content.perfManager + for hi2 in pm2.historicalInterval: + print hi2.name + " | " + str(hi2.level) + + except Exception, e: + print "Error: %s " % (e) + PrintException() + exit(2) + + +if __name__ == "__main__": + args = get_args() + change_level(args.host, args.user, args.password, args.port, args.intervalLevel, args.intervalKey, args.intervalName) + + diff --git a/tools/requirements.txt b/tools/requirements.txt new file mode 100644 index 0000000..d020c00 --- /dev/null +++ b/tools/requirements.txt @@ -0,0 +1,3 @@ +pyVmomi +requests +argparse diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 0000000..984e073 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..c836416 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 0000000..2624304 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. 
+ +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. + +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. 
+ +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. + +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features, however it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 0000000..9579497 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..8a4a658 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. 
The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. 
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..1fe3cf3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..7c519ff --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? 
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 0000000..0f5ce47 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. +type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. 
+func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. + input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []sortTestCase{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. + { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Array + { + []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, + []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // SortableStructs. + { + // Note: not sorted - DisableMethods is set. + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + // Invalid. + { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithMethods ensures the sort functionality for relect.Value +// based sorting works as intended when using string methods. +func TestSortValuesWithMethods(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. 
+ { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithSpew ensures the sort functionality for relect.Value +// based sorting works as intended when using spew to stringify keys. +func TestSortValuesWithSpew(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} + helpTestSortValues(tests, &cs, t) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. 
If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) 
+} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. 
+ +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. 
+ + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..df1d582 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 0000000..5aad9c7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. 
+var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. 
+ v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. + v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. + v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. 
+ v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. + v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. 
+ v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + vLen := fmt.Sprintf("%d", len(v)) + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "(len=" + vLen + ") \"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. + k := "one" + kk := "two" + m := map[string]int{k: 1, kk: 2} + klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up + kkLen := fmt.Sprintf("%d", len(kk)) + mLen := fmt.Sprintf("%d", len(m)) + nilMap := map[string]int(nil) + nm := (*map[string]int)(nil) + pm := &m + mAddr := fmt.Sprintf("%p", pm) + pmAddr := fmt.Sprintf("%p", &pm) + mt := "map[string]int" + mt1 := "string" + mt2 := "int" + ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + + "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + + ") \"two\": (" + mt2 + ") 2\n}" + ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + + "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + + ") \"one\": (" + mt2 + ") 1\n}" + addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") + addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", + "(*"+mt+")("+mAddr+")("+ms2+")\n") + addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", + "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") + addDumpTest(nm, "(*"+mt+")()\n") + addDumpTest(nilMap, "("+mt+") \n") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. + k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. + v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. + v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. 
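The addPanicDumpTests and addErrorDumpTests cases defined just above rely on spew recovering a panic raised by a value's Stringer and embedding it in the output instead of letting it escape to the caller. A minimal sketch of that behaviour outside the test harness, assuming only the vendored github.com/davecgh/go-spew/spew package; the badStringer type below is a hypothetical stand-in for the test's panicer type, not part of the patch:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    // badStringer mirrors the test's panicer type: its Stringer always panics.
    type badStringer int

    func (b badStringer) String() string { panic("test panic") }

    func main() {
        // Sdump recovers the panic and folds it into the output instead of
        // crashing, e.g. "(main.badStringer) (PANIC=test panic)127".
        fmt.Print(spew.Sdump(badStringer(127)))
    }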
+ addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 0000000..6ab1808 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
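TestDumpSortedKeys above depends on ConfigState.SortKeys to make map iteration order deterministic; without it the expected strings could not be compared literally. A short sketch of the same option in ordinary use, assuming the vendored github.com/davecgh/go-spew/spew package:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        // SortKeys forces map keys into a stable order, which is what lets the
        // test compare Sdump output against a fixed expected string.
        cfg := spew.ConfigState{Indent: " ", SortKeys: true}
        fmt.Print(cfg.Sdump(map[int]string{3: "3", 1: "1", 2: "2"}))
    }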
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew/testdata" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. + v2, v2l, v2c := testdata.GetCgoCharArray() + v2Len := fmt.Sprintf("%d", v2l) + v2Cap := fmt.Sprintf("%d", v2c) + v2t := "[6]testdata._Ctype_char" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + + "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() + v3Len := fmt.Sprintf("%d", v3l) + v3Cap := fmt.Sprintf("%d", v3c) + v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + + "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") + + // C signed char array. + v4, v4l, v4c := testdata.GetCgoSignedCharArray() + v4Len := fmt.Sprintf("%d", v4l) + v4Cap := fmt.Sprintf("%d", v4c) + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5, v5l, v5c := testdata.GetCgoUint8tArray() + v5Len := fmt.Sprintf("%d", v5l) + v5Cap := fmt.Sprintf("%d", v5c) + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + + "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. 
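As the build-constraint note at the top of dumpcgo_test.go explains, these cgo cases are compiled only when cgo is available and the extra build tag is supplied, so a plain go test run skips them. Against the upstream package (vendored copies are normally not tested directly), that would look like:

    go test -tags testcgo github.com/davecgh/go-spew/spew

Without the tag, the dumpnocgo_test.go stub further below provides an empty addCgoDumpTests so the rest of the suite still compiles and runs.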
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() + v6Len := fmt.Sprintf("%d", v6l) + v6Cap := fmt.Sprintf("%d", v6c) + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + + "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 0000000..52a0971 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. +} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 0000000..c6ec8c6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. 
+func ExampleDump() { + // The following package level declarations are assumed for this example: + /* + type Flag int + + const ( + flagOne Flag = iota + flagTwo + ) + + var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", + } + + func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) + } + + type Bar struct { + data uintptr + } + + type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} + } + */ + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + f := Flag(5) + b := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + + // Dump! + spew.Dump(s1, f, b) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Flag) Unknown flag (5) + // ([]uint8) (len=34 cap=34) { + // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + // 00000020 31 32 |12| + // } + // +} + +// This example demonstrates how to use Printf to display a variable with a +// format string and inline formatting. +func ExamplePrintf() { + // Create a double pointer to a uint 8. + ui8 := uint8(5) + pui8 := &ui8 + ppui8 := &pui8 + + // Create a circular data type. + type circular struct { + ui8 uint8 + c *circular + } + c := circular{ui8: 1} + c.c = &c + + // Print! + spew.Printf("ppui8: %v\n", ppui8) + spew.Printf("circular: %v\n", c) + + // Output: + // ppui8: <**>5 + // circular: {1 <*>{1 <*>}} +} + +// This example demonstrates how to use a ConfigState. +func ExampleConfigState() { + // Modify the indent level of the ConfigState only. The global + // configuration is not modified. + scs := spew.ConfigState{Indent: "\t"} + + // Output using the ConfigState instance. + v := map[string]int{"one": 1} + scs.Printf("v: %v\n", v) + scs.Dump(v) + + // Output: + // v: map[one:1] + // (map[string]int) (len=1) { + // (string) (len=3) "one": (int) 1 + // } +} + +// This example demonstrates how to use ConfigState.Dump to dump variables to +// stdout +func ExampleConfigState_Dump() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances with different indentation. + scs := spew.ConfigState{Indent: "\t"} + scs2 := spew.ConfigState{Indent: " "} + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + + // Dump using the ConfigState instances. 
+ scs.Dump(s1) + scs2.Dump(s1) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // +} + +// This example demonstrates how to use ConfigState.Printf to display a variable +// with a format string and inline formatting. +func ExampleConfigState_Printf() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances and modify the method handling of the + // first ConfigState only. + scs := spew.NewDefaultConfig() + scs2 := spew.NewDefaultConfig() + scs.DisableMethods = true + + // Alternatively + // scs := spew.ConfigState{Indent: " ", DisableMethods: true} + // scs2 := spew.ConfigState{Indent: " "} + + // This is of type Flag which implements a Stringer and has raw value 1. + f := flagTwo + + // Dump using the ConfigState instances. + scs.Printf("f: %v\n", f) + scs2.Printf("f: %v\n", f) + + // Output: + // f: 1 + // f: flagTwo +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..c49875b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
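The pointers map and cycleFound handling above is what keeps Formatter output finite for self-referential values; the circular dump tests earlier and the formatter tests below assert the short-circuit marker it emits. A small sketch of the user-visible effect, assuming the vendored github.com/davecgh/go-spew/spew package; the node type is hypothetical, similar in spirit to the circular struct used by addCircularDumpTests:

    package main

    import (
        "github.com/davecgh/go-spew/spew"
    )

    // node is a self-referential type used only for this illustration.
    type node struct {
        next *node
    }

    func main() {
        n := node{}
        n.next = &n
        // Instead of recursing forever, the formatter cuts the cycle off once a
        // pointer already seen at the current depth is reached again.
        spew.Printf("%v\n", n)
        spew.Printf("%+v\n", &n)
    }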
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 0000000..f9b93ab --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. +type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. 
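Every formatterTests case below ultimately goes through NewFormatter; the %v, %+v, %#v and %#+v combinations described in its documentation map directly onto the expected strings the helpers construct. A brief sketch of calling it directly versus through the convenience wrappers, assuming the vendored github.com/davecgh/go-spew/spew package:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        v := int8(127)
        pv := &v

        // Wrapping the argument by hand...
        fmt.Printf("%#+v\n", spew.NewFormatter(&pv)) // e.g. (**int8)(0x...->0x...)127

        // ...is equivalent to the package-level wrapper, which wraps each
        // argument in a NewFormatter internally.
        spew.Printf("%#+v\n", &pv)
    }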
+func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. 
+ v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. 
+ v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. 
+ v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. 
+ v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Slice containing interfaces. 
+ v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. + var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. 
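The nil-slice case just above, and the nil interface and nil pointer cases throughout these tests, exercise the formatter's nil handling: whenever a nil value or an exhausted pointer chain is reached, format.go writes nilAngleBytes rather than an address or an empty string. A quick sketch of the user-visible result, assuming the vendored github.com/davecgh/go-spew/spew package:

    package main

    import (
        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        var s []int       // nil slice
        var i interface{} // nil interface
        var p *int        // nil pointer

        // All three render as spew's nil marker, "<nil>".
        spew.Printf("%v %v %v\n", s, i, p)
    }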
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. + v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. + v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. 
+ type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. 
+ v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. 
+ i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. + v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. 
+ v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. + v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. 
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 0000000..20a9cfe --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" +) + +// dummyFmtState implements a fake fmt.State to use for testing invalid +// reflect.Value handling. This is necessary because the fmt package catches +// invalid values before invoking the formatter on them. +type dummyFmtState struct { + bytes.Buffer +} + +func (dfs *dummyFmtState) Flag(f int) bool { + if f == int('+') { + return true + } + return false +} + +func (dfs *dummyFmtState) Precision() (int, bool) { + return 0, false +} + +func (dfs *dummyFmtState) Width() (int, bool) { + return 0, false +} + +// TestInvalidReflectValue ensures the dump and formatter code handles an +// invalid reflect value properly. This needs access to internal state since it +// should never happen in real code and therefore can't be tested via the public +// API. +func TestInvalidReflectValue(t *testing.T) { + i := 1 + + // Dump invalid reflect value. + v := new(reflect.Value) + buf := new(bytes.Buffer) + d := dumpState{w: buf, cs: &Config} + d.dump(*v) + s := buf.String() + want := "" + if s != want { + t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) + } + i++ + + // Formatter invalid reflect value. + buf2 := new(dummyFmtState) + f := formatState{value: *v, cs: &Config, fs: buf2} + f.format(*v) + s = buf2.String() + want = "" + if s != want { + t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) + } +} + +// SortValues makes the internal sortValues function available to the test +// package. +func SortValues(values []reflect.Value, cs *ConfigState) { + sortValues(values, cs) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go new file mode 100644 index 0000000..a0c612e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2016 Dave Collins + +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. + +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// +build !js,!appengine,!safe,!disableunsafe + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" + "unsafe" +) + +// changeKind uses unsafe to intentionally change the kind of a reflect.Value to +// the maximum kind value which does not exist. This is needed to test the +// fallback code which punts to the standard fmt library for new types that +// might get added to the language. +func changeKind(v *reflect.Value, readOnly bool) { + rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) + *rvf = *rvf | ((1< + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 0000000..b70466c --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. +type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. +type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. 
+func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. + te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) 
(len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go new file mode 100644 index 0000000..5c87dd4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This code should really only be in the dumpcgo_test.go file, +// but unfortunately Go will not allow cgo in test files, so this is a +// workaround to allow cgo types to be tested. This configuration is used +// because spew itself does not require cgo to run even though it does handle +// certain cgo types specially. Rather than forcing all clients to require cgo +// and an external C compiler just to run the tests, this scheme makes them +// optional. +// +build cgo,testcgo + +package testdata + +/* +#include +typedef unsigned char custom_uchar_t; + +char *ncp = 0; +char *cp = "test"; +char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; +unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; +signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; +uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; +custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; +*/ +import "C" + +// GetCgoNullCharPointer returns a null char pointer via cgo. This is only +// used for tests. +func GetCgoNullCharPointer() interface{} { + return C.ncp +} + +// GetCgoCharPointer returns a char pointer via cgo. This is only used for +// tests. +func GetCgoCharPointer() interface{} { + return C.cp +} + +// GetCgoCharArray returns a char array via cgo and the array's len and cap. +// This is only used for tests. +func GetCgoCharArray() (interface{}, int, int) { + return C.ca, len(C.ca), cap(C.ca) +} + +// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the +// array's len and cap. This is only used for tests. +func GetCgoUnsignedCharArray() (interface{}, int, int) { + return C.uca, len(C.uca), cap(C.uca) +} + +// GetCgoSignedCharArray returns a signed char array via cgo and the array's len +// and cap. This is only used for tests. +func GetCgoSignedCharArray() (interface{}, int, int) { + return C.sca, len(C.sca), cap(C.sca) +} + +// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and +// cap. This is only used for tests. +func GetCgoUint8tArray() (interface{}, int, int) { + return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) +} + +// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via +// cgo and the array's len and cap. This is only used for tests. 
+func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { + return C.tuca, len(C.tuca), cap(C.tuca) +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 0000000..2cd087a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% 
(1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/influxdata/influxdb/.dockerignore b/vendor/github.com/influxdata/influxdb/.dockerignore new file mode 100644 index 0000000..378eac2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.dockerignore @@ -0,0 +1 @@ +build diff --git a/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..4423a0f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,56 @@ +### Directions +_GitHub Issues are reserved for actionable bug reports and feature requests._ +_General questions should be sent to the [InfluxDB Community Site](https://community.influxdata.com)._ + +_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._ +_If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below. +_Erase the other section and everything on and above this line._ + +### Bug report + +__System info:__ [Include InfluxDB version, operating system name, and other relevant details] + +__Steps to reproduce:__ + +1. [First Step] +2. [Second Step] +3. [and so on...] + +__Expected behavior:__ [What you expected to happen] + +__Actual behavior:__ [What actually happened] + +__Additional info:__ [Include gist of relevant config, logs, etc.] + +Also, if this is an issue of for performance, locking, etc the following commands are useful to create debug information for the team. + +``` +curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true" + +curl -o vars.txt "http://localhost:8086/debug/vars" +iostat -xd 1 30 > iostat.txt +``` + +**Please note** It will take at least 30 seconds for the first cURL command above to return a response. +This is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect. +Ideally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time. + +If you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether. + +Please run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue. 
+ +*Please note, the quickest way to fix a bug is to open a Pull Request.* + + +### Feature Request + +Opening a feature request kicks off a discussion. +Requests may be closed if we're not actively planning to work on them. + +__Proposal:__ [Description of the feature] + +__Current behavior:__ [What currently happens] + +__Desired behavior:__ [What you would like to happen] + +__Use case:__ [Why is this important (helps with prioritizing requests)] diff --git a/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..5cfe48f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,13 @@ +###### Required for all non-trivial PRs +- [ ] Rebased/mergable +- [ ] Tests pass +- [ ] CHANGELOG.md updated +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) + +###### Required only if applicable +_You can erase any checkboxes below this note if they are not applicable to your Pull Request._ +- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated +- [ ] Provide example syntax +- [ ] Update man page when modifying a command +- [ ] Config changes: update sample config (`etc/config.sample.toml`), server `NewDemoConfig` method, and `Diagnostics` methods reporting config settings, if necessary +- [ ] [InfluxData Documentation](https://github.com/influxdata/docs.influxdata.com): issue filed or pull request submitted \ diff --git a/vendor/github.com/influxdata/influxdb/.gitignore b/vendor/github.com/influxdata/influxdb/.gitignore new file mode 100644 index 0000000..4cfc1dd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.gitignore @@ -0,0 +1,76 @@ +# Keep editor-specific, non-project specific ignore rules in global .gitignore: +# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore + +*~ +src/ + +config.json +/bin/ + +/query/a.out* + +# ignore generated files. +cmd/influxd/version.go + +# executables + +*.test + +influx_tsm +**/influx_tsm +!**/influx_tsm/ + +influx_stress +**/influx_stress +!**/influx_stress/ + +influxd +**/influxd +!**/influxd/ + +influx +**/influx +!**/influx/ + +influxdb +**/influxdb +!**/influxdb/ + +influx_inspect +**/influx_inspect +!**/influx_inspect/ + +/benchmark-tool +/main +/benchmark-storage +godef +gosym +gocode +inspect-raft + +# dependencies +out_rpm/ +packages/ + +# autconf +autom4te.cache/ +config.log +config.status + +# log file +influxdb.log +benchmark.log + +# config file +config.toml + +# test data files +integration/migration_data/ + +# man outputs +man/*.xml +man/*.1 +man/*.1.gz + +# test outputs +/test-results.xml diff --git a/vendor/github.com/influxdata/influxdb/.hooks/pre-commit b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit new file mode 100755 index 0000000..6cf240b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l` +if [ $fmtcount -gt 0 ]; then + echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing" + exit 1 +fi + +vetcount=`go tool vet ./ 2>&1 | wc -l` +if [ $vetcount -gt 0 ]; then + echo "Some files aren't passing vet heuristics, please run 'go vet ./...' 
to see the errors it flags and correct your source code before committing" + exit 1 +fi + +# Ensure FIXME lines are removed before commit. +fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g') +if [ "$fixme_lines" != "" ]; then + echo "Please remove the following lines:" + echo -e "$fixme_lines" + exit 1 +fi +exit 0 + diff --git a/vendor/github.com/influxdata/influxdb/.mention-bot b/vendor/github.com/influxdata/influxdb/.mention-bot new file mode 100644 index 0000000..5f8689b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.mention-bot @@ -0,0 +1,6 @@ +{ + "maxReviewers": 3, + "fileBlacklist": ["CHANGELOG.md"], + "userBlacklist": ["pauldix", "toddboom", "aviau", "mark-rushakoff"], + "requiredOrgs": ["influxdata"] +} diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md new file mode 100644 index 0000000..1005f3c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -0,0 +1,2724 @@ +## v1.3.6 [unreleased] + +### Bugfixes + +- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. +- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. +- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers +- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. +- [#8842](https://github.com/influxdata/influxdb/issues/8842): Fix several races in the shard and engine. +- [#8887](https://github.com/influxdata/influxdb/pull/8887): Fix race on cache entry. + +## v1.3.5 [2017-08-29] + +### Bugfixes + +- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. +- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` + +## v1.3.4 [2017-08-23] + +### Bugfixes + +- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. +- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. +- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. +- [#8701](https://github.com/influxdata/influxdb/pull/8701): Fix drop measurement not dropping all data +- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. +- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. +- [#8713](https://github.com/influxdata/influxdb/issues/8713): Deadlock when dropping measurement and writing +- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error.
Add runtime GC finalizer as additional guard to close iterators +- [#8726](https://github.com/influxdata/influxdb/pull/8726): Fix leaking tmp file when large compaction aborted + +### Features + +- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service + +## v1.3.3 [2017-08-10] + +### Bugfixes + +- [#8681](https://github.com/influxdata/influxdb/pull/8681): Resolves a memory leak when NewReaderIterator creates a nilFloatIterator, the reader is not closed + +## v1.3.2 [2017-08-04] + +### Bugfixes + +- [#8629](https://github.com/influxdata/influxdb/pull/8629): Interrupt in progress TSM compactions +- [#8630](https://github.com/influxdata/influxdb/pull/8630): Prevent excessive memory usage when dropping series +- [#8640](https://github.com/influxdata/influxdb/issues/8640): Significantly improve performance of SHOW TAG VALUES. + +## v1.3.1 [2017-07-20] + +### Bugfixes + +- [#8559](https://github.com/influxdata/influxdb/issues/8559): Ensure temporary TSM files get cleaned up when compaction aborted. +- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive +- [#8531](https://github.com/influxdata/influxdb/issues/8531): Duplicate points generated via INSERT after DELETE +- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. + +## v1.3.0 [2017-06-21] + +### Removals + +The admin UI is removed and unusable in this release. The `[admin]` configuration section will be ignored. + +### Configuration Changes + +* The top-level config `bind-address` now defaults to `localhost:8088`. + The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. including interfaces on the public internet). + +### Features + +- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay +- [#7977](https://github.com/influxdata/influxdb/issues/7977): Add chunked request processing back into the Go client v2 +- [#7974](https://github.com/influxdata/influxdb/pull/7974): Allow non-admin users to execute SHOW DATABASES. +- [#7948](https://github.com/influxdata/influxdb/pull/7948): Reduce memory allocations by reusing gzip.Writers across requests +- [#7776](https://github.com/influxdata/influxdb/issues/7776): Add system information to /debug/vars. +- [#7553](https://github.com/influxdata/influxdb/issues/7553): Add modulo operator to the query language. +- [#7856](https://github.com/influxdata/influxdb/issues/7856): Failed points during an import now result in a non-zero exit code. +- [#7821](https://github.com/influxdata/influxdb/issues/7821): Expose some configuration settings via SHOW DIAGNOSTICS +- [#8025](https://github.com/influxdata/influxdb/issues/8025): Support single and multiline comments in InfluxQL. +- [#6541](https://github.com/influxdata/influxdb/issues/6541): Support timezone offsets for queries. +- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL. +- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL. +- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language. +- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements +- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI. 
+- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1 +- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits +- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling. +- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests. +- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability +- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in a single archive. +- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support. +- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator. +- [#7129](https://github.com/influxdata/influxdb/issues/7129): Maintain the tags of points selected by top() or bottom() when writing the results. + +### Bugfixes + +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8231](https://github.com/influxdata/influxdb/pull/8231): Fix spelling mistake in HTTP section of config -- shared-sercret +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8122](https://github.com/influxdata/influxdb/pull/8122): Suppress headers in output for influx cli when they are the same. +- [#8119](https://github.com/influxdata/influxdb/pull/8119): Add chunked/chunk size as setting/options in cli. +- [#8091](https://github.com/influxdata/influxdb/issues/8091): Do not increment the continuous query statistic if no query is run. +- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions. +- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values. +- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec. +- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default +- [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query +- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query +- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. +- [#8118](https://github.com/influxdata/influxdb/issues/8118): Significantly improve DROP DATABASE speed. +- [#8181](https://github.com/influxdata/influxdb/issues/8181): Return an error when an invalid duration literal is parsed. +- [#8093](https://github.com/influxdata/influxdb/issues/8093): Fix the time range when an exact timestamp is selected. +- [#8174](https://github.com/influxdata/influxdb/issues/8174): Fix query parser when using addition and subtraction without spaces. +- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. +- [#8171](https://github.com/influxdata/influxdb/issues/8171): Significantly improve shutdown speed for high cardinality databases. +- [#8177](https://github.com/influxdata/influxdb/issues/8177): Fix racy integration test.
+- [#8230](https://github.com/influxdata/influxdb/issues/8230): Prevent overflowing or underflowing during window computation. +- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding +- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean. +- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries. +- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions. +- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now returns the time for every point. +- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries. +- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries. +- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries. +- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages +- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error +- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values. +- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB +- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file. +- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently +- [#8387](https://github.com/influxdata/influxdb/issues/8387): Fix TSM tmp file leaked on disk +- [#8417](https://github.com/influxdata/influxdb/issues/8417): Fix large field keys preventing snapshot compactions +- [#7957](https://github.com/influxdata/influxdb/issues/7957): URL query parameter credentials take priority over Authentication header. +- [#8443](https://github.com/influxdata/influxdb/issues/8443): TSI branch has duplicate tag values. +- [#8470](https://github.com/influxdata/influxdb/issues/8470): index file fd leak in tsi branch +- [#8468](https://github.com/influxdata/influxdb/pull/8468): Fix TSI non-contiguous compaction panic. + +## v1.2.4 [2017-05-08] + +### Bugfixes + +- [#8338](https://github.com/influxdata/influxdb/pull/8338): Prefix partial write errors with `partial write:` to generalize identification in other subsystems + +## v1.2.3 [2017-04-17] + +### Bugfixes + +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get +- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. +- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. 
+- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points + +## v1.2.2 [2017-03-14] + +### Release Notes + +### Configuration Changes + +#### `[http]` Section + +* `max-row-limit` now defaults to `0`. The previous default was `10000`, but due to a bug, the value in use since `1.0` was `0`. + +### Bugfixes + +- [#8050](https://github.com/influxdata/influxdb/issues/8050): influxdb & grafana, absence of data on the graphs + +## v1.2.1 [2017-03-08] + +### Release Notes + +### Bugfixes + +- [#8100](https://github.com/influxdata/influxdb/issues/8100): Include IsRawQuery in the rewritten statement for meta queries. +- [#8095](https://github.com/influxdata/influxdb/pull/8095): Fix race in WALEntry.Encode and Values.Deduplicate +- [#8085](https://github.com/influxdata/influxdb/issues/8085): panic: interface conversion: tsm1.Value is tsm1.IntegerValue, not tsm1.FloatValue. +- [#8084](https://github.com/influxdata/influxdb/issues/8084): Points missing after compaction +- [#8080](https://github.com/influxdata/influxdb/issues/8080): Point.UnmarshalBinary() bounds check +- [#8078](https://github.com/influxdata/influxdb/issues/8078): Map types correctly when selecting a field with multiple measurements where one of the measurements is empty. +- [#8044](https://github.com/influxdata/influxdb/issues/8044): Treat non-reserved measurement names with underscores as normal measurements. +- [#8040](https://github.com/influxdata/influxdb/issues/8040): Reduce the expression in a subquery to avoid a panic. +- [#8028](https://github.com/influxdata/influxdb/issues/8028): Fix panic in collectd when configured to read types DB from directory. +- [#8001](https://github.com/influxdata/influxdb/issues/8001): Map types correctly when using a regex and one of the measurements is empty. +- [#7968](https://github.com/influxdata/influxdb/issues/7968): Properly select a tag within a subquery. +- [#7966](https://github.com/influxdata/influxdb/pull/7966): Prevent a panic when aggregates are used in an inner query with a raw query. +- [#7946](https://github.com/influxdata/influxdb/issues/7946): Fix authentication when subqueries are present. +- [#7910](https://github.com/influxdata/influxdb/issues/7910): Fix EvalType when a parenthesis expression is used. +- [#7906](https://github.com/influxdata/influxdb/issues/7906): Anchors not working as expected with case-insensitive regex +- [#7905](https://github.com/influxdata/influxdb/issues/7905): Fix ORDER BY time DESC with ordering series keys. +- [#7895](https://github.com/influxdata/influxdb/issues/7895): Fix incorrect math when aggregates that emit different times are used. +- [#7888](https://github.com/influxdata/influxdb/pull/7888): Expand query dimensions from the subquery. +- [#7885](https://github.com/influxdata/influxdb/issues/7885): Fix LIMIT and OFFSET when they are used in a subquery. +- [#7880](https://github.com/influxdata/influxdb/issues/7880): Dividing aggregate functions with different outputs doesn't panic. +- [#7877](https://github.com/influxdata/influxdb/issues/7877): Fix mapping of types when the measurement uses a regex + +## v1.2.0 [2017-01-24] + +### Release Notes + +This release introduces a major new querying capability in the form of sub-queries, and provides several performance improvements, including a 50% or better gain in write performance on larger numbers of cores. The release adds some stability and memory-related improvements, as well as several CLI-related bug fixes. 
If upgrading from a prior version, please read the configuration changes in the following section before upgrading. + +### Configuration Changes + +The following new configuration options are available, if upgrading to `1.2.0` from prior versions. + +#### `[[collectd]]` Section + +* `security-level` which defaults to `"none"`. This field also accepts `"sign"` and `"encrypt"` and enables different levels of transmission security for the collectd plugin. +* `auth-file` which defaults to `"/etc/collectd/auth_file"`. Specifies where to locate the authentication file used to authenticate clients when using signed or encrypted mode. + +### Deprecations + +The stress tool `influx_stress` will be removed in a subsequent release. We recommend using [`influx-stress`](https://github.com/influxdata/influx-stress) as a replacement. + +### Features + +- [#7830](https://github.com/influxdata/influxdb/pull/7830): Cache snapshotting performance improvements +- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS. +- [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli. +- [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct. +- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file. +- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly. +- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards. +- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps. +- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking. +- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present. +- [#7326](https://github.com/influxdata/influxdb/issues/7326): Verbose output for SSL connection errors. +- [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV +- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd. +- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere. +- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support subquery execution in the query language. +- [#3188](https://github.com/influxdata/influxdb/issues/3188): [CLI feature request] USE retention policy for queries. + +### Bugfixes + +- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine. +- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled. +- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory +- [#7814](https://github.com/influxdata/influxdb/issues/7814): InfluxDB should do a partial write on mismatched type errors. +- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes +- [#7786](https://github.com/influxdata/influxdb/pull/7786): Fix potential race condition in correctness of tsm1_cache memBytes statistic. 
+- [#7784](https://github.com/influxdata/influxdb/pull/7784): Fix broken error return on meta client's UpdateUser and DropContinuousQuery methods. +- [#7741](https://github.com/influxdata/influxdb/pull/7741): Fix string quoting and significantly improve performance of `influx_inspect export`. +- [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj +- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements. +- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates. +- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen +- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped. +- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer. +- [#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`. +- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey +- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen +- [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values. +- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration. +- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs. +- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision.... + + +## v1.1.5 [2017-04-28] + +### Bugfixes + +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method + +## v1.1.4 [2017-02-27] + +### Bugfixes + +- [#8063](https://github.com/influxdata/influxdb/pull/8063): Backport #7631 to reduce GC allocations. + +## v1.1.3 [2017-02-17] + +### Bugfixes + +- [#8027](https://github.com/influxdata/influxdb/pull/8027): Remove Tags.shouldCopy, replace with forceCopy on series creation. + +## v1.1.2 [2017-02-16] + +### Bugfixes + +- [#7832](https://github.com/influxdata/influxdb/pull/7832): Fix memory leak when writing new series over HTTP +- [#7929](https://github.com/influxdata/influxdb/issues/7929): Fix series tag iteration segfault. (#7922) +- [#8011](https://github.com/influxdata/influxdb/issues/8011): Fix tag dereferencing panic. + +## v1.1.1 [2016-12-06] + +### Features + +- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4. + +### Bugfixes + +- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes +- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident. +- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message. + +### Security + +[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues. This release includes these security fixes. 
+ +## v1.1.0 [2016-11-14] + +### Release Notes + +This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes section below before upgrading. + +### Deprecations + +The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement. + +### Configuration Changes + +The following configuration settings may need to be changed before upgrading to `1.1.0` from prior versions. + +#### `[admin]` Section + +* `enabled` now defaults to false. If you are currently using the admin interface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release. + +#### `[data]` Section + +* `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tag cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement. +* `cache-max-memory-size` has been increased from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit. +* `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes. +* `compact-full-write-cold-duration` has been decreased from `24h` to `4h`. The shorter duration allows cold shards to be compacted to an optimal state more quickly. + +### Features + +The query language has been extended with a few new features: + +- [#7442](https://github.com/influxdata/influxdb/pull/7442): Support regex on field keys in select clause +- [#7403](https://github.com/influxdata/influxdb/pull/7403): New `linear` fill option +- [#7388](https://github.com/influxdata/influxdb/pull/7388): New `cumulative_sum` function +- [#7295](https://github.com/influxdata/influxdb/pull/7295): Support `ON` for `SHOW` commands + + +All Changes: + +- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series. +- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index. +- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats. +- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time. +- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement. +- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent. +- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently.
+- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language. +- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language. +- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function. +- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices +- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad +- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors. +- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully. +- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data +- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3. +- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enable HTTP service over unix domain socket. Thanks @oiooj +- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor. +- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files. +- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer. +- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries. +- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands. +- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI. +- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI. +- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. +- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. +- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. + +### Bugfixes + +- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit. +- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data. +- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES. +- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation. +- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys. +- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. +- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies +- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. +- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint. 
+- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. +- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations +- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. +- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database +- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. +- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! +- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 +- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. +- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. +- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. +- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified +- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. +- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key. + +## v1.0.2 [2016-10-05] + +### Bugfixes + +- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers +- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions +- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load +- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY +- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered. + +## v1.0.1 [2016-09-26] + +### Bugfixes + +- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic. +- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement. +- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars. +- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller! +- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals. +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series. 
+- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client. + +## v1.0.0 [2016-09-08] + +### Release Notes + +### Breaking changes + +* `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail. +* Config option `[cluster]` has been replaced with `[coordinator]` +* Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead. +* Config option `data-logging-enabled` within the `[data]` section has been renamed to `trace-logging-enabled`, and defaults to `false`. +* The keywords `IF`, `EXISTS`, and `NOT` were removed for this release. This means you no longer need to specify `IF NOT EXISTS` for `CREATE DATABASE` or `IF EXISTS` for `DROP DATABASE`. If these are specified, a query parse error is returned. +* The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats. + +With this release, the systemd configuration files for InfluxDB will use the system configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald. + +### Features + +- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav. +- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine. +- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats +- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting. +- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language. +- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies. +- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool. +- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands. +- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails. +- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon +- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket. +- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled. +- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files. +- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file. +- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options +- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format. +- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help. +- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing.
+- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding +- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size. +- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions. +- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring +- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database +- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function. +- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication. +- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards. +- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators +- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits. +- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type. +- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval. +- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI. +- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions. +- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service. +- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine. +- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine. +- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key. +- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable. +- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key. +- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser. +- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config. +- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files. + +### Bugfixes + +- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string. +- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input. +- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit +- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used. +- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit +- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export +- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values. +- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice. 
+- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements +- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen +- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries. +- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions +- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards +- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map. +- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span. +- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset. +- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key +- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli. +- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config. +- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency. +- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files +- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp +- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches. +- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement. +- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators. +- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect. +- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer. +- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package. +- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli. +- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor. +- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup +- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less. +- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency. +- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints +- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range +- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection. +- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT +- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement +- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly. 
+- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write +- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests). +- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values. +- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points +- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types +- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. Thanks @arussellsaw +- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window. +- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges. +- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy. +- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime. +- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes +- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files +- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points. +- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace. +- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS. +- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate. +- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue +- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it +- [#6648](https://github.com/influxdata/influxdb/issues/6648): Make sure admin exists before authenticating query. +- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log. +- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF +- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore. +- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body +- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading +- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points +- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause. +- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code +- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries. 
+- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions +- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions +- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression. +- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded +- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time +- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine. +- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files +- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ + +## v0.13.0 [2016-05-12] + +### Release Notes + +With this release InfluxDB is moving to Go v1.6. + +### Features + +- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3) +- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES +- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol +- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service. +- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max(). +- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE +- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM +- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path. +- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold. +- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps. +- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type. +- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector. +- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations. +- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. +- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu +- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. +- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable. +- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used. +- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool +- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc. +- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause. 
+- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. +- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. +- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call. +- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. + +### Bugfixes + +- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause. +- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index +- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards. +- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error. +- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists +- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown. +- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series +- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values +- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. +- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove +- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. +- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection +- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. +- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. +- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. +- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. 
+- [#6277](https://github.com/influxdata/influxdb/pull/6277): Fix deadlock in tsm1/file_store +- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction +- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT +- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv +- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. +- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup +- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval. +- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs. +- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. +- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. + +## v0.12.2 [2016-04-20] + +### Bugfixes + +- [#6431](https://github.com/influxdata/influxdb/pull/6431): Fix panic in transform iterator on division. @thbourlove +- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. +- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. +- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. +- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. + +## v0.12.1 [2016-04-08] + +### Bugfixes + +- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent. +- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key. +- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. +- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets. +- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu +- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly. +- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields. + + +## v0.12.0 [2016-04-05] +### Release Notes +Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/). + +This release removes all of the old clustering code. It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay). + +### Features + +- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour! 
+- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
+- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes.
+- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shut down.
+- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.
+- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes
+- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output.
+- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.
+- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. Improved handling of TAR and ZIP package outputs.
+- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection
+- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.
+- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.
+- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.
+- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.
+- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats
+- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies
+- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.
+- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.
+- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.
+- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.
+- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading
+- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.
+
+### Bugfixes
+
+- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy
+- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database
+- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database
+- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed.
+- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurements
+- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM
+- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.
+- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for a process termination on influxdb restart @simnv
+- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'
+- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.
+- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux
+- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'
+- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.
+
+## v0.11.1 [2016-03-31]
+
+### Bugfixes
+
+- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statistics
+- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host
+- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index
+- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails
+- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.
+
+## v0.11.0 [2016-03-22]
+
+### Release Notes
+
+There were some important breaking changes in this release. Here's a list of the important things to know before upgrading:
+
+* [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).
+* [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)
+* JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.
+* b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md).
+* On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned.
+* On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later.
+
+### Features
+
+- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server
+- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions
+- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy
+- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour
+- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran!
+- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries +- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup +- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped. +- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor +- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm +- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment +- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon +- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags. +- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output. +- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty) +- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev +- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client. +- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default. +- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce +- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev +- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw +- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri! 
+- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause + +### Bugfixes + +- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu +- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups +- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float +- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock +- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT +- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master +- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling +- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64 +- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases +- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. @chris-ramon +- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered +- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. 
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json +- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664 +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query +- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon +- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy. +- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets +- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd +- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster +- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup +- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei +- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin +- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin +- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky +- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries + +## v0.10.3 [2016-03-09] + +### Bugfixes + +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter + +## v0.10.2 [2016-03-03] + +### Bugfixes + +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy. 
+- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate
+- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
+- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
+
+## v0.10.1 [2016-02-18]
+
+### Bugfixes
+
+- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
+- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
+- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
+- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
+- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor
+
+## v0.10.0 [2016-02-04]
+
+### Release Notes
+
+This release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For new shards getting created, or new installations, the TSM storage engine will be used.
+
+This release also changes how clusters are set up. The config file has changed, so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Send us a note at contact@influxdb.com if you need assistance upgrading a cluster.
+
+### Features
+
+- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale
+- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.
+- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!
+- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.
+- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.
+- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).
+- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91
+- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.
+- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires
+
+### Bugfixes
+
+- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
+- [#5504](https://github.com/influxdata/influxdb/issues/5504): create retention policy on unexistant DB crash InfluxDB
+- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic
+- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
+- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
+- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described +- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment +- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points +- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values. +- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory +- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup' +- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range +- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty. +- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times. +- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later. +- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires +- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires +- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol. +- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard +- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches. +- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74 +- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei +- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz +- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals. +- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj +- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query query error. Thanks @li-ang +- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results. +- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input. +- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases. + +## v0.9.6 [2015-12-09] + +### Release Notes +This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database. + +If you had TSM configuration options set, those have been updated. 
See the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125
+
+### Features
+
+- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
+- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
+- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!
+- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
+- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
+- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
+- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
+- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
+
+### Bugfixes
+
+- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
+- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
+- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
+- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
+- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
+- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
+- [#4974](https://github.com/influxdata/influxdb/issues/4974): Fix Data Race in TSDB when setting measurement field name
+- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
+- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!
+- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang, Fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
+- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
+- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
+- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
+- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
+- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
+- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
+- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
+- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node.
Thanks @mengjinglei +- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types +- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock +- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires! +- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse. + +## v0.9.5 [2015-11-20] + +### Release Notes + +- Field names for the internal stats have been changed to be more inline with Go style. +- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1. + +There are breaking changes in this release: +- The filesystem hierarchy for packages has been changed, namely: + - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`) + - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`) + - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`) + - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`) + +### Features + +- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI +- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage +- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex +- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures +- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou +- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. +- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! +- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries. +- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input. +- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. +- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. 
+- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics +- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. +- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). +- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader +- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client) +- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires +- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements +- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau +- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues +- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners +- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. +- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! + +### Bugfixes + +- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause +- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order +- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701). +- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0. +- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj +- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj +- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn +- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. +- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW +- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires +- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order +- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites +- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier +- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information. 
+- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service +- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. +- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. +- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected +- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions +- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170) +- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown +- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor +- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point +- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values +- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster +- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! +- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. +- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. +- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions +- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing +- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson +- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272) +- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. +- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources +- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. +- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. +- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. +- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib +- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. +- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! +- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given. 
+- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'
+- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests
+- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)
+- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes
+- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
+- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause
+- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)
+- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
+- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
+- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values
+- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)
+- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck
+- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes
+- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
+- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.
+- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint
+- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
+- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
+- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
+- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package
+- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto
+- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
+- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.
+- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics
+- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
+- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range
+- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9
+- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored
+- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau
+- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style.
+- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj.
+- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines +- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch +- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. +- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. +- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. +- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda + +## v0.9.4 [2015-09-14] + +### Release Notes +With this release InfluxDB is moving to Go 1.5. + +### Features + +- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it +- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821) +- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration. +- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes + +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed. +- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement. 
+- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636) +- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! +- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038) +- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input. +- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse + +## v0.9.3 [2015-08-26] + +### Release Notes + +There are breaking changes in this release. + - To store data points as integers you must now append `i` to the number if using the line protocol. + - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. 
+ - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.
+ - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead, any tags in the data are now part of the columns in the returned query.
+
+Please see the *Features* section below for full details.
+
+### Features
+
+- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping
+- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster
+- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2
+- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins
+- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers
+- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc
+- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes
+- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI
+- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order
+- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields
+- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables
+- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale
+- [#3636](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3
+- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename
+- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.
+- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.
+- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
+- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch
+- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
+- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
+- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
+- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
+
+### Bugfixes
+
+- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
+- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
+- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
+- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string.
Thanks @jhorwit2
+- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
+- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic
+- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
+- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
+- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
+- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
+- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
+- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
+- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
+- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
+- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
+- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
+- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
+- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
+- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
+- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
+- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
+- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
+- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
+- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
+- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
+- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
+- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
+- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
+- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
+- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
+- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
+- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
+- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication +- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash +- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail +- [#3681](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes +- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks +- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme +- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always +- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values +- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time. +- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types +- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point +- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1 +- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! + +## v0.9.2 [2015-07-24] + +### Features +- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham +- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support. +- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho + +### Bugfixes + +- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore. +- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdata/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. 
Thanks @jhorwit2 +- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. +- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report +- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. +- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. +- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel +- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke. +- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries. +- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. +- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. +- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces +- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI. +- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. + +## v0.9.1 [2015-07-02] + +### Features + +- [#2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. +- [#3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing +- [#2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface +- [#3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL) +- [#3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots + +### Bugfixes + +- [#3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas +- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative +- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated +- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader. +- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated +- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name +- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats +- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up. +- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors. +- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. +- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses. +- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch. +- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified. +- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes. +- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. +- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation. +- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. +- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client. +- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. +- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. +- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value. +- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT. +- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing +- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing +- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf. +- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd +- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling +- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. +- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value +- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database +- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic +- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas +- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd +- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache +- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr +- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
+ +## v0.9.0 [2015-06-11] + +### Bugfixes + +- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic +- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors +- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields +- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists +- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists +- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary +- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary +- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore +- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! +- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size +- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers +- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting. +- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values. +- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists + +## v0.9.0-rc33 [2015-06-09] + +### Bugfixes + +- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan- +- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao +- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service. +- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite. +- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. +- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service. +- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo +- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values. +- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid. +- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions. +- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation. +- [#2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata. + +### Features +- [#2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency. + +## v0.9.0-rc32 [2015-06-07] + +### Release Notes + +This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released. + +### Features +- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values. +- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
+- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md +- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs. +- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs. +- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data; see the short write sketch below. +- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. +- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! + +### Bugfixes +- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement. +- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. +- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM //. +- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart +- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! + +## v0.9.0-rc31 [2015-05-21] + +### Features +- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate +- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function +- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time` +- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate +- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m` + +### Bugfixes +- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. +- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes! +- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster. +- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. +- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation +- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. +- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field. +- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with a tag as a value causes panic. +- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field. +- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause. +- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. + +## PRs +- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions +- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements +- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
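The line protocol added in #2696 above is also what the InfluxDB Go client emits under the hood: you build points and batches with the `client/v2` package and it serialises them as line protocol on `Write`. A minimal sketch, assuming a hypothetical local InfluxDB at `localhost:8086` and a hypothetical `mydb` database:

```go
package main

import (
	"log"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Hypothetical local InfluxDB instance.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// A batch of points; the client sends it as line protocol on Write.
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "mydb", // hypothetical database
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}

	pt, err := client.NewPoint(
		"cpu",                                 // measurement
		map[string]string{"host": "server01"}, // tags
		map[string]interface{}{"value": 0.64}, // fields
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)

	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}
```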
+ +## v0.9.0-rc30 [2015-05-12] + +### Release Notes + +This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. + +### Features +- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate +- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP +- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK` +- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! +- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. +- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT. + +### Bugfixes +- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. +- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated. +- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart +- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster. +- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! +- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! +- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging. +- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message. +- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. +- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. +- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! +- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine. +- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution. +- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression. + +## v0.9.0-rc29 [2015-05-05] + +### Features +- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. +- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. +- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! + +### Bugfixes +- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart +- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters +- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo(). +- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. 
Thanks @josh-padnick +- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz +- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shard groups are required during write. +- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart +- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex + +## v0.9.0-rc28 [2015-04-27] + +### Features +- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers +- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! + +### Bugfixes +- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile +- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes +- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error +- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop +- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state +- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server. +- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null. +- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils +- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series". +- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes + +## v0.9.0-rc27 [2015-04-23] + +### Features +- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards. + +### Bugfixes +- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint. +- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker. Fixes #2352 +- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint. +- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. +- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times +- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster. +- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
+- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled +- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present + +## v0.9.0-rc26 [04-21-2015] + +### Features +- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover +- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes +- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes + +### Bugfixes +- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. +- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries +- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. +- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. +- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. + +## v0.9.0-rc25 [2015-04-15] + +### Bugfixes +- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. +- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. +- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker. +- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. +- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug. +- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). 
+ +## Features +- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation. +- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! +- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! +- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! +- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled. + +## v0.9.0-rc24 [2015-04-13] + +### Bugfixes +- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy. +- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. +- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types. +- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill" +- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints +- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI. + +## v0.9.0-rc23 [2015-04-11] + +### Features +- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries +- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. + +### Bugfixes +- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. + +## v0.9.0-rc22 [2015-04-09] + +### Features +- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g + +### Bugfixes +- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote + +## v0.9.0-rc21 [2015-04-09] + +### Features +- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g +- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor + +### Bugfixes +- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries. 
+- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium + +## v0.9.0-rc20 [2015-04-04] + +### Features +- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes +- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore. +- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. +- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. + +## v0.9.0-rc19 [2015-04-01] + +### Features +- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging. + +### Bugfixes +- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`. + +## v0.9.0-rc18 [2015-03-31] + +### Bugfixes +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index. +- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags(). +- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags(). +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report. +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tags values. +- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache. + +## v0.9.0-rc17 [2015-03-29] + +### Features +- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script +- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint. 
+- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database. +- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed + +### Bugfixes +- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed +- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allowing leading underscores in identifiers. +- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds. +- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases". +- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters. +- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability. +- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others. +- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon. + +## v0.9.0-rc16 [2015-03-24] + +### Features +- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats. +- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client. +- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. +- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version. +- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore. +- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. + +### Bugfixes +- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level. +- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails. +- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate. +- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command. +- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite. +- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart. +- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field. +- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY. + +## v0.9.0-rc15 [2015-03-19] + +### Features +- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. +- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats. + +### Bugfixes +- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200. +- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. +- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats.
+- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). +- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann +- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern + + +## v0.9.0-rc14 [2015-03-18] + +### Bugfixes +- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. + +## v0.9.0-rc13 [2015-03-17] + +### Features +- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs. + +### Bugfixes +- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization. +- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint. +- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config. +- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided. +- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions. + +## v0.9.0-rc12 [2015-03-15] + +### Bugfixes +- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names. +- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64. +- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio +- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio + +### Features +- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft. +- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring + +### Features +- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command. + +## v0.9.0-rc11 [2015-03-13] + +### Bugfixes +- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed. +- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test. +- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created. +- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified. +- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. + +### Features +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. 
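The `fill()` variants added in #1925 just above control what a `GROUP BY time()` bucket returns when it contains no points: `fill(previous)` repeats the last seen value, while `fill(none)` drops the empty buckets. A minimal sketch of issuing such a query through the InfluxDB Go client (`client/v2`), assuming a hypothetical local server and a hypothetical `mydb` database with a `cpu` measurement:

```go
package main

import (
	"fmt"
	"log"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Hypothetical local InfluxDB instance.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// fill(previous) repeats the last seen value for empty 5m buckets;
	// fill(none) would drop those buckets instead.
	q := client.NewQuery(
		"SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(5m) fill(previous)",
		"mydb", // hypothetical database
		"s",
	)
	resp, err := c.Query(q)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error() != nil {
		log.Fatal(resp.Error())
	}
	fmt.Println(resp.Results)
}
```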
+ +## v0.9.0-rc10 [2015-03-09] + +### Bugfixes +- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select + +### Features +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` + +## v0.9.0-rc9 [2015-03-06] + +### Bugfixes +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft + +## v0.9.0-rc8 [2015-03-05] + +### Bugfixes +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +### Features +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path + +## v0.9.0-rc7 [2015-03-02] + +### Features +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. +- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. + +### Bugfixes + +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh +- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos + +## v0.9.0-rc6 [2015-02-27] + +### Bugfixes + +- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser +- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. 
+ +## v0.9.0-rc5 [2015-02-27] + +### Bugfixes + +- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd. +- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. +- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761. +- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval +- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` + +### Features + +- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT + +## v0.9.0-rc4 [2015-02-24] + +### Bugfixes + +- Fix authentication issue with continuous queries +- Print version in the log on startup + +## v0.9.0-rc3 [2015-02-23] + +### Features + +- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf' +- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types +- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE +- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries +- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement +- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE +- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon +- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work +- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function +- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions +- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function +- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API +- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies +- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE +- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause + +### Bugfixes + +- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE +- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes +- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance +- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. 
ones without aggregate functions) +- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field +- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. + + +## v0.9.0-rc1,2 [no public release] + +### Features + +- Support for tags added +- New queries for showing measurement names, tag keys, and tag values +- Renamed shard spaces to retention policies +- Deprecated matching against regex in favor of explicit writing and querying on retention policies +- Pure Go InfluxQL parser +- Switch to BoltDB as underlying datastore +- BoltDB backed metastore to store schema information +- Updated HTTP API to only have two endpoints `/query` and `/write` +- Added all administrative functions to the query language +- Change cluster architecture to have brokers and data nodes +- Switch to streaming Raft implementation +- In memory inverted index of the tag data +- Pure Go implementation! + +## v0.8.6 [2014-11-15] + +### Features + +- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support + joining using a regex or list of time series +- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print + the processor chain when the query is started + +### Bugfixes + +- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't + panic if the process died while initializing +- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make + sure all sub services are closed when we are stopping InfluxDB +- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix + the Makefile package target for Mac OSX +- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use + su instead of sudo in the init script. This fixes the startup problem + on RHEL 6. +- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't + generate invalid query strings for single point queries +- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't + take two snapshots at the same time +- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit + nicely if the daemon doesn't have permission to write to the log. +- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using + closed connections in the protobuf client. +- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check + for valgrind and mercurial in the configure script +- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should + fill the time range even if no points exist in the given time range +- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return + an appropriate exit status code depending on whether the process exits + due to an error or exits gracefully. +- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting + open files limit causes influxdb to create shards in loop. +- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix + deprecated interface endpoint in Admin UI. +- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix + the timestamps of data points written by the collectd plugin. (Thanks, + @renchap for reporting this bug) +- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure + we don't resurrect shard directories for shards that have already expired +- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). 
Set + the connection string of the local raft node +- [Issue #1093](https://github.com/influxdata/influxdb/issues/1093). Set + the connection string of the local node in the raft snapshot. +- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing + a non-existent shard space causes the cluster to panic. +- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil + engine.ProcessorChain causes a panic. + +## v0.8.5 [2014-10-27] + +### Features + +- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow + graphite and collectd input plugins to have separate binding address + +### Bugfixes + +- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use + the query language instead of the continuous query endpoints that + were removed in 0.8.4 +- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return + an +Inf or NaN instead of panicking when we encounter a divide by zero +- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't + scan through points when we hit the limit +- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix + timestamps when collectd is used and low resolution timestamps + are set. + +## v0.8.4 [2014-10-24] + +### Bugfixes + +- Remove the continuous query api endpoints since the query language + has all the features needed to list and delete continuous queries. +- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting + from a non-existent series should give a better error message indicating + that the series doesn't exist +- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check + the arguments of `top()` and `bottom()` +- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make + redirecting to standard output and standard error optional instead of + going to `/dev/null`. This can now be configured by setting `$STDOUT` + in `/etc/default/influxdb` +- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make + sure we drop a shard only when there's no one using it. Otherwise, the + shard can be closed when another goroutine is writing to it which will + cause random errors and possibly corruption of the database. + +### Features + +- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow + merge() to take a list of series (as opposed to a regex in #72) + +## v0.8.4-rc.1 [2014-10-21] + +### Bugfixes + +- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). Revert + to older raft snapshot if the latest one is corrupted +- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying + for data outside of existing shards returns an empty response instead of + throwing a `Couldn't lookup columns` error +- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change + init script exit codes to conform to the lsb standards. (Thanks, @spuder) +- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix + the tarball for homebrew so that rocksdb is included and the directory + structure is clean +- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix + the content type when an error occurs and the client requests + compression. +- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set + the ulimit in the init script with a way to override the limit +- [Issue #742](https://github.com/influxdata/influxdb/issues/742). 
Fix + rocksdb for Mac OSX +- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations + with group by time(1w), time(1m) and time(1y) (for week, month and + year respectively) will cause the start time and end time of the bucket + to fall on the logical boundaries of the week, month or year. +- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative + for queries with group by time() and fill(), will take the difference + between the first value in the bucket and the first value of the next + bucket. +- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't + assign duplicate server ids + +### Features + +- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add + an install target to the Makefile +- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include + the admin ui static assets in the binary +- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade + to rocksdb 3.5.1 +- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add + an input plugin for collectd. (Thanks, @kimor79) +- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge + for multiple series using regex syntax + +## v0.8.3 [2014-09-24] + +### Bugfixes + +- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple + queries separated by semicolons work as expected. Queries are processed + sequentially +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an + error if an invalid column is used in the where clause +- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case + insensitive regex matching +- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move + cluster config from raft to API. +- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't + panic on invalid boolean operators. +- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names +- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix + fill() for all aggregators +- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose + table names in double quotes in the result of GetQueryString() +- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an + error if the storage engine can't be created +- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically + create shards which was causing too many shards to be created when used with + grafana +- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should + ignore null values and invalid values, e.g. strings with mean(). +- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse + big int in queries properly. + +## v0.8.2 [2014-09-05] + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults + +- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series + +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). 
Return + a meaningful error if an invalid column is used in where clause + after joining multiple series + +## v0.8.2 [2014-09-08] + +### Features + +- Added API endpoint to update shard space definitions + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB + +## v0.8.1 [2014-09-03] + +- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe + +### Bugfixes + +- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) + +## v0.8.0 [2014-08-22] + +### Features + +- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative + +### Bugfixes + +- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. + +## v0.8.0-rc.5 [2014-08-15] + +### Features + +- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 + +### Bugfixes + +- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific + +## v0.8.0-rc.4 [2014-07-29] + +### Bugfixes + +- [Issue #774](https://github.com/influxdata/influxdb/issues/774). 
Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) +- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value +- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join + +## v0.8.0-rc.3 [2014-07-21] + +### Bugfixes + +- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. + + +## v0.8.0-rc.2 [2014-07-15] + +- This release is to fix a build error in rc1 which caused rocksdb to not be available +- Bump up the `max-open-files` option to 1000 on all storage engines +- Lower the `write-buffer-size` to 1000 + +## v0.8.0-rc.1 [2014-07-15] + +### Features + +- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines +- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft +- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix +- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous +- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies +- Default storage engine changed to RocksDB + +### Bugfixes + +- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe) +- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs +- [Issue #676](https://github.com/influxdata/influxdb/issues/676). 
Allow storing high precision integer values without losing any information +- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) +- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false +- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled +- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character +- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes +- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore +- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series +- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error +- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database +- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient + +## v0.7.3 [2014-06-13] + +### Bugfixes + +- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly +- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. + +## v0.7.2 [2014-05-30] + +### Features + +- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) + +### Bugfixes + +- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. +- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist +- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards + +### Features + +## v0.7.1 [2014-05-29] + +### Bugfixes + +- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases +- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data + +### Features + +- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture +- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis +- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day. + +## v0.7.0 [2014-05-23] + +### Bugfixes + +- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdata/influxdb/issues/524). 
Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a + race condition in assigning id to db+series+field (Thanks @ohurvitz + for reporting this bug and providing a script to repro) + +### Features + +- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) + +### Deprecated + +- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins + +## v0.6.5 [2014-05-19] + +### Features + +- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) + +### Bugfixes + +- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format + +## v0.6.4 [2014-05-16] + +### Features + +- Make the write batch size configurable (also applies to deletes) +- Optimize writing to multiple series +- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) + +### Bugfixes + +- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards +- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command + +## v0.6.3 [2014-05-13] + +### Features + +- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01) +- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file + +### Bugfixes + +- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used +- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error +- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received +- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns +- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are creating shouldn't cause new nodes to panic +- [Issue #539](https://github.com/influxdata/influxdb/issues/539). 
count(distinct()) with fill shouldn't panic on empty groups +- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating + +## v0.6.2 [2014-05-09] + +### Bugfixes + +- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created +- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values +- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json +- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files +- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery + +## v0.6.1 [2014-05-06] + +### Bugfixes + +- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations +- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected +- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster +- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards +- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns +- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off + +## v0.6.0 [2014-05-02] + +### Feature + +- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous) +- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) + +### Bugfixes + +- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped +- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file +- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller) +- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series +- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user password's cannot be changed (Thanks, Edward Muller) +- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly + +## v0.5.12 [2014-04-29] + +### Bugfixes + +- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user + +## v0.5.11 [2014-04-25] + +### Features + +- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api + +### Bugfixes + +- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops +- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization + +## v0.5.10 [2014-04-22] + +### Features + +- [Issue #463](https://github.com/influxdata/influxdb/issues/463). 
Allow series names to use any character (escape by wrapping in double quotes) +- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames +- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes) + +### Bugfixes + +- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1 +- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work + +## v0.5.9 [2014-04-18] + +### Bugfixes + +- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors +- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value +- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values + +## v0.5.8 [2014-04-17] + +- Renamed config.toml.sample to config.sample.toml + +### Bugfixes + +- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast +- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up +- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that + aggregation queries over large periods of time don't take insance amount of memory + +## v0.5.7 [2014-04-15] + +### Features + +- Queries are now logged as INFO in the log file before they run + +### Bugfixes + +- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work +- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs +- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected + causing count(*) queries on large time series to use + lots of memory +- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly +- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart +- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query +- Fix some bugs with the WAL recovery on startup + +## v0.5.6 [2014-04-08] + +### Features + +- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries +- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database + +### Bugfixes + +- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second +- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user +- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option +- [Issue #392](https://github.com/influxdata/influxdb/issues/392). 
Different columns in different shards returns invalid results when a query spans those shards + +### Bugfixes + +## v0.5.5 [2014-04-04] + +- Upgrade leveldb 1.10 -> 1.15 + + This should be a backward compatible change, but is here for documentation only + +### Feature + +- Add a command line option to repair corrupted leveldb databases on startup +- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause + +### Bugfixes + +- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang +- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries +- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server +- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values +- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics +- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin +- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized +- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131 + +## v0.5.4 [2014-04-02] + +### Bugfixes + +- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots +- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely +- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable +- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT) +- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation + +## v0.5.3 [2014-03-31] + +### Bugfixes + +- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index +- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards +- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses +- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin + +## v0.5.2 [2014-03-28] + +### Bugfixes + +- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart +- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out +- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings +- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause +- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Seris index isn't deleted when the series is dropped +- [Issue #360](https://github.com/influxdata/influxdb/issues/360). 
Store and recover continuous queries + +## v0.5.1 [2014-03-24] + +### Bugfixes + +- Revert the version of goraft due to a bug found in the latest version + +## v0.5.0 [2014-03-24] + +### Features + +- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener + +### Bugfixes + +- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order + +## v0.5.0-rc.6 [2014-03-20] + +### Bugfixes + +- Increase raft election timeout to avoid unecessary relections +- Sort points before writing them to avoid an explosion in the request + number when the points are written randomly +- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries +- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries +- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail + when the password is too short or too long instead of passing it to + the crypto library + +## v0.5.0-rc.5 [2014-03-11] + +### Bugfixes + +- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering +- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache +- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user +- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make + sure we don't starve goroutines if we get an access denied error + from one of the shards +- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory +- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points +- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314 +- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly + +## v0.5.0-rc.4 [2014-03-07] + +### Bugfixes + +- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards +- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart +- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server +- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc) + +## v0.5.0-rc.3 [2014-03-03] + +### Bugfixes +- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable +- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable +- Make LevelDB use the max open files configuration option. + +## v0.5.0-rc.2 [2014-02-27] + +### Bugfixes + +- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdata/influxdb/issues/279). 
Limits not working on regex queries +- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source +- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic. +- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id + +## v0.5.0-rc.1 [2014-02-25] + +### Bugfixes + +- Ensure large deletes don't take too much memory +- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name. +- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with "group by" clause +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +### Features + +- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes. +- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup + +### Deprecated + +- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +## v0.4.4 [2014-02-05] + +### Features + +- Make the leveldb max open files configurable in the toml file + +## v0.4.3 [2014-01-31] + +### Bugfixes + +- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to protobuf limitation + +## v0.4.1 [2014-01-30] + +### Features + +- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL. +- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface. + +### Bugfixes + +- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. +- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail +- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. 
+ +## v0.4.0 [2014-01-17] + +## Features + +- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause +- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition +- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries +- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes. +- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent. +- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language. +- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction. +- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries + +### Bugfixes + +- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic +- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative +- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time +- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. +- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing. 
+- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time. +- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. +- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server + +### Deprecated + +- deprecate '==' and '!=' in favor of '=' and '<>', respectively +- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` +- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. +- Querying for column names that don't exist no longer throws an error. + +## v0.3.2 + +## Features + +- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces. +- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN + +## Bugfixes + +- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore +- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before + +## v0.3.0 + +## Features + +- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates +- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries +- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdata/influxdb/issues/59). Add histogram aggregate function + +## Bugfixes + +- Fix join and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column + +## v0.2.0 + +### Features + +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report + +### Bugfixes + +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44). 
Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via HTTP API fails + +### Deprecated + +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` +- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. + +## v0.1.0 + +### Features + +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. + +### Bugfixes + +- Don't allow creating users with empty username +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli + +## v0.0.9 + +#### Features + +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp. + +#### Bugfixes + +- Set PYTHONPATH and CC appropriately on mac os x. +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). Fix the init scripts on redhat + +## v0.0.8 + +#### Features + +- Add a way to reset the root password from the command line. +- Add distinct(..) and derivative(...) support +- Print test coverage if running go1.2 + +#### Bugfixes + +- Fix the default admin site path in the .deb and .rpm packages. +- Fix the configuration filename in the .tar.gz package. + +## v0.0.7 + +#### Features + +- include the admin site in the repo to make it easier for newcomers. + +## v0.0.6 + +#### Features + +- Add count(distinct(..)) support + +#### Bugfixes + +- Reuse levigo read/write options. 
+ +## v0.0.5 + +#### Features + +- Cache passwords in memory to speed up password verification +- Add MERGE and INNER JOIN support + +#### Bugfixes + +- All columns should be returned if `select *` was used +- Read/Write benchmarks + +## v0.0.2 + +#### Features + +- Add an admin UI +- Deb and RPM packages + +#### Bugfixes + +- Fix some nil pointer dereferences +- Cleanup the aggregators implementation + +## v0.0.1 [2013-10-22] + + * Initial Release diff --git a/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md new file mode 100644 index 0000000..6c1d2f2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md @@ -0,0 +1,82 @@ +_This document is currently in draft form._ + +# Background + +The goal of this guide is to capture some Do and Don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However there are still some practices not enforceable by any tools. This guide lists some specific practices to follow when writing code for the database. + +*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices. + +# The Guidelines + +## Try not to use third-party libraries + +A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessarily. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. + +For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). + +## Always include a default case in a 'switch' statement +The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertions switch. So always include a `default` statement unless you have an explicit reason not to. + +## When -- and when not -- set a channel to 'nil' + +## Use defer with anonymous functions to handle complex locking +Consider a block of code like the following. +``` + mu.Lock() + if foo == "quit" { + mu.Unlock() + return + } else if foo == "continue" { + if bar == "quit" { + mu.Unlock() + return + } + bar = "still going" + } else { + qux = "here at last" + mu.Unlock() + return + } + foo = "more to do" + bar = "still more to do" + mu.Unlock() + + qux = "finished now" + return +``` +While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. 
One way to address this is to use an anonymous function like so: +``` + more := func() bool { + mu.Lock() + defer mu.Unlock() + if foo == "quit" { + return false + } else if foo == "continue" { + if bar == "quit" { + return false + } + bar = "still going" + } else { + qux = "here at last" + return false + } + foo = "more to do" + bar = "still more to do" + return true + }() + + if more { + qux = "finished" + } + return +``` +This allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` will still run even in the event of a panic, ensuring the locks will be released even in that case. + +## When to call 'panic()' + +# Useful links +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) +- [Go in production](http://peter.bourgon.org/go-in-production/) +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` + diff --git a/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md new file mode 100644 index 0000000..4fd753c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md @@ -0,0 +1,280 @@ +Contributing to InfluxDB +======================== + +Bug reports +--------------- +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. +* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04. +* The version of InfluxDB you are running +* Whether you installed it using a pre-built package, or built it from source. +* A small test case, if applicable, that demonstrates the issues. + +Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** +If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) + +Test cases should be in the form of `curl` commands. For example: +```bash +# create database +curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" + +# create retention policy +curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" + +# write data +curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" + +# Delete a Measurement +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' + +# Query the Measurement +# Bug: expected it to return no data, but data comes back. +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' +``` +**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. + +Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" 
Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed. + +Feature requests +--------------- +We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. + +Contributing to the source code +--------------- + +InfluxDB follows standard Go project structure. This means that all your Go development are done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below. + +You should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md), to understand better how to write code for InfluxDB. + +Submitting a pull request +------------ +To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing your performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. + +There will usually be some back and forth as we finalize the change, but once that completes it may be merged. + +To assist in review for the PR, please add the following to your pull request comment: + +```md +- [ ] CHANGELOG.md updated +- [ ] Rebased/mergable +- [ ] Tests pass +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) +``` + +Signing the CLA +--------------- + +If you are going to be contributing back to InfluxDB please take a +second to sign our CLA, which can be found +[on our website](https://influxdata.com/community/cla/). + +Installing Go +------------- +InfluxDB requires Go 1.8.3 + +At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions +on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). + +After installing gvm you can install and set the default go version by +running the following: + + gvm install go1.8.3 + gvm use go1.8.3 --default + +Installing GDM +------------- +InfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies. Install it by running the following: + + go get github.com/sparrc/gdm + +Revision Control Systems +------------- +Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system. +Currently the project only depends on `git` and `mercurial`. + +* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) +* [Install Mercurial](http://mercurial.selenic.com/wiki/Download) + +Getting the source +------ +Setup the project structure and fetch the repo like so: + +```bash + mkdir $HOME/gocodez + export GOPATH=$HOME/gocodez + go get github.com/influxdata/influxdb +``` + +You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime. + +Cloning a fork +------------- +If you wish to work with fork of InfluxDB, your own fork for example, you must still follow the directory structure above. 
But instead of cloning the main repo, instead clone your fork. Follow the steps below to work with a fork: + +```bash + export GOPATH=$HOME/gocodez + mkdir -p $GOPATH/src/github.com/influxdata + cd $GOPATH/src/github.com/influxdata + git clone git@github.com:/influxdb +``` + +Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly. + +Build and Test +----- + +Make sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands: + +```bash +cd $GOPATH/src/github.com/influxdata/influxdb +gdm restore +``` + +To then build and install the binaries, run the following command. +```bash +go clean ./... +go install ./... +``` +The binaries will be located in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`. + +To set the version and commit flags during the build pass the following to the **install** command: + +```bash +-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT" +``` + +where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash. + +If you want to build packages, see `build.py` usage information: + +```bash +python build.py --help + +# Or to build a package for your current system +python build.py --package +``` + +To run the tests, execute the following command: + +```bash +cd $GOPATH/src/github.com/influxdata/influxdb +go test -v ./... + +# run tests that match some pattern +go test -run=TestDatabase . -v + +# run tests and show coverage +go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover +``` + +To install go cover, run the following command: +``` +go get golang.org/x/tools/cmd/cover +``` + +Generated Google Protobuf code +----------------- +Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. + +First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/ +) 2.6.1 or later for your OS: + +Then install the go plugins: + +```bash +go get github.com/gogo/protobuf/proto +go get github.com/gogo/protobuf/protoc-gen-gogo +go get github.com/gogo/protobuf/gogoproto +``` + +Finally run, `go generate` after updating any `*.proto` file: + +```bash +go generate ./... +``` +**Troubleshooting** + +If generating the protobuf code is failing for you, check each of the following: +* Ensure the protobuf library can be found. Make sure that `LD_LIBRRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. +* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. + + +Generated Go Templates +---------------------- + +The query engine requires optimized data structures for each data type so +instead of writing each implementation several times we use templates. _Do not +change code that ends in a `.gen.go` extension!_ Instead you must edit the +`.gen.go.tmpl` file that was used to generate it. + +Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility +to generate the code: + +```sh +$ go get github.com/benbjohnson/tmpl +``` + +Then you can regenerate all templates in the project: + +```sh +$ go generate ./... 
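+# re-runs every //go:generate directive under the current tree, including the
+# tmpl invocations that rebuild the .gen.go files from their .gen.go.tmpl templates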
+``` + +[tmpl]: https://github.com/benbjohnson/tmpl + + +Pre-commit checks +------------- + +We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following: +```bash + cd $GOPATH/src/github.com/influxdata/influxdb + cp .hooks/pre-commit .git/hooks/ +``` +In case the commit is rejected because it's not formatted you can run +the following to format the code: + +``` +go fmt ./... +go vet ./... +``` + +To install go vet, run the following command: +``` +go get golang.org/x/tools/cmd/vet +``` + +NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. + +For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). + +Profiling +----- +When troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. For example: + +```sh +# start influx with profiling +./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof +# run queries, writes, whatever you're testing +# Quit out of influxd and influxd.prof will then be written. +# open up pprof to examine the profiling data. +go tool pprof ./influxd influxd.prof +# once inside run "web", opens up browser with the CPU graph +# can also run "web " to zoom in. Or "list " to see specific lines +``` +Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. + +If you are profiling benchmarks built with the `testing` package, you may wish +to use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit +the code being profiled: + +```go +func BenchmarkSomething(b *testing.B) { + // do something intensive like fill database with data... + defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop() + // do something that you want to profile... +} +``` + +Continuous Integration testing +----- +InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file. 
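+
+Profiling example
+-----
+
+The following is a minimal, self-contained sketch of the benchmark-profiling pattern shown in the Profiling section above. The package name, file name and benchmark body are illustrative placeholders only, not code from the InfluxDB tree:
+
+```go
+// File: sum_test.go (illustrative)
+package mypkg
+
+import (
+	"testing"
+
+	"github.com/pkg/profile"
+)
+
+// BenchmarkSum is a placeholder benchmark showing how to start profiling
+// only after expensive setup has completed.
+func BenchmarkSum(b *testing.B) {
+	// Setup that should not be profiled, e.g. seeding test data.
+	data := make([]float64, 1<<16)
+	for i := range data {
+		data[i] = float64(i)
+	}
+
+	// Start profiling after setup; Stop() runs when the benchmark returns
+	// and writes the memory profile under /tmp.
+	defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop()
+
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		var sum float64
+		for _, v := range data {
+			sum += v
+		}
+		_ = sum
+	}
+}
+```
+
+Run it with the usual `go test -bench` invocation and inspect the resulting profile with `go tool pprof` as described above.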
diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 new file mode 100644 index 0000000..16f7840 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 @@ -0,0 +1,35 @@ +FROM ioft/i386-ubuntu:14.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.8.3 +ENV GO_ARCH 386 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 new file mode 100644 index 0000000..9b36193 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 @@ -0,0 +1,38 @@ +FROM ubuntu:trusty + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto \ + asciidoc \ + xmlto \ + docbook-xsl + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.8.3 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git new file mode 100644 index 0000000..20dce34 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git @@ -0,0 +1,43 @@ +FROM ubuntu:trusty + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Setup env +ENV GOPATH /root/go +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR + +VOLUME $PROJECT_DIR + + +# Install go +ENV GO_VERSION 1.8.3 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz + +# Clone Go tip for compilation +ENV GOROOT_BOOTSTRAP /usr/local/go +RUN git clone https://go.googlesource.com/go +ENV PATH /go/bin:$PATH + +# Add script for compiling go +ENV GO_CHECKOUT master +ADD ./gobuild.sh /gobuild.sh 
+ENTRYPOINT [ "/gobuild.sh" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 new file mode 100644 index 0000000..af505b5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 @@ -0,0 +1,12 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git +RUN add-apt-repository ppa:evarlast/golang1.4 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go + +ENV GOPATH=/root/go +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb +RUN mkdir -p /tmp/artifacts + +VOLUME /root/go/src/github.com/influxdata/influxdb +VOLUME /tmp/artifacts diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps new file mode 100644 index 0000000..c3f1efb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -0,0 +1,21 @@ +collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e +github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca +github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c +github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda +github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb +github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef +github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486 +github.com/gogo/protobuf 30433562cfbf487fe1df7cd26c7bab168d2f14d0 +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 +github.com/google/go-cmp 18107e6c56edb2d51f965f7d68e59404f0daee54 +github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 +github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815 +github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 +github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac +github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d +github.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce +github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6 +github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577 +golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE new file mode 100644 index 0000000..63cef79 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2016 Errplane Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md new file mode 100644 index 0000000..949a7b3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -0,0 +1,25 @@ +# List +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) +- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) +- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) +- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) +- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) +- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) +- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) +- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) +- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) diff --git a/vendor/github.com/influxdata/influxdb/Makefile b/vendor/github.com/influxdata/influxdb/Makefile new file mode 100644 index 0000000..9fa9c82 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Makefile @@ -0,0 +1,39 @@ +PACKAGES=$(shell find . 
-name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique) + +default: + +metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck + +deadcode: + @deadcode $(PACKAGES) 2>&1 + +cyclo: + @gocyclo -over 10 $(PACKAGES) + +aligncheck: + @aligncheck $(PACKAGES) + +defercheck: + @defercheck $(PACKAGES) + + +structcheck: + @structcheck $(PACKAGES) + +lint: + @for pkg in $(PACKAGES); do golint $$pkg; done + +errcheck: + @for pkg in $(PACKAGES); do \ + errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg \ + done + +tools: + go get github.com/remyoudompheng/go-misc/deadcode + go get github.com/alecthomas/gocyclo + go get github.com/opennota/check/... + go get github.com/golang/lint/golint + go get github.com/kisielk/errcheck + go get github.com/sparrc/gdm + +.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools diff --git a/vendor/github.com/influxdata/influxdb/QUERIES.md b/vendor/github.com/influxdata/influxdb/QUERIES.md new file mode 100644 index 0000000..46a9eb1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/QUERIES.md @@ -0,0 +1,180 @@ +The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes. + +# Databases & retention policies + +```sql +-- create a database +CREATE DATABASE + +-- create a retention policy +CREATE RETENTION POLICY ON DURATION REPLICATION [DEFAULT] + +-- alter retention policy +ALTER RETENTION POLICY ON (DURATION | REPLICATION | DEFAULT)+ + +-- drop a database +DROP DATABASE + +-- drop a retention policy +DROP RETENTION POLICY ON +``` +where `` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `` must be an integer. + +If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads. + +# Users and permissions + +```sql +-- create user +CREATE USER WITH PASSWORD '' + +-- grant privilege on a database +GRANT ON TO + +-- grant cluster admin privileges +GRANT ALL [PRIVILEGES] TO + +-- revoke privilege +REVOKE ON FROM + +-- revoke all privileges for a DB +REVOKE ALL [PRIVILEGES] ON FROM + +-- revoke all privileges including cluster admin +REVOKE ALL [PRIVILEGES] FROM + +-- combine db creation with privilege assignment (user must already exist) +CREATE DATABASE GRANT TO +CREATE DATABASE REVOKE FROM + +-- delete a user +DROP USER + + +``` +where ` := READ | WRITE | All `. + +Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. + +By default, newly created users have no privileges to any databases. + +Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. 
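The statements above are plain InfluxQL. From Go they can be issued through the `client/v2` package vendored in this change; the following is a minimal, hypothetical sketch that connects as an existing admin user and runs two of the user-management statements. The address, credentials, database name and user name are placeholders, and authentication must already be enabled as noted above.

```go
package main

import (
	"log"

	"github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Connect over HTTP as an existing admin user (credentials are illustrative).
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     "http://localhost:8086",
		Username: "admin",
		Password: "changeme",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Issue the administrative statements one by one and surface any error.
	stmts := []string{
		"CREATE USER bubba WITH PASSWORD 'bumblebeetuna'",
		"GRANT WRITE ON mydb TO bubba",
	}
	for _, stmt := range stmts {
		resp, err := c.Query(client.Query{Command: stmt})
		if err != nil {
			log.Fatal(err)
		}
		if resp.Error() != nil {
			log.Fatal(resp.Error())
		}
	}
}
```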
+ +# Select + +```sql +SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) + +SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region +``` + +## Group By + +# Delete + +# Series + +## Destroy + +```sql +DROP MEASUREMENT +DROP MEASUREMENT cpu WHERE region = 'uswest' +``` + +## Show + +Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. + +```sql +-- show all databases +SHOW DATABASES + +-- show measurement names +SHOW MEASUREMENTS +SHOW MEASUREMENTS LIMIT 15 +SHOW MEASUREMENTS LIMIT 10 OFFSET 40 +SHOW MEASUREMENTS WHERE service = 'redis' +-- LIMIT and OFFSET can be applied to any of the SHOW type queries + +-- show all series across all measurements/tagsets +SHOW SERIES + +-- get a show of all series for any measurements where tag key region = tak value 'uswest' +SHOW SERIES WHERE region = 'uswest' + +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 + +-- returns the 100 - 109 rows in the result. In the case of SHOW SERIES, which returns +-- series split into measurements. Each series counts as a row. So you could see only a +-- single measurement returned, but 10 series within it. +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 + +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb + +-- get a show of all tag keys across all measurements +SHOW TAG KEYS + +-- show all the tag keys for a given measurement +SHOW TAG KEYS FROM cpu +SHOW TAG KEYS FROM temperature, wind_speed + +-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required +SHOW TAG VALUES WITH TAG KEY = 'region' +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' + +-- and you can do stuff against fields +SHOW FIELD KEYS FROM cpu + +-- but you can't do this +SHOW FIELD VALUES +-- we don't index field values, so this query should be invalid. + +-- show all users +SHOW USERS +``` + +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. + +And the show series output looks like this: + +```json +[ + { + "name": "cpu", + "columns": ["id", "region", "host"], + "values": [ + 1, "uswest", "servera", + 2, "uswest", "serverb" + ] + }, + { + "name": "reponse_time", + "columns": ["id", "application", "host"], + "values": [ + 3, "myRailsApp", "servera" + ] + } +] +``` + +# Continuous Queries + +Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: + +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html + +Instead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention, +particularly in the case where creation is scripted. + +## Create + + CREATE CONTINUOUS QUERY AS SELECT ... FROM ... 
+ +## Destroy + + DROP CONTINUOUS QUERY + +## List + + SHOW CONTINUOUS QUERIES diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md new file mode 100644 index 0000000..cf301c3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/README.md @@ -0,0 +1,71 @@ +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/) + +## An Open-Source Time Series Database + +InfluxDB is an open source **time series database** with +**no external dependencies**. It's useful for recording metrics, +events, and performing analytics. + +## Features + +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running. +* Data can be tagged, allowing very flexible querying. +* SQL-like query language. +* Simple to install and manage, and fast to get data in and out. +* It aims to answer queries in real-time. That means every data point is + indexed as it comes in and is immediately available in queries that + should return in < 100ms. + +## Installation + +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: + +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. + +## Getting Started + +### Create your first database + +``` +curl -XPOST 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Insert some data +``` +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' +``` + +### Query for the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" +``` + +### Analyze the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" +``` + +## Documentation + +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/). + +## Contributing + +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests. 
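The curl commands in the Getting Started section above map onto two HTTP endpoints, `/write` and `/query`. As an illustration, here is a small, hypothetical Go program that performs the same write and read-back using only the standard library; it assumes the same local instance on port 8086 and the `mydb` database created earlier, and it omits HTTP status-code handling for brevity.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	base := "http://localhost:8086" // local InfluxDB, as in the curl examples

	// Write one point in line protocol, like the curl -XPOST /write call above.
	line := "cpu,host=server01,region=uswest load=42 1434055562000000000"
	resp, err := http.Post(base+"/write?db=mydb", "text/plain", strings.NewReader(line))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Query it back, like the curl -G /query call above.
	v := url.Values{}
	v.Set("db", "mydb")
	v.Set("q", "SELECT * FROM cpu WHERE host='server01'")
	resp, err = http.Get(base + "/query?" + v.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```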
+ +## Looking for Support? + +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. diff --git a/vendor/github.com/influxdata/influxdb/TODO.md b/vendor/github.com/influxdata/influxdb/TODO.md new file mode 100644 index 0000000..56b5294 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/TODO.md @@ -0,0 +1,9 @@ +# TODO + +## v2 + +TODO list for v2. Here is a list of things we want to add to v1, but can't because they would be a breaking change. + +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key. +- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries. diff --git a/vendor/github.com/influxdata/influxdb/appveyor.yml b/vendor/github.com/influxdata/influxdb/appveyor.yml new file mode 100644 index 0000000..05f77ef --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/appveyor.yml @@ -0,0 +1,37 @@ +version: 0.{build} +pull_requests: + do_not_increment_build_number: true +branches: + only: + - master + +os: Windows Server 2012 R2 + +# Custom clone folder (variables are not expanded here). +clone_folder: c:\gopath\src\github.com\influxdata\influxdb + +# Environment variables +environment: + GOROOT: C:\go17 + GOPATH: C:\gopath + +# Scripts that run after cloning repository +install: + - set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH% + - rmdir c:\go /s /q + - echo %PATH% + - echo %GOPATH% + - cd C:\gopath\src\github.com\influxdata\influxdb + - go version + - go env + - go get github.com/sparrc/gdm + - cd C:\gopath\src\github.com\influxdata\influxdb + - gdm restore + +# To run your custom scripts instead of automatic MSBuild +build_script: + - go get -t -v ./... + - go test -race -v ./... 
+ +# To disable deployment +deploy: off diff --git a/vendor/github.com/influxdata/influxdb/build.py b/vendor/github.com/influxdata/influxdb/build.py new file mode 100755 index 0000000..8fba0ea --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.py @@ -0,0 +1,991 @@ +#!/usr/bin/python2.7 -u + +import sys +import os +import subprocess +import time +from datetime import datetime +import shutil +import tempfile +import hashlib +import re +import logging +import argparse + +################ +#### InfluxDB Variables +################ + +# Packaging variables +PACKAGE_NAME = "influxdb" +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/influxdb" +DATA_DIR = "/var/lib/influxdb" +SCRIPT_DIR = "/usr/lib/influxdb/scripts" +CONFIG_DIR = "/etc/influxdb" +LOGROTATE_DIR = "/etc/logrotate.d" +MAN_DIR = "/usr/share/man" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/influxdb.service" +PREINST_SCRIPT = "scripts/pre-install.sh" +POSTINST_SCRIPT = "scripts/post-install.sh" +POSTUNINST_SCRIPT = "scripts/post-uninstall.sh" +LOGROTATE_SCRIPT = "scripts/logrotate" +DEFAULT_CONFIG = "etc/config.sample.toml" + +# Default AWS S3 bucket for uploads +DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts" + +CONFIGURATION_FILES = [ + CONFIG_DIR + '/influxdb.conf', + LOGROTATE_DIR + '/influxdb', +] + +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/influxdb" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Distributed time-series database." + +prereqs = [ 'git', 'go' ] +go_vet_command = "go tool vet ./" +optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ] + +fpm_common_args = "-f -s dir --log error \ +--vendor {} \ +--url {} \ +--after-install {} \ +--before-install {} \ +--after-remove {} \ +--license {} \ +--maintainer {} \ +--directories {} \ +--directories {} \ +--directories {} \ +--description \"{}\"".format( + VENDOR, + PACKAGE_URL, + POSTINST_SCRIPT, + PREINST_SCRIPT, + POSTUNINST_SCRIPT, + PACKAGE_LICENSE, + MAINTAINER, + LOG_DIR, + DATA_DIR, + MAN_DIR, + DESCRIPTION) + +for f in CONFIGURATION_FILES: + fpm_common_args += " --config-files {}".format(f) + +targets = { + 'influx' : './cmd/influx', + 'influxd' : './cmd/influxd', + 'influx_stress' : './cmd/influx_stress', + 'influx_inspect' : './cmd/influx_inspect', + 'influx_tsm' : './cmd/influx_tsm', +} + +supported_builds = { + 'darwin': [ "amd64" ], + 'windows': [ "amd64" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] +} + +supported_packages = { + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], + "windows": [ "zip" ], +} + +################ +#### InfluxDB Functions +################ + +def print_banner(): + logging.info(""" + ___ __ _ ___ ___ + |_ _|_ _ / _| |_ ___ _| \\| _ ) + | || ' \\| _| | || \\ \\ / |) | _ \\ + |___|_||_|_| |_|\\_,_/_\\_\\___/|___/ + Build Script +""") + +def create_package_fs(build_root): + """Create a filesystem structure to mimic the package filesystem. 
+ """ + logging.debug("Creating package filesystem at location: {}".format(build_root)) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], + LOG_DIR[1:], + DATA_DIR[1:], + SCRIPT_DIR[1:], + CONFIG_DIR[1:], + LOGROTATE_DIR[1:], + MAN_DIR[1:] ] + for d in dirs: + os.makedirs(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) + +def package_scripts(build_root, config_only=False, windows=False): + """Copy the necessary scripts and configuration files to the package + filesystem. + """ + if config_only: + logging.debug("Copying configuration to build directory.") + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf")) + os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644) + else: + logging.debug("Copying scripts and sample configuration to build directory.") + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644) + +def package_man_files(build_root): + """Copy and gzip man pages to the package filesystem.""" + logging.debug("Installing man pages.") + run("make -C man/ clean install DESTDIR={}/usr".format(build_root)) + for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])): + for f in files: + run("gzip -9n {}".format(os.path.join(path, f))) + +def go_get(branch, update=False, no_uncommitted=False): + """Retrieve build dependencies or restore pinned dependencies. + """ + if local_changes() and no_uncommitted: + logging.error("There are uncommitted changes in the current directory.") + return False + if not check_path_for("gdm"): + logging.info("Downloading `gdm`...") + get_command = "go get github.com/sparrc/gdm" + run(get_command) + logging.info("Retrieving dependencies with `gdm`...") + sys.stdout.flush() + run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) + return True + +def run_tests(race, parallel, timeout, no_vet, junit=False): + """Run the Go test suite on binary output. + """ + logging.info("Starting tests...") + if race: + logging.info("Race is enabled.") + if parallel is not None: + logging.info("Using parallel: {}".format(parallel)) + if timeout is not None: + logging.info("Using timeout: {}".format(timeout)) + out = run("go fmt ./...") + if len(out) > 0: + logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + logging.error("{}".format(out)) + return False + if not no_vet: + logging.info("Running 'go vet'...") + out = run(go_vet_command) + if len(out) > 0: + logging.error("Go vet failed. Please run 'go vet ./...' 
and fix any errors.") + logging.error("{}".format(out)) + return False + else: + logging.info("Skipping 'go vet' call...") + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + if junit: + logging.info("Retrieving go-junit-report...") + run("go get github.com/jstemmer/go-junit-report") + + # Retrieve the output from this command. + logging.info("Running tests...") + logging.debug("{}".format(test_command)) + proc = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output, unused_err = proc.communicate() + output = output.decode('utf-8').strip() + + # Process the output through go-junit-report. + with open('test-results.xml', 'w') as f: + logging.debug("{}".format("go-junit-report")) + junit_proc = subprocess.Popen(["go-junit-report"], stdin=subprocess.PIPE, stdout=f, stderr=subprocess.PIPE) + unused_output, err = junit_proc.communicate(output.encode('ascii', 'ignore')) + if junit_proc.returncode != 0: + logging.error("Command '{}' failed with error: {}".format("go-junit-report", err)) + sys.exit(1) + + if proc.returncode != 0: + logging.error("Command '{}' failed with error: {}".format(test_command, output.encode('ascii', 'ignore'))) + sys.exit(1) + else: + logging.info("Running tests...") + output = run(test_command) + logging.debug("Test output:\n{}".format(out.encode('ascii', 'ignore'))) + return True + +################ +#### All InfluxDB-specific content above this line +################ + +def run(command, allow_failure=False, shell=False): + """Run shell command (convenience wrapper around subprocess). + """ + out = None + logging.debug("{}".format(command)) + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode('utf-8').strip() + # logging.debug("Command output: {}".format(out)) + except subprocess.CalledProcessError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e.output)) + return None + else: + logging.error("Command '{}' failed with error: {}".format(command, e.output)) + sys.exit(1) + except OSError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e)) + return out + else: + logging.error("Command '{}' failed with error: {}".format(command, e)) + sys.exit(1) + else: + return out + +def create_temp_dir(prefix = None): + """ Create temporary directory with optional prefix. + """ + if prefix is None: + return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) + else: + return tempfile.mkdtemp(prefix=prefix) + +def increment_minor_version(version): + """Return the version with the minor version incremented and patch + version set to zero. + """ + ver_list = version.split('.') + if len(ver_list) != 3: + logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + return version + ver_list[1] = str(int(ver_list[1]) + 1) + ver_list[2] = str(0) + inc_version = '.'.join(ver_list) + logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) + return inc_version + +def get_current_version_tag(): + """Retrieve the raw git version tag. 
+ """ + version = run("git describe --always --tags --abbrev=0") + return version + +def get_current_version(): + """Parse version information from git tag output. + """ + version_tag = get_current_version_tag() + # Remove leading 'v' + if version_tag[0] == 'v': + version_tag = version_tag[1:] + # Replace any '-'/'_' with '~' + if '-' in version_tag: + version_tag = version_tag.replace("-","~") + if '_' in version_tag: + version_tag = version_tag.replace("_","~") + return version_tag + +def get_current_commit(short=False): + """Retrieve the current git commit. + """ + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + """Retrieve the current git branch. + """ + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def local_changes(): + """Return True if there are local un-committed changes. + """ + output = run("git diff-files --ignore-submodules --").strip() + if len(output) > 0: + return True + return False + +def get_system_arch(): + """Retrieve current system architecture. + """ + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + elif arch == "386": + arch = "i386" + elif arch == "aarch64": + arch = "arm64" + elif 'arm' in arch: + # Prevent uname from reporting full ARM arch (eg 'armv7l') + arch = "arm" + return arch + +def get_system_platform(): + """Retrieve current system platform. + """ + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + """Retrieve version information for Go. + """ + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + """Check the the user's path for the provided binary. + """ + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + """Check environment for common Go variables. + """ + logging.info("Checking environment...") + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) + + cwd = os.getcwd() + if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + return True + +def check_prereqs(): + """Check user path for required dependencies. + """ + logging.info("Checking for dependencies...") + for req in prereqs: + if not check_path_for(req): + logging.error("Could not find dependency: {}".format(req)) + return False + return True + +def upload_packages(packages, bucket_name=None, overwrite=False): + """Upload provided package output to AWS S3. 
+ """ + logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) + try: + import boto + from boto.s3.key import Key + from boto.s3.connection import OrdinaryCallingFormat + logging.getLogger("boto").setLevel(logging.WARNING) + except ImportError: + logging.warn("Cannot upload packages without 'boto' Python library!") + return False + logging.info("Connecting to AWS S3...") + # Up the number of attempts to 10 from default of 1 + boto.config.add_section("Boto") + boto.config.set("Boto", "metadata_service_num_attempts", "10") + c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) + if bucket_name is None: + bucket_name = DEFAULT_BUCKET + bucket = c.get_bucket(bucket_name.split('/')[0]) + for p in packages: + if '/' in bucket_name: + # Allow for nested paths within the bucket name (ex: + # bucket/folder). Assuming forward-slashes as path + # delimiter. + name = os.path.join('/'.join(bucket_name.split('/')[1:]), + os.path.basename(p)) + else: + name = os.path.basename(p) + logging.debug("Using key: {}".format(name)) + if bucket.get_key(name) is None or overwrite: + logging.info("Uploading file {}".format(name)) + k = Key(bucket) + k.key = name + if overwrite: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + else: + logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + return True + +def go_list(vendor=False, relative=False): + """ + Return a list of packages + If vendor is False vendor package are not included + If relative is True the package prefix defined by PACKAGE_URL is stripped + """ + p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + packages = out.split('\n') + if packages[-1] == '': + packages = packages[:-1] + if not vendor: + non_vendor = [] + for p in packages: + if '/vendor/' not in p: + non_vendor.append(p) + packages = non_vendor + if relative: + relative_pkgs = [] + for p in packages: + r = p.replace(PACKAGE_URL, '.') + if r != '.': + relative_pkgs.append(r) + packages = relative_pkgs + return packages + +def build(version=None, + platform=None, + arch=None, + nightly=False, + race=False, + clean=False, + outdir=".", + tags=[], + static=False): + """Build each target for the specified architecture and platform. 
+ """ + logging.info("Starting build for {}/{}...".format(platform, arch)) + logging.info("Using Go version: {}".format(get_go_version())) + logging.info("Using git branch: {}".format(get_current_branch())) + logging.info("Using git commit: {}".format(get_current_commit())) + if static: + logging.info("Using statically-compiled output.") + if race: + logging.info("Race is enabled.") + if len(tags) > 0: + logging.info("Using build tags: {}".format(','.join(tags))) + + logging.info("Sending build output to: {}".format(outdir)) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/' and outdir != ".": + logging.info("Cleaning build directory '{}' before building.".format(outdir)) + shutil.rmtree(outdir) + os.makedirs(outdir) + + logging.info("Using version '{}' for build.".format(version)) + + for target, path in targets.items(): + logging.info("Building target: {}".format(target)) + build_command = "" + + # Handle static binary output + if static is True or "static_" in arch: + if "static_" in arch: + static = True + arch = arch.replace("static_", "") + build_command += "CGO_ENABLED=0 " + + # Handle variations in architecture output + if arch == "i386" or arch == "i686": + arch = "386" + elif "arm" in arch: + arch = "arm" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + # TODO(rossmcdonald) - Verify this is the correct setting for arm64 + build_command += "GOARM=7 " + else: + logging.error("Invalid ARM architecture specified: {}".format(arch)) + logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") + return False + if platform == 'windows': + target = target + '.exe' + build_command += "go build -o {} ".format(os.path.join(outdir, target)) + if race: + build_command += "-race " + if len(tags) > 0: + build_command += "-tags {} ".format(','.join(tags)) + if "1.4" in get_go_version(): + if static: + build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + + else: + # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + if static: + build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + if static: + build_command += "-a -installsuffix cgo " + build_command += path + start_time = datetime.utcnow() + run(build_command, shell=True) + end_time = datetime.utcnow() + logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) + return True + +def generate_md5_from_file(path): + """Generate MD5 signature based on the contents of the file at path. + """ + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() + +def generate_sig_from_file(path): + """Generate a detached GPG signature from the file at path. 
+ """ + logging.debug("Generating GPG signature for file: {}".format(path)) + gpg_path = check_path_for('gpg') + if gpg_path is None: + logging.warn("gpg binary not found on path! Skipping signature creation.") + return False + if os.environ.get("GNUPG_HOME") is not None: + run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) + else: + run('gpg --armor --detach-sign --yes {}'.format(path)) + return True + +def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): + """Package the output of the build process. + """ + outfiles = [] + tmp_build_dir = create_temp_dir() + logging.debug("Packaging for build output: {}".format(build_output)) + logging.info("Using temporary directory: {}".format(tmp_build_dir)) + try: + for platform in build_output: + # Create top-level folder displaying which platform (linux, etc) + os.makedirs(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + logging.info("Creating packages for {}/{}".format(platform, arch)) + # Create second-level directory displaying the architecture (amd64, etc) + current_location = build_output[platform][arch] + + # Create directory tree to mimic file system of package + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) + os.makedirs(build_root) + + # Copy packaging scripts to build directory + if platform == "windows": + # For windows and static builds, just copy + # binaries to root of package (no other scripts or + # directories) + package_scripts(build_root, config_only=True, windows=True) + elif static or "static_" in arch: + package_scripts(build_root, config_only=True) + else: + create_package_fs(build_root) + package_scripts(build_root) + + if platform != "windows": + package_man_files(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + if platform == 'windows' or static or "static_" in arch: + # Where the binary should go in the package filesystem + to = os.path.join(build_root, binary) + # Where the binary currently is located + fr = os.path.join(current_location, binary) + else: + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + shutil.copy(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) + name = pkg_name + # Reset version, iteration, and current location on each run + # since they may be modified below. 
+ package_version = version + package_iteration = iteration + if "static_" in arch: + # Remove the "static_" from the displayed arch on the package + package_arch = arch.replace("static_", "") + else: + package_arch = arch + if not release and not nightly: + # For non-release builds, just use the commit hash as the version + package_version = "{}~{}".format(version, + get_current_commit(short=True)) + package_iteration = "0" + package_build_root = build_root + current_location = build_output[platform][arch] + + if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) + package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) + if nightly: + if static or "static_" in arch: + name = '{}-static-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + name = '{}-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + if static or "static_" in arch: + name = '{}-{}-static_{}_{}'.format(name, + package_version, + platform, + package_arch) + else: + name = '{}-{}_{}_{}'.format(name, + package_version, + platform, + package_arch) + current_location = os.path.join(os.getcwd(), current_location) + if package_type == 'tar': + tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name) + run(tar_command, shell=True) + run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".tar.gz") + outfiles.append(outfile) + elif package_type == 'zip': + zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".zip") + outfiles.append(outfile) + elif package_type not in ['zip', 'tar'] and static or "static_" in arch: + logging.info("Skipping package type '{}' for static builds.".format(package_type)) + else: + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + package_arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT) + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + logging.warn("Could not determine output from packaging output!") + else: + if nightly: + # Strip nightly version from package name + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") + os.rename(outfile, new_outfile) + outfile = new_outfile + else: + if package_type == 'rpm': + # rpm's convert any dashes to underscores + package_version = package_version.replace("-", "_") + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) + os.rename(outfile, new_outfile) + outfile = new_outfile + outfiles.append(os.path.join(os.getcwd(), outfile)) + logging.debug("Produced package files: {}".format(outfiles)) + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def main(args): + global PACKAGE_NAME + + if args.release and args.nightly: + logging.error("Cannot be both a nightly and a release.") + return 1 + + if args.nightly: + args.version = 
increment_minor_version(args.version) + args.version = "{}~n{}".format(args.version, + datetime.utcnow().strftime("%Y%m%d%H%M")) + args.iteration = 0 + + # Pre-build checks + check_environ() + if not check_prereqs(): + return 1 + if args.build_tags is None: + args.build_tags = [] + else: + args.build_tags = args.build_tags.split(',') + + orig_commit = get_current_commit(short=True) + orig_branch = get_current_branch() + + if args.platform not in supported_builds and args.platform != 'all': + logging.error("Invalid build platform: {}".format(target_platform)) + return 1 + + build_output = {} + + if args.branch != orig_branch and args.commit != orig_commit: + logging.error("Can only specify one branch or commit to build from.") + return 1 + elif args.branch != orig_branch: + logging.info("Moving to git branch: {}".format(args.branch)) + run("git checkout {}".format(args.branch)) + elif args.commit != orig_commit: + logging.info("Moving to git commit: {}".format(args.commit)) + run("git checkout {}".format(args.commit)) + + if not args.no_get: + if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted): + return 1 + + if args.test: + if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report): + return 1 + + platforms = [] + single_build = True + if args.platform == 'all': + platforms = supported_builds.keys() + single_build = False + else: + platforms = [args.platform] + + for platform in platforms: + build_output.update( { platform : {} } ) + archs = [] + if args.arch == "all": + single_build = False + archs = supported_builds.get(platform) + else: + archs = [args.arch] + + for arch in archs: + od = args.outdir + if not single_build: + od = os.path.join(args.outdir, platform, arch) + if not build(version=args.version, + platform=platform, + arch=arch, + nightly=args.nightly, + race=args.race, + clean=args.clean, + outdir=od, + tags=args.build_tags, + static=args.static): + return 1 + build_output.get(platform).update( { arch : od } ) + + # Build packages + if args.package: + if not check_path_for("fpm"): + logging.error("FPM ruby gem required for packaging. 
Stopping.") + return 1 + packages = package(build_output, + args.name, + args.version, + nightly=args.nightly, + iteration=args.iteration, + static=args.static, + release=args.release) + if args.sign: + logging.debug("Generating GPG signatures for packages: {}".format(packages)) + sigs = [] # retain signatures so they can be uploaded with packages + for p in packages: + if generate_sig_from_file(p): + sigs.append(p + '.asc') + else: + logging.error("Creation of signature for package [{}] failed!".format(p)) + return 1 + packages += sigs + if args.upload: + logging.debug("Files staged for upload: {}".format(packages)) + if args.nightly: + args.upload_overwrite = True + if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): + return 1 + logging.info("Packages created:") + for p in packages: + logging.info("{} (MD5={})".format(p.split('/')[-1:][0], + generate_md5_from_file(p))) + if orig_branch != get_current_branch(): + logging.info("Moving back to original git branch: {}".format(orig_branch)) + run("git checkout {}".format(orig_branch)) + + return 0 + +if __name__ == '__main__': + LOG_LEVEL = logging.INFO + if '--debug' in sys.argv[1:]: + LOG_LEVEL = logging.DEBUG + log_format = '[%(levelname)s] %(funcName)s: %(message)s' + logging.basicConfig(level=LOG_LEVEL, + format=log_format) + + parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') + parser.add_argument('--verbose','-v','--debug', + action='store_true', + help='Use debug output') + parser.add_argument('--outdir', '-o', + metavar='', + default='./build/', + type=os.path.abspath, + help='Output directory') + parser.add_argument('--name', '-n', + metavar='', + default=PACKAGE_NAME, + type=str, + help='Name to use for package name (when package is specified)') + parser.add_argument('--arch', + metavar='', + type=str, + default=get_system_arch(), + help='Target architecture for build output') + parser.add_argument('--platform', + metavar='', + type=str, + default=get_system_platform(), + help='Target platform for build output') + parser.add_argument('--branch', + metavar='', + type=str, + default=get_current_branch(), + help='Build from a specific branch') + parser.add_argument('--commit', + metavar='', + type=str, + default=get_current_commit(short=True), + help='Build from a specific commit') + parser.add_argument('--version', + metavar='', + type=str, + default=get_current_version(), + help='Version information to apply to build output (ex: 0.12.0)') + parser.add_argument('--iteration', + metavar='', + type=str, + default="1", + help='Package iteration to apply to build output (defaults to 1)') + parser.add_argument('--stats', + action='store_true', + help='Emit build metrics (requires InfluxDB Python client)') + parser.add_argument('--stats-server', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided hostname and port') + parser.add_argument('--stats-db', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided database name') + parser.add_argument('--nightly', + action='store_true', + help='Mark build output as nightly build (will incremement the minor version)') + parser.add_argument('--update', + action='store_true', + help='Update build dependencies prior to building') + parser.add_argument('--package', + action='store_true', + help='Package binary output') + parser.add_argument('--release', + action='store_true', + help='Mark build output as release') + parser.add_argument('--clean', + action='store_true', + 
help='Clean output directory before building') + parser.add_argument('--no-get', + action='store_true', + help='Do not retrieve pinned dependencies when building') + parser.add_argument('--no-uncommitted', + action='store_true', + help='Fail if uncommitted changes exist in the working directory') + parser.add_argument('--upload', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--upload-overwrite','-w', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--bucket', + metavar='', + type=str, + default=DEFAULT_BUCKET, + help='Destination bucket for uploads') + parser.add_argument('--build-tags', + metavar='', + help='Optional build tags to use for compilation') + parser.add_argument('--static', + action='store_true', + help='Create statically-compiled binary output') + parser.add_argument('--sign', + action='store_true', + help='Create GPG detached signatures for packages (when package is specified)') + parser.add_argument('--test', + action='store_true', + help='Run tests (does not produce build output)') + parser.add_argument('--junit-report', + action='store_true', + help='Output tests in the JUnit XML format') + parser.add_argument('--no-vet', + action='store_true', + help='Do not run "go vet" when running tests') + parser.add_argument('--race', + action='store_true', + help='Enable race flag for build output') + parser.add_argument('--parallel', + metavar='', + type=int, + help='Number of tests to run simultaneously') + parser.add_argument('--timeout', + metavar='', + type=str, + help='Timeout for tests before failing') + args = parser.parse_args() + print_banner() + sys.exit(main(args)) diff --git a/vendor/github.com/influxdata/influxdb/build.sh b/vendor/github.com/influxdata/influxdb/build.sh new file mode 100755 index 0000000..0f80ac7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Run the build utility via Docker + +set -e + +# Make sure our working dir is the dir of the script +DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +cd $DIR + + +# Build new docker image +docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR +echo "Running build.py" +# Run docker +docker run --rm \ + -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ + -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ + -v $HOME/.aws.conf:/root/.aws.conf \ + -v $DIR:/root/go/src/github.com/influxdata/influxdb \ + influxdb-builder \ + "$@" + diff --git a/vendor/github.com/influxdata/influxdb/circle-test.sh b/vendor/github.com/influxdata/influxdb/circle-test.sh new file mode 100755 index 0000000..7528e8f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/circle-test.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# +# This is the InfluxDB test script for CircleCI, it is a light wrapper around ./test.sh. + +# Exit if any command fails +set -e + +# Get dir of script and make it is our working directory. +DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +cd $DIR + +export OUTPUT_DIR="$CIRCLE_ARTIFACTS" +# Don't delete the container since CircleCI doesn't have permission to do so. +export DOCKER_RM="false" + +# Get number of test environments. +count=$(./test.sh count) +# Check that we aren't wasting CircleCI nodes. +if [ $CIRCLE_NODE_INDEX -gt $((count - 1)) ] +then + echo "More CircleCI nodes allocated than tests environments to run!" + exit 0 +fi + +# Map CircleCI nodes to test environments. 
+tests=$(seq 0 $((count - 1))) +for i in $tests +do + mine=$(( $i % $CIRCLE_NODE_TOTAL )) + if [ $mine -eq $CIRCLE_NODE_INDEX ] + then + echo "Running test env index: $i" + ./test.sh $i + fi +done + +# Copy the JUnit test XML to the test reports folder. +mkdir -p $CIRCLE_TEST_REPORTS/reports +cp test-results.xml $CIRCLE_TEST_REPORTS/reports/test-results.xml diff --git a/vendor/github.com/influxdata/influxdb/circle.yml b/vendor/github.com/influxdata/influxdb/circle.yml new file mode 100644 index 0000000..c2d994e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/circle.yml @@ -0,0 +1,43 @@ +machine: + services: + - docker + environment: + GODIST: "go1.8.3.linux-amd64.tar.gz" + post: + - mkdir -p download + - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST + - sudo rm -rf /usr/local/go + - sudo tar -C /usr/local -xzf download/$GODIST + +dependencies: + cache_directories: + - "~/docker" + - ~/download + override: + - ./test.sh save: + # building the docker images can take a long time, hence caching + timeout: 1800 + +test: + override: + - bash circle-test.sh: + parallel: true + # Race tests using 960s timeout + timeout: 960 + +deployment: + release: + tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/ + commands: + - > + docker run + -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" + -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" + -v $(pwd):/root/go/src/github.com/influxdata/influxdb + influxdb_build_ubuntu64 + --release + --package + --platform all + --arch all + --upload + --bucket dl.influxdata.com/influxdb/releases diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md new file mode 100644 index 0000000..0df8936 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/README.md @@ -0,0 +1,298 @@ +# InfluxDB Client + +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) + +## Description + +**NOTE:** The Go client library now has a "v2" version, with the old version +being deprecated. The new version can be imported at +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. + +A Go client library written and maintained by the **InfluxDB** team. +This package provides convenience functions to read and write time series data. +It uses the HTTP protocol to communicate with your **InfluxDB** cluster. + + +## Getting Started + +### Connecting To Your Database + +Connecting to an **InfluxDB** database is straightforward. You will need a host +name, a port and the cluster user credentials if applicable. The default port is +8086. You can customize these settings to your specific installation via the +**InfluxDB** configuration file. + +Though not necessary for experimentation, you may want to create a new user +and authenticate the connection to your database. + +For more information please check out the +[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/). + +For the impatient, you can create a new admin user _bubba_ by firing off the +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). + +```shell +influx +> create user bubba with password 'bumblebeetuna' +> grant all privileges to bubba +``` + +And now for good measure set the credentials in you shell environment. +In the example below we will use $INFLUX_USER and $INFLUX_PWD + +Now with the administrivia out of the way, let's connect to our database. 
+ +NOTE: If you've opted out of creating a user, you can omit Username and Password in +the configuration below. + +```go +package main + +import ( + "log" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +const ( + MyDB = "square_holes" + username = "bubba" + password = "bumblebeetuna" +) + + +func main() { + // Create a new HTTPClient + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: username, + Password: password, + }) + if err != nil { + log.Fatal(err) + } + + // Create a new point batch + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ + Database: MyDB, + Precision: "s", + }) + if err != nil { + log.Fatal(err) + } + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + log.Fatal(err) + } + bp.AddPoint(pt) + + // Write the batch + if err := c.Write(bp); err != nil { + log.Fatal(err) + } +} + +``` + +### Inserting Data + +Time series data aka *points* are written to the database using batch inserts. +The mechanism is to create one or more points and then create a batch aka +*batch points* and write these to a given database and series. A series is a +combination of a measurement (time/values) and a set of tags. + +In this sample we will create a batch of a 1,000 points. Each point has a time and +a single value as well as 2 tags indicating a shape and color. We write these points +to a database called _square_holes_ using a measurement named _shapes_. + +NOTE: You can specify a RetentionPolicy as part of the batch points. If not +provided InfluxDB will use the database _default_ retention policy. + +```go + +func writePoints(clnt client.Client) { + sampleSize := 1000 + + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + if err != nil { + log.Fatal(err) + } + + rand.Seed(time.Now().UnixNano()) + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + log.Fatal(err) + } + bp.AddPoint(pt) + } + + if err := clnt.Write(bp); err != nil { + log.Fatal(err) + } +} +``` + +### Querying Data + +One nice advantage of using **InfluxDB** the ability to query your data using familiar +SQL constructs. 
In this example we can create a convenience function to query the database +as follows: + +```go +// queryDB convenience function to query the database +func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) { + q := client.Query{ + Command: cmd, + Database: MyDB, + } + if response, err := clnt.Query(q); err == nil { + if response.Error() != nil { + return res, response.Error() + } + res = response.Results + } else { + return res, err + } + return res, nil +} +``` + +#### Creating a Database + +```go +_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB)) +if err != nil { + log.Fatal(err) +} +``` + +#### Count Records + +```go +q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement) +res, err := queryDB(clnt, q) +if err != nil { + log.Fatal(err) +} +count := res[0].Series[0].Values[0][1] +log.Printf("Found a total of %v records\n", count) +``` + +#### Find the last 10 _shapes_ records + +```go +q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20) +res, err = queryDB(clnt, q) +if err != nil { + log.Fatal(err) +} + +for i, row := range res[0].Series[0].Values { + t, err := time.Parse(time.RFC3339, row[0].(string)) + if err != nil { + log.Fatal(err) + } + val := row[1].(string) + log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val) +} +``` + +### Using the UDP Client + +The **InfluxDB** client also supports writing over UDP. + +```go +func WriteUDP() { + // Make client + c, err := client.NewUDPClient("localhost:8089") + if err != nil { + panic(err.Error()) + } + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + panic(err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} +``` + +### Point Splitting + +The UDP client now supports splitting single points that exceed the configured +payload size. The logic for processing each point is listed here, starting with +an empty payload. + +1. If adding the point to the current (non-empty) payload would exceed the + configured size, send the current payload. Otherwise, add it to the current + payload. +1. If the point is smaller than the configured size, add it to the payload. +1. If the point has no timestamp, just try to send the entire point as a single + UDP payload, and process the next point. +1. Since the point has a timestamp, re-use the existing measurement name, + tagset, and timestamp and create multiple new points by splitting up the + fields. The per-point length will be kept close to the configured size, + staying under it if possible. This does mean that one large field, maybe a + long string, could be sent as a larger-than-configured payload. + +The above logic attempts to respect configured payload sizes, but not sacrifice +any data integrity. Points without a timestamp can't be split, as that may +cause fields to have differing timestamps when processed by the server. + +## Go Docs + +Please refer to +[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2) +for documentation. + +## See Also + +You can also examine how the client library is used by the +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). 
diff --git a/vendor/github.com/influxdata/influxdb/client/example_test.go b/vendor/github.com/influxdata/influxdb/client/example_test.go new file mode 100644 index 0000000..f375383 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/example_test.go @@ -0,0 +1,113 @@ +package client_test + +import ( + "fmt" + "log" + "math/rand" + "net/url" + "os" + "strconv" + "time" + + "github.com/influxdata/influxdb/client" +) + +func ExampleNewClient() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. + conf := client.Config{ + URL: *host, + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + } + con, err := client.NewClient(conf) + if err != nil { + log.Fatal(err) + } + log.Println("Connection", con) +} + +func ExampleClient_Ping() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + dur, ver, err := con.Ping() + if err != nil { + log.Fatal(err) + } + log.Printf("Happy as a hippo! %v, %s", dur, ver) +} + +func ExampleClient_Query() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + q := client.Query{ + Command: "select count(value) from shapes", + Database: "square_holes", + } + if response, err := con.Query(q); err == nil && response.Error() == nil { + log.Println(response.Results) + } +} + +func ExampleClient_Write() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: "BumbeBeeTuna", + RetentionPolicy: "default", + } + _, err = con.Write(bps) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go new file mode 100644 index 0000000..773eb27 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -0,0 +1,832 @@ +// Package client implements a now-deprecated client for InfluxDB; +// use github.com/influxdata/influxdb/client/v2 instead. 
+package client // import "github.com/influxdata/influxdb/client" + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string + + // Chunked tells the server to send back chunked responses. This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. + ChunkSize int +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + } + if ssl { + u.Scheme = "https" + } + + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + UnixSocket string + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + WriteConsistency string + UnsafeSsl bool +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + unixSocket string + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. 
+func NewClient(c Config) (*Client, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.UnsafeSsl, + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + if c.UnixSocket != "" { + // No need for compression in local communications. + tr.DisableCompression = true + + tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", c.UnixSocket) + } + } + + client := Client{ + url: c.URL, + unixSocket: c.UnixSocket, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + u := c.url + + u.Path = "query" + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } + } + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = "write" + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = "write" + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + o.Messages = r.Messages + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + r.Messages = o.Messages + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. 
+type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. +func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. 
+ var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. +// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of 
the server the client is connected to. +func (c *Client) Addr() string { + if c.unixSocket != "" { + return c.unixSocket + } + return c.url.String() +} + +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. +func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb_test.go b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go new file mode 100644 index 0000000..39349c1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go @@ -0,0 +1,831 @@ +package client_test + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/client" +) + +func BenchmarkWrite(b *testing.B) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{ + Points: []client.Point{ + {Fields: map[string]interface{}{"value": 101}}}, + } + for i := 0; i < b.N; i++ { + r, err := c.Write(bp) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + b.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) + } + } +} + +func BenchmarkUnmarshalJSON2Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func BenchmarkUnmarshalJSON10Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1", + "tag1": "value1", + "tag2": "value2", + "tag2": "value3", + "tag4": "value4", + "tag5": "value5", + "tag6": "value6", + "tag7": "value7", + "tag8": "value8" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func TestNewClient(t *testing.T) { + config := client.Config{} + _, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + d, version, err := c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if d.Nanoseconds() == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds()) + } + if version != "x.x" { + t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + w.WriteHeader(http.StatusNoContent) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + u.User = url.UserPassword("username", "password") + config := client.Config{URL: *u, Username: "username", Password: "password"} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{} + r, err := c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, UserAgent: test.userAgent} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + receivedUserAgent = "" + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp := client.BatchPoints{} + _, err = c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Fatalf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_Messages(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`)) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + resp, err := c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + if got, exp := len(resp.Results), 1; got != exp { + t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got) + } + + r := resp.Results[0] + if got, exp := len(r.Messages), 1; got != exp { + t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got) + } + + m := r.Messages[0] + if got, exp := m.Level, "warning"; got != exp { + t.Errorf("unexpected message level. expected %v, actual %v", exp, got) + } + if got, exp := m.Text, "deprecation test"; got != exp { + t.Errorf("unexpected message text. expected %v, actual %v", exp, got) + } +} + +func TestPoint_UnmarshalEpoch(t *testing.T) { + now := time.Now() + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + { + name: "nanoseconds", + epoch: now.UnixNano(), + precision: "n", + expected: now, + }, + { + name: "microseconds", + epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), + precision: "u", + expected: now.Round(time.Microsecond), + }, + { + name: "milliseconds", + epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), + precision: "ms", + expected: now.Round(time.Millisecond), + }, + { + name: "seconds", + epoch: now.Round(time.Second).UnixNano() / int64(time.Second), + precision: "s", + expected: now.Round(time.Second), + }, + { + name: "minutes", + epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), + precision: "m", + expected: now.Round(time.Minute), + }, + { + name: "hours", + epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), + precision: "h", + expected: now.Round(time.Hour), + }, + { + name: "max int64", + epoch: 9223372036854775807, + precision: "n", + expected: time.Unix(0, 9223372036854775807), + }, + { + name: "100 years from now", + epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), + precision: "n", + expected: now.Add(time.Hour * 24 * 365 * 100), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_UnmarshalRFC(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + rfc string + now time.Time + expected time.Time + }{ + { + name: "RFC3339Nano", + rfc: time.RFC3339Nano, + now: now, + expected: now, + }, + { + name: "RFC3339", + rfc: time.RFC3339, + now: now.Round(time.Second), + expected: now.Round(time.Second), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + ts := test.now.Format(test.rfc) + data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_MarshalOmitempty(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + point client.Point + now time.Time + expected string + }{ + { + name: "all empty", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1}}`, + }, + { + name: "with time", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, + now: now, + expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), + }, + { + name: "with tags", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, + now: now, + expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, + }, + { + name: "with precision", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + b, err := json.Marshal(&test.point) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if test.expected != string(b) { + t.Fatalf("Unexpected result. 
expected: %v, actual: %v", test.expected, string(b)) + } + } +} + +func TestEpochToTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, + {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, + {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, + {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, + {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, + {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + tm, e := client.EpochToTime(test.epoch, test.precision) + if e != nil { + t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) + } + if tm != test.expected { + t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) + } + } +} + +// helper functions + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + w.Header().Set("X-Influxdb-Version", "x.x") + return + })) +} + +// Ensure that data with epoch times can be decoded. +func TestBatchPoints_Normal(t *testing.T) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069373, + "precision": "n", + "values": { + "value": 4541770385657154000 + } + }, + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069380, + "precision": "n", + "values": { + "value": 7199311900554737000 + } + } + ] +} +`) + + if err := json.Unmarshal(data, &bp); err != nil { + t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } +} + +func TestClient_Timeout(t *testing.T) { + done := make(chan bool) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-done + })) + defer ts.Close() + defer func() { done <- true }() + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + if err == nil { + t.Fatalf("unexpected success. expected timeout error") + } else if !strings.Contains(err.Error(), "request canceled") && + !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. 
expected 'request canceled' error, got %v", err) + } +} + +func TestClient_NoTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_WriteUint64(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + bp := client.BatchPoints{ + Points: []client.Point{ + { + Fields: map[string]interface{}{"value": uint64(10)}, + }, + }, + } + r, err := c.Write(bp) + if err == nil { + t.Fatalf("unexpected error. expected err, actual %v", err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_ParseConnectionString_IPv6(t *testing.T) { + path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" + u, err := client.ParseConnectionString(path, false) + if err != nil { + t.Fatalf("unexpected error, expected %v, actual %v", nil, err) + } + if u.Host != path { + t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) + } +} + +func TestClient_CustomCertificates(t *testing.T) { + // generated with: + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf + // influx.cnf: + // [req] + // distinguished_name = req_distinguished_name + // x509_extensions = v3_req + // prompt = no + // [req_distinguished_name] + // C = US + // ST = CA + // L = San Francisco + // O = InfluxDB + // CN = github.com/influxdata + // [v3_req] + // keyUsage = keyEncipherment, dataEncipherment + // extendedKeyUsage = serverAuth + // subjectAltName = @alt_names + // [alt_names] + // IP.1 = 127.0.0.1 + // + key := ` +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi +4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv +qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS +1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t +WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa +mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m +hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I +dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi +b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu +36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m +u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH +FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt +byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ +vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 +aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 
+BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K +Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 +3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T +OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi +elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 +2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K +5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk +bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C +cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg +/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ +cvh4WzEegcExTppINW1NB5E= +-----END PRIVATE KEY----- +` + cert := ` +-----BEGIN CERTIFICATE----- +MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G +A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 +NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw +FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE +AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK +JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr +XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ +3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK +u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW +37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti +MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw +MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN +AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ +m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F +3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk +rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY +jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW +war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= +-----END CERTIFICATE----- +` + cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) + + if err != nil { + t.Fatalf("Received error: %v", err) + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} + server.TLS.BuildNameToCertificate() + server.StartTLS() + defer server.Close() + + certFile, _ := ioutil.TempFile("", "influx-cert-") + certFile.WriteString(cert) + certFile.Close() + defer os.Remove(certFile.Name()) + + u, _ := url.Parse(server.URL) + + tests := []struct { + name string + unsafeSsl bool + expected error + }{ + {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, + {name: "not validate certificates", unsafeSsl: true, expected: nil}, + } + + for _, test := range tests { + config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + + if (test.expected == nil) != (err == nil) { + t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) + } + } +} + +func TestChunkedResponse(t *testing.T) { + s := `{"results":[{},{}]}{"results":[{}]}` + r := client.NewChunkedResponse(strings.NewReader(s)) + resp, err := r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 2 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 1 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if resp != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, resp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 0000000..7a057c1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,609 @@ +// Package client (v2) is the current official Go client for InfluxDB. +package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". + Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. 
+func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. +type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to syncronise access to url. + url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. + Points() []*Point + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. 
+ RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. +func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. +func (p *Point) PrecisionString(precison string) string { + return p.pt.PrecisionString(precison) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return string(p.pt.Name()) +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. 
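+//
+// A short sketch, assuming you already hold a models.Point (the measurement,
+// tag and field names below are illustrative):
+//
+//	mp, err := models.NewPoint("cpu", models.NewTags(map[string]string{"host": "a"}),
+//		map[string]interface{}{"idle": 10.1}, time.Now())
+//	if err == nil {
+//		p := NewPointFrom(mp)
+//		fmt.Println(p.String())
+//	}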
+func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = "write" + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return fmt.Errorf(result.Err) + } + } + return nil +} + +// Message represents a user message. +type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. 
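+//
+// A minimal sketch, assuming a Client c and a database named "mydb":
+//
+//	q := NewQuery("SELECT count(value) FROM cpu", "mydb", "ns")
+//	if response, err := c.Query(q); err == nil && response.Error() == nil {
+//		fmt.Println(response.Results)
+//	}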
+func (c *client) Query(q Query) (*Response, error) { + u := c.url + u.Path = "query" + + jsonParameters, err := json.Marshal(q.Parameters) + + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + params.Set("params", string(jsonParameters)) + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", + resp.StatusCode) + } + return &response, nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. 
+ io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go new file mode 100644 index 0000000..dfc9d1a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go @@ -0,0 +1,563 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +func TestUDPClient_Query(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + query := Query{} + _, err = c.Query(query) + if err == nil { + t.Error("Querying UDP client should fail") + } +} + +func TestUDPClient_Ping(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + rtt, version, err := c.Ping(0) + if rtt != 0 || version != "" || err != nil { + t.Errorf("unexpected error. expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) + } +} + +func TestUDPClient_Write(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + fields := make(map[string]interface{}) + fields["value"] = 1.0 + pt, _ := NewPoint("cpu", make(map[string]string), fields) + bp.AddPoint(pt) + + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestUDPClient_BadAddr(t *testing.T) { + config := UDPConfig{Addr: "foobar@wahoo"} + c, err := NewUDPClient(config) + if err == nil { + defer c.Close() + t.Error("Expected resolve error") + } +} + +func TestUDPClient_Batches(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 20 // should allow for two points per batch + + // expected point should look like this: "cpu a=1i" + fields := map[string]interface{}{"a": 1} + + p, _ := NewPoint("cpu", nil, fields, time.Time{}) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + for i := 0; i < 9; i++ { + bp.AddPoint(p) + } + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != 5 { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5) + } +} + +func TestUDPClient_Split(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 1 // force one field per point + + fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4} + + p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0)) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + bp.AddPoint(p) + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != len(fields) { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields)) + } +} + +type writeLogger struct { + writes [][]byte +} + +func (w *writeLogger) Write(b []byte) (int, error) { + w.writes = append(w.writes, append([]byte(nil), b...)) + return len(b), nil +} + +func (w *writeLogger) Close() error { return nil } + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, err := NewHTTPClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_BoundParameters(t *testing.T) { + var parameterString string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + r.ParseForm() + parameterString = r.FormValue("params") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + expectedParameters := map[string]interface{}{ + "testStringParameter": "testStringValue", + "testNumberParameter": 12.3, + } + + query := Query{ + Parameters: expectedParameters, + } + + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } + + var actualParameters map[string]interface{} + + err = json.Unmarshal([]byte(parameterString), &actualParameters) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + if !reflect.DeepEqual(expectedParameters, actualParameters) { + t.Errorf("unexpected parameters. expected %v, actual %v", expectedParameters, actualParameters) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Concurrent_Use(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + var wg sync.WaitGroup + wg.Add(3) + n := 1000 + + errC := make(chan error) + go func() { + defer wg.Done() + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + + for i := 0; i < n; i++ { + if err = c.Write(bp); err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + } + }() + + go func() { + defer wg.Done() + var q Query + for i := 0; i < n; i++ { + if _, err := c.Query(q); err != nil { + errC <- fmt.Errorf("got error %v", err) + return + } + } + }() + + go func() { + defer wg.Done() + for i := 0; i < n; i++ { + c.Ping(time.Second) + } + }() + + go func() { + wg.Wait() + close(errC) + }() + + for err := range errC { + if err != nil { + t.Error(err) + } + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + + config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} + c, _ := NewHTTPClient(config) + defer c.Close() + + receivedUserAgent = "" + query := Query{} + _, err = c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp, _ := NewBatchPoints(BatchPointsConfig{}) + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_PointString(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointWithoutTimeString(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointName(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + exp := "cpu_usage" + if p.Name() != exp { + t.Errorf("Error, got %s, expected %s", + p.Name(), exp) + } +} + +func TestClient_PointTags(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, 
"user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(tags, p.Tags()) { + t.Errorf("Error, got %v, expected %v", + p.Tags(), tags) + } +} + +func TestClient_PointUnixNano(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + exp := int64(1359849600000000000) + if p.UnixNano() != exp { + t.Errorf("Error, got %d, expected %d", + p.UnixNano(), exp) + } +} + +func TestClient_PointFields(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + pfields, err := p.Fields() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(fields, pfields) { + t.Errorf("Error, got %v, expected %v", + pfields, fields) + } +} + +func TestBatchPoints_PrecisionError(t *testing.T) { + _, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) + if err == nil { + t.Errorf("Precision: foobar should have errored") + } + + bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) + err = bp.SetPrecision("foobar") + if err == nil { + t.Errorf("Precision: foobar should have errored") + } +} + +func TestBatchPoints_SettersGetters(t *testing.T) { + bp, _ := NewBatchPoints(BatchPointsConfig{ + Precision: "ns", + Database: "db", + RetentionPolicy: "rp", + WriteConsistency: "wc", + }) + if bp.Precision() != "ns" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") + } + if bp.Database() != "db" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db") + } + if bp.RetentionPolicy() != "rp" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") + } + if bp.WriteConsistency() != "wc" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") + } + + bp.SetDatabase("db2") + bp.SetRetentionPolicy("rp2") + bp.SetWriteConsistency("wc2") + err := bp.SetPrecision("s") + if err != nil { + t.Errorf("Did not expect error: %s", err.Error()) + } + + if bp.Precision() != "s" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "s") + } + if bp.Database() != "db2" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db2") + } + if bp.RetentionPolicy() != "rp2" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") + } + if bp.WriteConsistency() != "wc2" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/example_test.go b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go new file mode 100644 index 0000000..68bb24b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go @@ -0,0 +1,265 @@ +package client_test + +import ( + "fmt" + "math/rand" + "os" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +// Create a new client +func ExampleClient() { + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. 
+ _, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } +} + +// Write a point using the UDP client +func ExampleClient_uDP() { + // Make client + config := client.UDPConfig{Addr: "localhost:8089"} + c, err := client.NewUDPClient(config) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Ping the cluster using the HTTP client +func ExampleClient_Ping() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + _, _, err = c.Ping(0) + if err != nil { + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) + } +} + +// Write a point using the HTTP client +func ExampleClient_write() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Create a batch and add a point +func ExampleBatchPoints() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Using the BatchPoints setter functions +func ExampleBatchPoints_setters() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) + bp.SetDatabase("BumbleBeeTuna") + bp.SetPrecision("ms") + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Create a new point with a timestamp +func ExamplePoint() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err == nil { + 
fmt.Println("We created a point: ", pt.String()) + } +} + +// Create a new point without a timestamp +func ExamplePoint_withoutTime() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields) + if err == nil { + fmt.Println("We created a point w/o time: ", pt.String()) + } +} + +// Write 1000 points +func ExampleClient_write1000() { + sampleSize := 1000 + + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + rand.Seed(42) + + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + println("Error:", err.Error()) + continue + } + bp.AddPoint(pt) + } + + err = c.Write(bp) + if err != nil { + fmt.Println("Error: ", err.Error()) + } +} + +// Make a Query +func ExampleClient_query() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} + +// Create a Database with a query +func ExampleClient_createDatabase() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("CREATE DATABASE telegraf", "", "") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 0000000..779a28b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. 
+func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. +func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go new file mode 100644 index 0000000..b8717b8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go @@ -0,0 +1,1077 @@ +// Package cli contains the logic of the influx command line client. +package cli // import "github.com/influxdata/influxdb/cmd/influx/cli" + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/signal" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "syscall" + "text/tabwriter" + + "golang.org/x/crypto/ssh/terminal" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/importer/v8" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/peterh/liner" +) + +// ErrBlankCommand is returned when a parsed command is empty. +var ErrBlankCommand = errors.New("empty input") + +// CommandLine holds CLI configuration and state. +type CommandLine struct { + Line *liner.State + Host string + Port int + Database string + Ssl bool + RetentionPolicy string + ClientVersion string + ServerVersion string + Pretty bool // controls pretty print for json + Format string // controls the output format. 
Valid values are json, csv, or column + Execute string + ShowVersion bool + Import bool + Chunked bool + ChunkSize int + Quit chan struct{} + IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing) + ForceTTY bool // Force the CLI to act as if it were connected to a TTY + osSignals chan os.Signal + historyFilePath string + + Client *client.Client + ClientConfig client.Config // Client config options. + ImporterConfig v8.Config // Importer configuration options. +} + +// New returns an instance of CommandLine with the specified client version. +func New(version string) *CommandLine { + return &CommandLine{ + ClientVersion: version, + Quit: make(chan struct{}, 1), + osSignals: make(chan os.Signal, 1), + Chunked: true, + } +} + +// Run executes the CLI. +func (c *CommandLine) Run() error { + hasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd())) + + var promptForPassword bool + // determine if they set the password flag but provided no value + for _, v := range os.Args { + v = strings.ToLower(v) + if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.ClientConfig.Password == "" { + promptForPassword = true + break + } + } + + // Check if we will be able to prompt for the password later. + if promptForPassword && !hasTTY { + return errors.New("Unable to prompt for a password with no TTY.") + } + + // Read environment variables for username/password. + if c.ClientConfig.Username == "" { + c.ClientConfig.Username = os.Getenv("INFLUX_USERNAME") + } + // If we are going to be prompted for a password, always use the entered password. + if promptForPassword { + // Open the liner (temporarily) and prompt for the password. + p, e := func() (string, error) { + l := liner.NewLiner() + defer l.Close() + return l.PasswordPrompt("password: ") + }() + if e != nil { + return errors.New("Unable to parse password") + } + c.ClientConfig.Password = p + } else if c.ClientConfig.Password == "" { + c.ClientConfig.Password = os.Getenv("INFLUX_PASSWORD") + } + + if err := c.Connect(""); err != nil { + msg := "Please check your connection settings and ensure 'influxd' is running." + if !c.Ssl && strings.Contains(err.Error(), "malformed HTTP response") { + // Attempt to connect with SSL and disable secure SSL for this test. + c.Ssl = true + unsafeSsl := c.ClientConfig.UnsafeSsl + c.ClientConfig.UnsafeSsl = true + if err := c.Connect(""); err == nil { + msg = "Please use the -ssl flag to connect using SSL." + } + c.Ssl = false + c.ClientConfig.UnsafeSsl = unsafeSsl + } else if c.Ssl && !c.ClientConfig.UnsafeSsl && strings.Contains(err.Error(), "certificate is valid for") { + // Attempt to connect with an insecure connection just to see if it works. + c.ClientConfig.UnsafeSsl = true + if err := c.Connect(""); err == nil { + msg = "You may use -unsafeSsl to connect anyway, but the SSL connection will not be secure." + } + c.ClientConfig.UnsafeSsl = false + } + return fmt.Errorf("Failed to connect to %s: %s\n%s", c.Client.Addr(), err.Error(), msg) + } + + // Modify precision. 
+ c.SetPrecision(c.ClientConfig.Precision) + + if c.Execute != "" { + // Make the non-interactive mode send everything through the CLI's parser + // the same way the interactive mode works + lines := strings.Split(c.Execute, "\n") + for _, line := range lines { + if err := c.ParseCommand(line); err != nil { + return err + } + } + return nil + } + + if c.Import { + addr := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + u, e := client.ParseConnectionString(addr, c.Ssl) + if e != nil { + return e + } + + // Copy the latest importer config and inject the latest client config + // into it. + config := c.ImporterConfig + config.Config = c.ClientConfig + config.URL = u + + i := v8.NewImporter(config) + if err := i.Import(); err != nil { + err = fmt.Errorf("ERROR: %s\n", err) + return err + } + return nil + } + + if !hasTTY { + cmd, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + return c.ExecuteQuery(string(cmd)) + } + + if !c.IgnoreSignals { + // register OS signals for graceful termination + signal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM) + } + + c.Line = liner.NewLiner() + defer c.Line.Close() + + c.Line.SetMultiLineMode(true) + + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + + c.Version() + + // Only load/write history if HOME environment variable is set. + if homeDir := os.Getenv("HOME"); homeDir != "" { + // Attempt to load the history file. + c.historyFilePath = filepath.Join(homeDir, ".influx_history") + if historyFile, err := os.Open(c.historyFilePath); err == nil { + c.Line.ReadHistory(historyFile) + historyFile.Close() + } + } + + // read from prompt until exit is run + return c.mainLoop() +} + +// mainLoop runs the main prompt loop for the CLI. +func (c *CommandLine) mainLoop() error { + for { + select { + case <-c.osSignals: + c.exit() + return nil + case <-c.Quit: + c.exit() + return nil + default: + l, e := c.Line.Prompt("> ") + if e == io.EOF { + // Instead of die, register that someone exited the program gracefully + l = "exit" + } else if e != nil { + c.exit() + return e + } + if err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), "auth") { + l = influxql.Sanitize(l) + c.Line.AppendHistory(l) + c.saveHistory() + } + } + } +} + +// ParseCommand parses an instruction and calls the related method +// or executes the command as a query against InfluxDB. +func (c *CommandLine) ParseCommand(cmd string) error { + lcmd := strings.TrimSpace(strings.ToLower(cmd)) + tokens := strings.Fields(lcmd) + + if len(tokens) > 0 { + switch tokens[0] { + case "exit", "quit": + close(c.Quit) + case "gopher": + c.gopher() + case "connect": + return c.Connect(cmd) + case "auth": + c.SetAuth(cmd) + case "help": + c.help() + case "history": + c.history() + case "format": + c.SetFormat(cmd) + case "precision": + c.SetPrecision(cmd) + case "consistency": + c.SetWriteConsistency(cmd) + case "settings": + c.Settings() + case "chunked": + c.Chunked = !c.Chunked + if c.Chunked { + fmt.Println("chunked responses enabled") + } else { + fmt.Println("chunked reponses disabled") + } + case "chunk": + c.SetChunkSize(cmd) + case "pretty": + c.Pretty = !c.Pretty + if c.Pretty { + fmt.Println("Pretty print enabled") + } else { + fmt.Println("Pretty print disabled") + } + case "use": + c.use(cmd) + case "insert": + return c.Insert(cmd) + case "clear": + c.clear(cmd) + default: + return c.ExecuteQuery(cmd) + } + + return nil + } + return ErrBlankCommand +} + +// Connect connects to a server. 
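+//
+// From the interactive prompt this is invoked as, for example:
+//
+//	connect localhost:8086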
+func (c *CommandLine) Connect(cmd string) error { + // Remove the "connect" keyword if it exists + addr := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) + if addr == "" { + // If they didn't provide a connection string, use the current settings + addr = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + } + + URL, err := client.ParseConnectionString(addr, c.Ssl) + if err != nil { + return err + } + + // Create copy of the current client config and create a new client. + ClientConfig := c.ClientConfig + ClientConfig.UserAgent = "InfluxDBShell/" + c.ClientVersion + ClientConfig.URL = URL + + client, err := client.NewClient(ClientConfig) + if err != nil { + return fmt.Errorf("Could not create client %s", err) + } + c.Client = client + + _, v, err := c.Client.Ping() + if err != nil { + return err + } + c.ServerVersion = v + + // Update the command with the current connection information + if host, port, err := net.SplitHostPort(ClientConfig.URL.Host); err == nil { + c.Host = host + if i, err := strconv.Atoi(port); err == nil { + c.Port = i + } + } + + return nil +} + +// SetAuth sets client authentication credentials. +func (c *CommandLine) SetAuth(cmd string) { + // If they pass in the entire command, we should parse it + // auth + args := strings.Fields(cmd) + if len(args) == 3 { + args = args[1:] + } else { + args = []string{} + } + + if len(args) == 2 { + c.ClientConfig.Username = args[0] + c.ClientConfig.Password = args[1] + } else { + u, e := c.Line.Prompt("username: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.ClientConfig.Username = strings.TrimSpace(u) + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.ClientConfig.Password = p + } + + // Update the client as well + c.Client.SetAuth(c.ClientConfig.Username, c.ClientConfig.Password) +} + +func (c *CommandLine) clear(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + v := strings.ToLower(strings.Join(args[1:], " ")) + switch v { + case "database", "db": + c.Database = "" + fmt.Println("database context cleared") + return + case "retention policy", "rp": + c.RetentionPolicy = "" + fmt.Println("retention policy context cleared") + return + default: + if len(args) > 1 { + fmt.Printf("invalid command %q.\n", v) + } + fmt.Println(`Possible commands for 'clear' are: + # Clear the database context + clear database + clear db + + # Clear the retention policy context + clear retention policy + clear rp + `) + } +} + +func (c *CommandLine) use(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + if len(args) != 2 { + fmt.Printf("Could not parse database name from %q.\n", cmd) + return + } + + stmt := args[1] + db, rp, err := parseDatabaseAndRetentionPolicy([]byte(stmt)) + if err != nil { + fmt.Printf("Unable to parse database or retention policy from %s", stmt) + return + } + + if !c.databaseExists(db) { + return + } + + c.Database = db + fmt.Printf("Using database %s\n", db) + + if rp != "" { + if !c.retentionPolicyExists(db, rp) { + return + } + c.RetentionPolicy = rp + fmt.Printf("Using retention policy %s\n", rp) + } +} + +func (c *CommandLine) databaseExists(db string) bool { + // Validate if specified database exists + response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return false + } else if err := response.Error(); err != nil { + if c.ClientConfig.Username == 
"" { + fmt.Printf("ERR: %s\n", err) + return false + } + // TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397. + // If we are unable to run SHOW DATABASES, display a warning and use the + // database anyway in case the person doesn't have permission to run the + // command, but does have permission to use the database. + fmt.Printf("WARN: %s\n", err) + } else { + // Verify the provided database exists + if databaseExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + if row.Name == "databases" { + for _, values := range row.Values { + for _, database := range values { + if database == db { + return true + } + } + } + } + } + } + return false + }(); !databaseExists { + fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", db) + return false + } + } + return true +} + +func (c *CommandLine) retentionPolicyExists(db, rp string) bool { + // Validate if specified database exists + response, err := c.Client.Query(client.Query{Command: fmt.Sprintf("SHOW RETENTION POLICIES ON %q", db)}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return false + } else if err := response.Error(); err != nil { + if c.ClientConfig.Username == "" { + fmt.Printf("ERR: %s\n", err) + return false + } + fmt.Printf("WARN: %s\n", err) + } else { + // Verify the provided database exists + if retentionPolicyExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + for _, values := range row.Values { + for i, v := range values { + if i != 0 { + continue + } + if v == rp { + return true + } + } + } + } + } + return false + }(); !retentionPolicyExists { + fmt.Printf("ERR: RETENTION POLICY %s doesn't exist. Run SHOW RETENTION POLICIES ON %q for a list of existing retention polices.\n", rp, db) + return false + } + } + return true +} + +// SetChunkSize sets the chunk size +// 0 sets it back to the default +func (c *CommandLine) SetChunkSize(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + cmd = strings.Join(strings.Fields(cmd), " ") + + // Remove the "chunk size" keyword if it exists + cmd = strings.TrimPrefix(cmd, "chunk size ") + + // Remove the "chunk" keyword if it exists + // allows them to use `chunk 50` as a shortcut + cmd = strings.TrimPrefix(cmd, "chunk ") + + if n, err := strconv.ParseInt(cmd, 10, 64); err == nil { + c.ChunkSize = int(n) + if c.ChunkSize <= 0 { + c.ChunkSize = 0 + } + fmt.Printf("chunk size set to %d\n", c.ChunkSize) + } else { + fmt.Printf("unable to parse chunk size from %q\n", cmd) + } +} + +// SetPrecision sets client precision. +func (c *CommandLine) SetPrecision(cmd string) { + // normalize cmd + cmd = strings.ToLower(cmd) + + // Remove the "precision" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1)) + + switch cmd { + case "h", "m", "s", "ms", "u", "ns": + c.ClientConfig.Precision = cmd + c.Client.SetPrecision(c.ClientConfig.Precision) + case "rfc3339": + c.ClientConfig.Precision = "" + c.Client.SetPrecision(c.ClientConfig.Precision) + default: + fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd) + } +} + +// SetFormat sets output format. 
+func (c *CommandLine) SetFormat(cmd string) { + // Remove the "format" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + switch cmd { + case "json", "csv", "column": + c.Format = cmd + default: + fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd) + } +} + +// SetWriteConsistency sets write consistency level. +func (c *CommandLine) SetWriteConsistency(cmd string) { + // Remove the "consistency" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + _, err := models.ParseConsistencyLevel(cmd) + if err != nil { + fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd) + return + } + c.ClientConfig.WriteConsistency = cmd +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } + +// isLetter returns true if the rune is a letter. +func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } + +// isDigit returns true if the rune is a digit. +func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } + +// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer. +func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } + +// isIdentChar returns true if the rune can be used in an unquoted identifier. +func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } + +func parseUnquotedIdentifier(stmt string) (string, string) { + if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, fields[0]) + } + return "", stmt +} + +func parseDoubleQuotedIdentifier(stmt string) (string, string) { + escapeNext := false + fields := strings.FieldsFunc(stmt, func(ch rune) bool { + if ch == '\\' { + escapeNext = true + } else if ch == '"' { + if !escapeNext { + return true + } + escapeNext = false + } + return false + }) + if len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") + } + return "", stmt +} + +func parseNextIdentifier(stmt string) (ident, remainder string) { + if len(stmt) > 0 { + switch { + case isWhitespace(rune(stmt[0])): + return parseNextIdentifier(stmt[1:]) + case isIdentFirstChar(rune(stmt[0])): + return parseUnquotedIdentifier(stmt) + case stmt[0] == '"': + return parseDoubleQuotedIdentifier(stmt) + } + } + return "", stmt +} + +func (c *CommandLine) parseInto(stmt string) *client.BatchPoints { + ident, stmt := parseNextIdentifier(stmt) + db, rp := c.Database, c.RetentionPolicy + if strings.HasPrefix(stmt, ".") { + db = ident + ident, stmt = parseNextIdentifier(stmt[1:]) + } + if strings.HasPrefix(stmt, " ") { + rp = ident + stmt = stmt[1:] + } + + return &client.BatchPoints{ + Points: []client.Point{ + client.Point{Raw: stmt}, + }, + Database: db, + RetentionPolicy: rp, + Precision: c.ClientConfig.Precision, + WriteConsistency: c.ClientConfig.WriteConsistency, + } +} + +func (c *CommandLine) parseInsert(stmt string) (*client.BatchPoints, error) { + i, point := parseNextIdentifier(stmt) + if !strings.EqualFold(i, "insert") { + return nil, fmt.Errorf("found %s, expected INSERT\n", i) + } + if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { + bp := c.parseInto(r) + return bp, nil + } + return &client.BatchPoints{ + Points: []client.Point{ + 
client.Point{Raw: point}, + }, + Database: c.Database, + RetentionPolicy: c.RetentionPolicy, + Precision: c.ClientConfig.Precision, + WriteConsistency: c.ClientConfig.WriteConsistency, + }, nil +} + +// Insert runs an INSERT statement. +func (c *CommandLine) Insert(stmt string) error { + bp, err := c.parseInsert(stmt) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return nil + } + if _, err := c.Client.Write(*bp); err != nil { + fmt.Printf("ERR: %s\n", err) + if c.Database == "" { + fmt.Println("Note: error may be due to not setting a database or retention policy.") + fmt.Println(`Please set a database with the command "use " or`) + fmt.Println("INSERT INTO . ") + } + } + return nil +} + +// query creates a query struct to be used with the client. +func (c *CommandLine) query(query string) client.Query { + return client.Query{ + Command: query, + Database: c.Database, + Chunked: c.Chunked, + ChunkSize: c.ChunkSize, + } +} + +// ExecuteQuery runs any query statement. +func (c *CommandLine) ExecuteQuery(query string) error { + // If we have a retention policy, we need to rewrite the statement sources + if c.RetentionPolicy != "" { + pq, err := influxql.NewParser(strings.NewReader(query)).ParseQuery() + if err != nil { + fmt.Printf("ERR: %s\n", err) + return err + } + for _, stmt := range pq.Statements { + if selectStatement, ok := stmt.(*influxql.SelectStatement); ok { + influxql.WalkFunc(selectStatement.Sources, func(n influxql.Node) { + if t, ok := n.(*influxql.Measurement); ok { + if t.Database == "" && c.Database != "" { + t.Database = c.Database + } + if t.RetentionPolicy == "" && c.RetentionPolicy != "" { + t.RetentionPolicy = c.RetentionPolicy + } + } + }) + } + } + query = pq.String() + } + response, err := c.Client.Query(c.query(query)) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return err + } + c.FormatResponse(response, os.Stdout) + if err := response.Error(); err != nil { + fmt.Printf("ERR: %s\n", response.Error()) + if c.Database == "" { + fmt.Println("Warning: It is possible this error is due to not setting a database.") + fmt.Println(`Please set a database with the command "use ".`) + } + return err + } + return nil +} + +// FormatResponse formats output to the previously chosen format. 
+func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { + switch c.Format { + case "json": + c.writeJSON(response, w) + case "csv": + c.writeCSV(response, w) + case "column": + c.writeColumns(response, w) + default: + fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) + } +} + +func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { + var data []byte + var err error + if c.Pretty { + data, err = json.MarshalIndent(response, "", " ") + } else { + data, err = json.Marshal(response) + } + if err != nil { + fmt.Fprintf(w, "Unable to parse json: %s\n", err) + return + } + fmt.Fprintln(w, string(data)) +} + +func tagsEqual(prev, current map[string]string) bool { + return reflect.DeepEqual(prev, current) +} + +func columnsEqual(prev, current []string) bool { + return reflect.DeepEqual(prev, current) +} + +func headersEqual(prev, current models.Row) bool { + if prev.Name != current.Name { + return false + } + return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns) +} + +func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { + csvw := csv.NewWriter(w) + var previousHeaders models.Row + for _, result := range response.Results { + suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0]) + if !suppressHeaders && len(result.Series) > 0 { + previousHeaders = models.Row{ + Name: result.Series[0].Name, + Tags: result.Series[0].Tags, + Columns: result.Series[0].Columns, + } + } + + // Create a tabbed writer for each result as they won't always line up + rows := c.formatResults(result, "\t", suppressHeaders) + for _, r := range rows { + csvw.Write(strings.Split(r, "\t")) + } + } + csvw.Flush() +} + +func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { + // Create a tabbed writer for each result as they won't always line up + writer := new(tabwriter.Writer) + writer.Init(w, 0, 8, 1, ' ', 0) + + var previousHeaders models.Row + for i, result := range response.Results { + // Print out all messages first + for _, m := range result.Messages { + fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text) + } + // Check to see if the headers are the same as the previous row. If so, suppress them in the output + suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0]) + if !suppressHeaders && len(result.Series) > 0 { + previousHeaders = models.Row{ + Name: result.Series[0].Name, + Tags: result.Series[0].Tags, + Columns: result.Series[0].Columns, + } + } + + // If we are suppressing headers, don't output the extra line return. If we + // aren't suppressing headers, then we put out line returns between results + // (not before the first result, and not after the last result). 
+ if !suppressHeaders && i > 0 { + fmt.Fprintln(writer, "") + } + + rows := c.formatResults(result, "\t", suppressHeaders) + for _, r := range rows { + fmt.Fprintln(writer, r) + } + + } + writer.Flush() +} + +// formatResults will behave differently if you are formatting for columns or csv +func (c *CommandLine) formatResults(result client.Result, separator string, suppressHeaders bool) []string { + rows := []string{} + // Create a tabbed writer for each result as they won't always line up + for i, row := range result.Series { + // gather tags + tags := []string{} + for k, v := range row.Tags { + tags = append(tags, fmt.Sprintf("%s=%s", k, v)) + sort.Strings(tags) + } + + columnNames := []string{} + + // Only put name/tags in a column if format is csv + if c.Format == "csv" { + if len(tags) > 0 { + columnNames = append([]string{"tags"}, columnNames...) + } + + if row.Name != "" { + columnNames = append([]string{"name"}, columnNames...) + } + } + + columnNames = append(columnNames, row.Columns...) + + // Output a line separator if we have more than one set or results and format is column + if i > 0 && c.Format == "column" && !suppressHeaders { + rows = append(rows, "") + } + + // If we are column format, we break out the name/tag to separate lines + if c.Format == "column" && !suppressHeaders { + if row.Name != "" { + n := fmt.Sprintf("name: %s", row.Name) + rows = append(rows, n) + } + if len(tags) > 0 { + t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) + rows = append(rows, t) + } + } + + if !suppressHeaders { + rows = append(rows, strings.Join(columnNames, separator)) + } + + // if format is column, write dashes under each column + if c.Format == "column" && !suppressHeaders { + lines := []string{} + for _, columnName := range columnNames { + lines = append(lines, strings.Repeat("-", len(columnName))) + } + rows = append(rows, strings.Join(lines, separator)) + } + + for _, v := range row.Values { + var values []string + if c.Format == "csv" { + if row.Name != "" { + values = append(values, row.Name) + } + if len(tags) > 0 { + values = append(values, strings.Join(tags, ",")) + } + } + + for _, vv := range v { + values = append(values, interfaceToString(vv)) + } + rows = append(rows, strings.Join(values, separator)) + } + } + return rows +} + +func interfaceToString(v interface{}) string { + switch t := v.(type) { + case nil: + return "" + case bool: + return fmt.Sprintf("%v", v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: + return fmt.Sprintf("%d", t) + case float32, float64: + return fmt.Sprintf("%v", t) + default: + return fmt.Sprintf("%v", t) + } +} + +// Settings prints current settings. 
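+// The output is a two-column table written to stdout; an illustrative
+// excerpt (actual values depend on the current session) could look like:
+//
+//	Setting     Value
+//	--------    --------
+//	Host        localhost:8086
+//	Database    mydb
+//	Format      column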
+func (c *CommandLine) Settings() { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 1, 1, ' ', 0) + fmt.Fprintln(w, "Setting\tValue") + fmt.Fprintln(w, "--------\t--------") + if c.Port > 0 { + fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) + } else { + fmt.Fprintf(w, "Host\t%s\n", c.Host) + } + fmt.Fprintf(w, "Username\t%s\n", c.ClientConfig.Username) + fmt.Fprintf(w, "Database\t%s\n", c.Database) + fmt.Fprintf(w, "RetentionPolicy\t%s\n", c.RetentionPolicy) + fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) + fmt.Fprintf(w, "Format\t%s\n", c.Format) + fmt.Fprintf(w, "Write Consistency\t%s\n", c.ClientConfig.WriteConsistency) + fmt.Fprintf(w, "Chunked\t%v\n", c.Chunked) + fmt.Fprintf(w, "Chunk Size\t%d\n", c.ChunkSize) + fmt.Fprintln(w) + w.Flush() +} + +func (c *CommandLine) help() { + fmt.Println(`Usage: + connect connects to another node specified by host:port + auth prompts for username and password + pretty toggles pretty print for the json format + chunked turns on chunked responses from server + chunk size sets the size of the chunked responses. Set to 0 to reset to the default chunked size + use sets current database + format specifies the format of the server responses: json, csv, or column + precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns + consistency sets write consistency level: any, one, quorum, or all + history displays command history + settings outputs the current settings for the shell + clear clears settings such as database or retention policy. run 'clear' for help + exit/quit/ctrl+d quits the influx shell + + show databases show database names + show series show series information + show measurements show measurement information + show tag keys show tag key information + show field keys show field key information + + A full list of influxql commands can be found at: + https://docs.influxdata.com/influxdb/latest/query_language/spec/ +`) +} + +func (c *CommandLine) history() { + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + fmt.Print(buf.String()) +} + +func (c *CommandLine) saveHistory() { + if historyFile, err := os.Create(c.historyFilePath); err != nil { + fmt.Printf("There was an error writing history file: %s\n", err) + } else { + c.Line.WriteHistory(historyFile) + historyFile.Close() + } +} + +func (c *CommandLine) gopher() { + fmt.Println(` + .-::-::://:-::- .:/++/' + '://:-''/oo+//++o+/.://o- ./+: + .:-. '++- .o/ '+yydhy' o- + .:/. .h: :osoys .smMN- :/ + -/:.' s- /MMMymh. '/y/ s' + -+s:'''' d -mMMms// '-/o: + -/++/++/////:. o: '... s- :s. + :+-+s-' ':/' 's- /+ 'o: + '+-'o: /ydhsh. '//. '-o- o- + .y. o: .MMMdm+y ':+++:::/+:.' s: + .-h/ y- 'sdmds'h -+ydds:::-.' 'h. + .//-.d' o: '.' 'dsNMMMNh:.:++' :y + +y. 'd 's. .s:mddds: ++ o/ + 'N- odd 'o/. './o-s-' .---+++' o- + 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' + so' .h '''' ////s: '+. .s +y' + os/-.y' 's' 'y::+ +d' + '.:o/ -+:-:.' so.---.' + o' 'd-.''/s' + .s' :y.''.y + -s mo:::' + :: yh + // '''' /M' + o+ .s///:/. 'N: + :+ /: -s' ho + 's- -/s/:+/.+h' +h + ys' ':' '-. -d + oh .h + /o .s + s. .h + -y .d + m/ -h + +d /o + 'N- y: + h: m. + s- -d + o- s+ + +- 'm' + s/ oo--. + y- /s ':+' + s' 'od--' .d: + -+ ':o: ':+-/+ + y- .:+- ' + //o- '.:+/. + .-:+/' ''-/+/. + ./:' ''.:o+/-' + .+o:/:/+-' ''.-+ooo/-' + o: -h///++////-. + /: .o/ + //+ 'y + ./sooy. + +`) +} + +// Version prints the CLI version. 
+func (c *CommandLine) Version() { + fmt.Println("InfluxDB shell version:", c.ClientVersion) +} + +func (c *CommandLine) exit() { + // write to history file + c.saveHistory() + // release line resources + c.Line.Close() + c.Line = nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go new file mode 100644 index 0000000..a1eacd0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go @@ -0,0 +1,58 @@ +package cli + +import "testing" + +func TestParseCommand_InsertInto(t *testing.T) { + t.Parallel() + + c := CommandLine{} + + tests := []struct { + cmd, db, rp string + }{ + { + cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test", + }, + { + cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, + db: "d b", + rp: "test test", + }, + } + + for _, test := range tests { + t.Logf("command: %s", test.cmd) + bp, err := c.parseInsert(test.cmd) + if err != nil { + t.Fatal(err) + } + if bp.Database != test.db { + t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, bp.Database) + } + if bp.RetentionPolicy != test.rp { + t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, bp.RetentionPolicy) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go new file mode 100644 index 0000000..bc2c222 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go @@ -0,0 +1,594 @@ +package cli_test + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" + "github.com/influxdata/influxdb/influxql" + "github.com/peterh/liner" +) + +const ( + CLIENT_VERSION = "y.y" + SERVER_VERSION = "x.x" +) + +func TestNewCLI(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + + if c == nil { + t.Fatal("CommandLine shouldn't be nil.") + } + + if c.ClientVersion != CLIENT_VERSION { + t.Fatalf("CommandLine version is %s but should be %s", c.ClientVersion, CLIENT_VERSION) + } +} + +func TestRunCLI(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.IgnoreSignals = true + c.ForceTTY = true + go func() { + close(c.Quit) + }() + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestRunCLI_ExecuteInsert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.ClientConfig.Precision = "ms" + c.Execute = "INSERT sensor,floor=1 value=2" + 
c.IgnoreSignals = true + c.ForceTTY = true + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestSetAuth(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + u := "userx" + p := "pwdy" + c.SetAuth("auth " + u + " " + p) + + // validate CLI configuration + if c.ClientConfig.Username != u { + t.Fatalf("Username is %s but should be %s", c.ClientConfig.Username, u) + } + if c.ClientConfig.Password != p { + t.Fatalf("Password is %s but should be %s", c.ClientConfig.Password, p) + } +} + +func TestSetPrecision(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default precision + p := "ns" + c.SetPrecision("precision " + p) + if c.ClientConfig.Precision != p { + t.Fatalf("Precision is %s but should be %s", c.ClientConfig.Precision, p) + } + + // validate set default precision which equals empty string + p = "rfc3339" + c.SetPrecision("precision " + p) + if c.ClientConfig.Precision != "" { + t.Fatalf("Precision is %s but should be empty", c.ClientConfig.Precision) + } +} + +func TestSetFormat(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default format + f := "json" + c.SetFormat("format " + f) + if c.Format != f { + t.Fatalf("Format is %s but should be %s", c.Format, f) + } +} + +func Test_SetChunked(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // make sure chunked is on by default + if got, exp := c.Chunked, true; got != exp { + t.Fatalf("chunked should be on by default. got %v, exp %v", got, exp) + } + + // turn chunked off + if err := c.ParseCommand("Chunked"); err != nil { + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.Chunked, false; got != exp { + t.Fatalf("setting chunked failed. got %v, exp %v", got, exp) + } + + // turn chunked back on + if err := c.ParseCommand("Chunked"); err != nil { + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.Chunked, true; got != exp { + t.Fatalf("setting chunked failed. got %v, exp %v", got, exp) + } +} + +func Test_SetChunkSize(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // check default chunk size + if got, exp := c.ChunkSize, 0; got != exp { + t.Fatalf("unexpected chunk size. got %d, exp %d", got, exp) + } + + tests := []struct { + command string + exp int + }{ + {"chunk size 20", 20}, + {" CHunk siZE 55 ", 55}, + {"chunk 10", 10}, + {" chuNK 15", 15}, + {"chunk size -60", 0}, + {"chunk size 10", 10}, + {"chunk size 0", 0}, + {"chunk size 10", 10}, + {"chunk size junk", 10}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.command); err != nil { + t.Logf("command: %q", test.command) + t.Fatalf("setting chunked failed: err: %s", err) + } + + if got, exp := c.ChunkSize, test.exp; got != exp { + t.Logf("command: %q", test.command) + t.Fatalf("unexpected chunk size. 
got %d, exp %d", got, exp) + } + } +} + +func TestSetWriteConsistency(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // set valid write consistency + consistency := "all" + c.SetWriteConsistency("consistency " + consistency) + if c.ClientConfig.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } + + // set different valid write consistency and validate change + consistency = "quorum" + c.SetWriteConsistency("consistency " + consistency) + if c.ClientConfig.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } + + // set invalid write consistency and verify there was no change + invalidConsistency := "invalid_consistency" + c.SetWriteConsistency("consistency " + invalidConsistency) + if c.ClientConfig.WriteConsistency == invalidConsistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency) + } +} + +func TestParseCommand_CommandsExist(t *testing.T) { + t.Parallel() + c, err := client.NewClient(client.Config{}) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + m := cli.CommandLine{Client: c, Line: liner.NewLiner()} + tests := []struct { + cmd string + }{ + {cmd: "gopher"}, + {cmd: "auth"}, + {cmd: "help"}, + {cmd: "format"}, + {cmd: "precision"}, + {cmd: "settings"}, + } + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil`, err, test.cmd) + } + } +} + +func TestParseCommand_Connect(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + cmd := "connect " + u.Host + c := cli.CommandLine{} + + // assert connection is established + if err := c.ParseCommand(cmd); err != nil { + t.Fatalf("There was an error while connecting to %v: %v", u.Path, err) + } + + // assert server version is populated + if c.ServerVersion != SERVER_VERSION { + t.Fatalf("Server version is %s but should be %s.", c.ServerVersion, SERVER_VERSION) + } +} + +func TestParseCommand_TogglePretty(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } + c.ParseCommand("pretty") + if !c.Pretty { + t.Fatalf(`Pretty should be true.`) + } + c.ParseCommand("pretty") + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } +} + +func TestParseCommand_Exit(t *testing.T) { + t.Parallel() + tests := []struct { + cmd string + }{ + {cmd: "exit"}, + {cmd: " exit"}, + {cmd: "exit "}, + {cmd: "Exit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "exit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Quit(t *testing.T) { + t.Parallel() + tests := []struct { + cmd string + }{ + {cmd: "quit"}, + {cmd: " quit"}, + {cmd: "quit "}, + {cmd: "Quit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "quit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Use(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + 
c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + cmd string + }{ + {cmd: "use db"}, + {cmd: " use db"}, + {cmd: "use db "}, + {cmd: "use db;"}, + {cmd: "use db; "}, + {cmd: "Use db"}, + } + + for _, test := range tests { + m := cli.CommandLine{Client: c} + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if m.Database != "db" { + t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database) + } + } +} + +func TestParseCommand_UseAuth(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + tests := []struct { + cmd string + user string + database string + }{ + { + cmd: "use db", + user: "admin", + database: "db", + }, + { + cmd: "use blank", + user: "admin", + database: "", + }, + { + cmd: "use db", + user: "anonymous", + database: "db", + }, + { + cmd: "use blank", + user: "anonymous", + database: "blank", + }, + } + + for i, tt := range tests { + config := client.Config{URL: *u, Username: tt.user} + fmt.Println("using auth:", tt.user) + c, err := client.NewClient(config) + if err != nil { + t.Errorf("%d. unexpected error. expected %v, actual %v", i, nil, err) + continue + } + m := cli.CommandLine{Client: c} + m.ClientConfig.Username = tt.user + + if err := m.ParseCommand(tt.cmd); err != nil { + t.Fatalf(`%d. Got error %v for command %q, expected nil.`, i, err, tt.cmd) + } + + if m.Database != tt.database { + t.Fatalf(`%d. Command "use" changed database to %q. Expected %q`, i, m.Database, tt.database) + } + } +} + +func TestParseCommand_Consistency(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "consistency one"}, + {cmd: " consistency one"}, + {cmd: "consistency one "}, + {cmd: "consistency one;"}, + {cmd: "consistency one; "}, + {cmd: "Consistency one"}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if c.ClientConfig.WriteConsistency != "one" { + t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.ClientConfig.WriteConsistency) + } + } +} + +func TestParseCommand_Insert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + m := cli.CommandLine{Client: c} + + tests := []struct { + cmd string + }{ + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, + {cmd: "insert"}, + {cmd: "Insert "}, + {cmd: "insert c"}, + {cmd: "insert int"}, + } + + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } +} + +func TestParseCommand_History(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("abc") + + tests := []struct { + cmd string + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "History "}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } + + // buf size should be at least 1 + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + if buf.Len() < 1 { + t.Fatal("History is borked") + } +} + +func TestParseCommand_HistoryWithBlankCommand(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("x") + + tests := []struct { + cmd string + err error + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + } + + // a blank command will return cli.ErrBlankCommand. + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != test.err { + t.Errorf(`Got error %v for command %q, expected %v`, err, test.cmd, test.err) + } + } + + // buf shall not contain empty commands + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + scanner := bufio.NewScanner(&buf) + for scanner.Scan() { + if strings.TrimSpace(scanner.Text()) == "" { + t.Fatal("Empty commands should not be persisted in history.") + } + } +} + +// helper methods + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Influxdb-Version", SERVER_VERSION) + + // Fake authorization entirely based on the username. 
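+		// Only an empty username or "admin" is treated as authorized here;
+		// any other username exercises the unauthorized branch of the
+		// SHOW DATABASES handler below.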
+ authorized := false + user, _, _ := r.BasicAuth() + switch user { + case "", "admin": + authorized = true + } + + switch r.URL.Path { + case "/query": + values := r.URL.Query() + parser := influxql.NewParser(bytes.NewBufferString(values.Get("q"))) + q, err := parser.ParseQuery() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + stmt := q.Statements[0] + + switch stmt.(type) { + case *influxql.ShowDatabasesStatement: + if authorized { + io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`) + } else { + w.WriteHeader(http.StatusUnauthorized) + io.WriteString(w, fmt.Sprintf(`{"error":"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege"}`, user)) + } + case *influxql.ShowDiagnosticsStatement: + io.WriteString(w, `{"results":[{}]}`) + } + case "/write": + w.WriteHeader(http.StatusOK) + } + })) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go new file mode 100644 index 0000000..4134c02 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go @@ -0,0 +1,34 @@ +package cli + +import ( + "bytes" + "fmt" +) + +func parseDatabaseAndRetentionPolicy(stmt []byte) (string, string, error) { + var db, rp []byte + var quoted bool + var seperatorCount int + + stmt = bytes.TrimSpace(stmt) + + for _, b := range stmt { + if b == '"' { + quoted = !quoted + continue + } + if b == '.' && !quoted { + seperatorCount++ + if seperatorCount > 1 { + return "", "", fmt.Errorf("unable to parse database and retention policy from %s", string(stmt)) + } + continue + } + if seperatorCount == 1 { + rp = append(rp, b) + continue + } + db = append(db, b) + } + return string(db), string(rp), nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go new file mode 100644 index 0000000..5bf955c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go @@ -0,0 +1,90 @@ +package cli + +import ( + "errors" + "testing" +) + +func Test_parseDatabaseAndretentionPolicy(t *testing.T) { + tests := []struct { + stmt string + db string + rp string + err error + }{ + { + stmt: `foo`, + db: "foo", + }, + { + stmt: `"foo.bar"`, + db: "foo.bar", + }, + { + stmt: `"foo.bar".`, + db: "foo.bar", + }, + { + stmt: `."foo.bar"`, + rp: "foo.bar", + }, + { + stmt: `foo.bar`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo".bar`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo"."bar"`, + db: "foo", + rp: "bar", + }, + { + stmt: `"foo.bin"."bar"`, + db: "foo.bin", + rp: "bar", + }, + { + stmt: `"foo.bin"."bar.baz...."`, + db: "foo.bin", + rp: "bar.baz....", + }, + { + stmt: ` "foo.bin"."bar.baz...." 
`, + db: "foo.bin", + rp: "bar.baz....", + }, + + { + stmt: `"foo.bin"."bar".boom`, + err: errors.New("foo"), + }, + { + stmt: "foo.bar.", + err: errors.New("foo"), + }, + } + + for _, test := range tests { + db, rp, err := parseDatabaseAndRetentionPolicy([]byte(test.stmt)) + if err != nil && test.err == nil { + t.Errorf("unexpected error: got %s", err) + continue + } + if test.err != nil && err == nil { + t.Errorf("expected err: got: nil, exp: %s", test.err) + continue + } + if db != test.db { + t.Errorf("unexpected database: got: %s, exp: %s", db, test.db) + } + if rp != test.rp { + t.Errorf("unexpected retention policy: got: %s, exp: %s", rp, test.rp) + } + } + +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go new file mode 100644 index 0000000..e00d018 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go @@ -0,0 +1,120 @@ +// The influx command is a CLI client to InfluxDB. +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" +) + +// These variables are populated via the Go linker. +var ( + version string +) + +const ( + // defaultFormat is the default format of the results when issuing queries + defaultFormat = "column" + + // defaultPrecision is the default timestamp format of the results when issuing queries + defaultPrecision = "ns" + + // defaultPPS is the default points per second that the import will throttle at + // by default it's 0, which means it will not throttle + defaultPPS = 0 +) + +func init() { + // If version is not set, make that clear. + if version == "" { + version = "unknown" + } +} + +func main() { + c := cli.New(version) + + fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) + fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") + fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") + fs.StringVar(&c.ClientConfig.UnixSocket, "socket", "", "Influxdb unix socket to connect to.") + fs.StringVar(&c.ClientConfig.Username, "username", "", "Username to connect to the server.") + fs.StringVar(&c.ClientConfig.Password, "password", "", `Password to connect to the server. Leaving blank will prompt for password (--password="").`) + fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") + fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") + fs.BoolVar(&c.ClientConfig.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.") + fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") + fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") + fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.") + fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") + fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") + fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") + fs.BoolVar(&c.Import, "import", false, "Import a previous database.") + fs.IntVar(&c.ImporterConfig.PPS, "pps", defaultPPS, "How many points per second the import will allow. 
By default it is zero and will not throttle importing.") + fs.StringVar(&c.ImporterConfig.Path, "path", "", "path to the file to import") + fs.BoolVar(&c.ImporterConfig.Compressed, "compressed", false, "set to true if the import file is compressed") + + // Define our own custom usage to print + fs.Usage = func() { + fmt.Println(`Usage of influx: + -version + Display the version and exit. + -host 'host name' + Host to connect to. + -port 'port #' + Port to connect to. + -socket 'unix domain socket' + Unix socket to connect to. + -database 'database name' + Database to connect to the server. + -password 'password' + Password to connect to the server. Leaving blank will prompt for password (--password ''). + -username 'username' + Username to connect to the server. + -ssl + Use https for requests. + -unsafeSsl + Set this when connecting to the cluster using https and not use SSL verification. + -execute 'command' + Execute command and quit. + -format 'json|csv|column' + Format specifies the format of the server responses: json, csv, or column. + -precision 'rfc3339|h|m|s|ms|u|ns' + Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. + -consistency 'any|one|quorum|all' + Set write consistency level: any, one, quorum, or all + -pretty + Turns on pretty print for the json format. + -import + Import a previous database export from file + -pps + How many points per second the import will allow. By default it is zero and will not throttle importing. + -path + Path to file to import + -compressed + Set to true if the import file is compressed + +Examples: + + # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: + $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty + + # Connect to a specific database on startup and set database context: + $ influx -database 'metrics' -host 'localhost' -port '8086' +`) + } + fs.Parse(os.Args[1:]) + + if c.ShowVersion { + c.Version() + os.Exit(0) + } + + if err := c.Run(); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md new file mode 100644 index 0000000..bcdf418 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/README.md @@ -0,0 +1,107 @@ +# `influx_inspect` + +## Ways to run + +### `influx_inspect` +Will print usage for the tool. + +### `influx_inspect report` +Displays series meta-data for all shards. Default location [$HOME/.influxdb] + +### `influx_inspect dumptsm` +Dumps low-level details about tsm1 files + +#### Flags + +##### `-index` bool +Dump raw index data. + +`default` = false + +#### `-blocks` bool +Dump raw block data. + +`default` = false + +#### `-all` +Dump all data. Caution: This may print a lot of information. + +`default` = false + +#### `-filter-key` +Only display index and block data match this key substring. + +`default` = "" + + +### `influx_inspect export` +Exports all tsm files to line protocol. This output file can be imported via the [influx](https://github.com/influxdata/influxdb/tree/master/importer#running-the-import-command) command. + + +#### `-datadir` string +Data storage path. + +`default` = "$HOME/.influxdb/data" + +#### `-waldir` string +WAL storage path. 
+ +`default` = "$HOME/.influxdb/wal" + +#### `-out` string +Destination file to export to + +`default` = "$HOME/.influxdb/export" + +#### `-database` string (optional) +Database to export. + +`default` = "" + +#### `-retention` string (optional) +Retention policy to export. + +`default` = "" + +#### `-start` string (optional) +Optional. The time range to start at. + +#### `-end` string (optional) +Optional. The time range to end at. + +#### `-compress` bool (optional) +Compress the output. + +`default` = false + +#### Sample Commands + +Export entire database and compress output: +``` +influx_inspect export --compress +``` + +Export specific retention policy: +``` +influx_inspect export --database mydb --retention autogen +``` + +##### Sample Data +This is a sample of what the output will look like. + +``` +# DDL +CREATE DATABASE MY_DB_NAME +CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1 + +# DML +# CONTEXT-DATABASE:MY_DB_NAME +# CONTEXT-RETENTION-POLICY:autogen +randset value=97.9296104805 1439856000000000000 +randset value=25.3849066842 1439856100000000000 +``` + +# Caveats + +The system does not have access to the meta store when exporting TSM shards. As such, it always creates the retention policy with infinite duration and replication factor of 1. +End users may want to change this prior to re-importing if they are importing to a cluster or want a different duration for retention. diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go new file mode 100644 index 0000000..830271a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go @@ -0,0 +1,474 @@ +// Package dumptsi inspects low-level details about tsi1 files. +package dumptsi + +import ( + "flag" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "text/tabwriter" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/index/tsi1" +) + +// Command represents the program execution for "influxd dumptsi". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + paths []string + + showSeries bool + showMeasurements bool + showTagKeys bool + showTagValues bool + showTagValueSeries bool + + measurementFilter *regexp.Regexp + tagKeyFilter *regexp.Regexp + tagValueFilter *regexp.Regexp +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. 
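+// An illustrative invocation (the path is hypothetical) that prints the raw
+// measurement list for a single tsi1 index directory:
+//
+//	cmd := NewCommand()
+//	if err := cmd.Run("-measurements", "/var/lib/influxdb/data/db/rp/1/index"); err != nil {
+//		// handle error
+//	}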
+func (cmd *Command) Run(args ...string) error { + var measurementFilter, tagKeyFilter, tagValueFilter string + fs := flag.NewFlagSet("dumptsi", flag.ExitOnError) + fs.BoolVar(&cmd.showSeries, "series", false, "Show raw series data") + fs.BoolVar(&cmd.showMeasurements, "measurements", false, "Show raw measurement data") + fs.BoolVar(&cmd.showTagKeys, "tag-keys", false, "Show raw tag key data") + fs.BoolVar(&cmd.showTagValues, "tag-values", false, "Show raw tag value data") + fs.BoolVar(&cmd.showTagValueSeries, "tag-value-series", false, "Show raw series data for each value") + fs.StringVar(&measurementFilter, "measurement-filter", "", "Regex measurement filter") + fs.StringVar(&tagKeyFilter, "tag-key-filter", "", "Regex tag key filter") + fs.StringVar(&tagValueFilter, "tag-value-filter", "", "Regex tag value filter") + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } + + // Parse filters. + if measurementFilter != "" { + re, err := regexp.Compile(measurementFilter) + if err != nil { + return err + } + cmd.measurementFilter = re + } + if tagKeyFilter != "" { + re, err := regexp.Compile(tagKeyFilter) + if err != nil { + return err + } + cmd.tagKeyFilter = re + } + if tagValueFilter != "" { + re, err := regexp.Compile(tagValueFilter) + if err != nil { + return err + } + cmd.tagValueFilter = re + } + + cmd.paths = fs.Args() + if len(cmd.paths) == 0 { + fmt.Printf("at least one path required\n\n") + fs.Usage() + return nil + } + + // Some flags imply other flags. + if cmd.showTagValueSeries { + cmd.showTagValues = true + } + if cmd.showTagValues { + cmd.showTagKeys = true + } + if cmd.showTagKeys { + cmd.showMeasurements = true + } + + return cmd.run() +} + +func (cmd *Command) run() error { + // Build a file set from the paths on the command line. + idx, fs, err := cmd.readFileSet() + if err != nil { + return err + } + + if idx != nil { + defer idx.Close() + } else { + defer fs.Close() + } + defer fs.Release() + + // Show either raw data or summary stats. + if cmd.showSeries || cmd.showMeasurements { + if err := cmd.printMerged(fs); err != nil { + return err + } + } else { + if err := cmd.printFileSummaries(fs); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) { + // If only one path exists and it's a directory then open as an index. + if len(cmd.paths) == 1 { + fi, err := os.Stat(cmd.paths[0]) + if err != nil { + return nil, nil, err + } else if fi.IsDir() { + idx := tsi1.NewIndex() + idx.Path = cmd.paths[0] + idx.CompactionEnabled = false + if err := idx.Open(); err != nil { + return nil, nil, err + } + return idx, idx.RetainFileSet(), nil + } + } + + // Open each file and group into a fileset. 
+ var files []tsi1.File + for _, path := range cmd.paths { + switch ext := filepath.Ext(path); ext { + case tsi1.LogFileExt: + f := tsi1.NewLogFile(path) + if err := f.Open(); err != nil { + return nil, nil, err + } + files = append(files, f) + + case tsi1.IndexFileExt: + f := tsi1.NewIndexFile() + f.SetPath(path) + if err := f.Open(); err != nil { + return nil, nil, err + } + files = append(files, f) + + default: + return nil, nil, fmt.Errorf("unexpected file extension: %s", ext) + } + } + + fs, err := tsi1.NewFileSet(nil, files) + if err != nil { + return nil, nil, err + } + fs.Retain() + + return nil, fs, nil +} + +func (cmd *Command) printMerged(fs *tsi1.FileSet) error { + if err := cmd.printSeries(fs); err != nil { + return err + } else if err := cmd.printMeasurements(fs); err != nil { + return err + } + return nil +} + +func (cmd *Command) printSeries(fs *tsi1.FileSet) error { + if !cmd.showSeries { + return nil + } + + // Print header. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, "Series\t") + + // Iterate over each series. + itr := fs.SeriesIterator() + for e := itr.Next(); e != nil; e = itr.Next() { + name, tags := e.Name(), e.Tags() + + if !cmd.matchSeries(e.Name(), e.Tags()) { + continue + } + + fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(e.Deleted())) + } + + // Flush & write footer spacing. + if err := tw.Flush(); err != nil { + return err + } + fmt.Fprint(cmd.Stdout, "\n\n") + + return nil +} + +func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error { + if !cmd.showMeasurements { + return nil + } + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, "Measurement\t") + + // Iterate over each series. + itr := fs.MeasurementIterator() + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(e.Name()) { + continue + } + + fmt.Fprintf(tw, "%s\t%v\n", e.Name(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagKeys(fs, e.Name()); err != nil { + return err + } + } + + fmt.Fprint(cmd.Stdout, "\n\n") + + return nil +} + +func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error { + if !cmd.showTagKeys { + return nil + } + + // Iterate over each key. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagKeyIterator(name) + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.tagKeyFilter != nil && !cmd.tagKeyFilter.Match(e.Key()) { + continue + } + + fmt.Fprintf(tw, " %s\t%v\n", e.Key(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagValues(fs, name, e.Key()); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error { + if !cmd.showTagValues { + return nil + } + + // Iterate over each value. 
+ tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagValueIterator(name, key) + for e := itr.Next(); e != nil; e = itr.Next() { + if cmd.tagValueFilter != nil && !cmd.tagValueFilter.Match(e.Value()) { + continue + } + + fmt.Fprintf(tw, " %s\t%v\n", e.Value(), deletedString(e.Deleted())) + if err := tw.Flush(); err != nil { + return err + } + + if err := cmd.printTagValueSeries(fs, name, key, e.Value()); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printTagValueSeries(fs *tsi1.FileSet, name, key, value []byte) error { + if !cmd.showTagValueSeries { + return nil + } + + // Iterate over each series. + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + itr := fs.TagValueSeriesIterator(name, key, value) + for e := itr.Next(); e != nil; e = itr.Next() { + if !cmd.matchSeries(e.Name(), e.Tags()) { + continue + } + + fmt.Fprintf(tw, " %s%s\n", e.Name(), e.Tags().HashKey()) + if err := tw.Flush(); err != nil { + return err + } + } + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +func (cmd *Command) printFileSummaries(fs *tsi1.FileSet) error { + for _, f := range fs.Files() { + switch f := f.(type) { + case *tsi1.LogFile: + if err := cmd.printLogFileSummary(f); err != nil { + return err + } + case *tsi1.IndexFile: + if err := cmd.printIndexFileSummary(f); err != nil { + return err + } + default: + panic("unreachable") + } + fmt.Fprintln(cmd.Stdout, "") + } + return nil +} + +func (cmd *Command) printLogFileSummary(f *tsi1.LogFile) error { + fmt.Fprintf(cmd.Stdout, "[LOG FILE] %s\n", filepath.Base(f.Path())) + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintf(tw, "Series:\t%d\n", f.SeriesN()) + fmt.Fprintf(tw, "Measurements:\t%d\n", f.MeasurementN()) + fmt.Fprintf(tw, "Tag Keys:\t%d\n", f.TagKeyN()) + fmt.Fprintf(tw, "Tag Values:\t%d\n", f.TagValueN()) + return tw.Flush() +} + +func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error { + fmt.Fprintf(cmd.Stdout, "[INDEX FILE] %s\n", filepath.Base(f.Path())) + + // Calculate summary stats. + seriesN := f.SeriesN() + var measurementN, measurementSeriesN, measurementSeriesSize uint64 + var keyN uint64 + var valueN, valueSeriesN, valueSeriesSize uint64 + mitr := f.MeasurementIterator() + for me, _ := mitr.Next().(*tsi1.MeasurementBlockElem); me != nil; me, _ = mitr.Next().(*tsi1.MeasurementBlockElem) { + kitr := f.TagKeyIterator(me.Name()) + for ke, _ := kitr.Next().(*tsi1.TagBlockKeyElem); ke != nil; ke, _ = kitr.Next().(*tsi1.TagBlockKeyElem) { + vitr := f.TagValueIterator(me.Name(), ke.Key()) + for ve, _ := vitr.Next().(*tsi1.TagBlockValueElem); ve != nil; ve, _ = vitr.Next().(*tsi1.TagBlockValueElem) { + valueN++ + valueSeriesN += uint64(ve.SeriesN()) + valueSeriesSize += uint64(len(ve.SeriesData())) + } + keyN++ + } + measurementN++ + measurementSeriesN += uint64(me.SeriesN()) + measurementSeriesSize += uint64(len(me.SeriesData())) + } + + // Write stats. 
+ tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintf(tw, "Series:\t%d\n", seriesN) + fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN) + fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize)) + fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN)) + fmt.Fprintf(tw, "Tag Keys:\t%d\n", keyN) + fmt.Fprintf(tw, "Tag Values:\t%d\n", valueN) + fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN) + fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize)) + fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN)) + fmt.Fprintf(tw, "Avg tags per series:\t%.01f\n", float64(valueSeriesN)/float64(seriesN)) + if err := tw.Flush(); err != nil { + return err + } + + return nil +} + +// matchSeries returns true if the command filters matches the series. +func (cmd *Command) matchSeries(name []byte, tags models.Tags) bool { + // Filter by measurement. + if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(name) { + return false + } + + // Filter by tag key/value. + if cmd.tagKeyFilter != nil || cmd.tagValueFilter != nil { + var matched bool + for _, tag := range tags { + if (cmd.tagKeyFilter == nil || cmd.tagKeyFilter.Match(tag.Key)) && (cmd.tagValueFilter == nil || cmd.tagValueFilter.Match(tag.Value)) { + matched = true + break + } + } + if !matched { + return false + } + } + + return true +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Dumps low-level details about tsi1 files. + +Usage: influx_inspect dumptsi [flags] path... + + -series + Dump raw series data + -measurements + Dump raw measurement data + -tag-keys + Dump raw tag keys + -tag-values + Dump raw tag values + -tag-value-series + Dump raw series for each tag value + -measurement-filter REGEXP + Filters data by measurement regular expression + -tag-key-filter REGEXP + Filters data by tag key regular expression + -tag-value-filter REGEXP + Filters data by tag value regular expression + +If no flags are specified then summary stats are provided for each file. +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +// deletedString returns "(deleted)" if v is true. +func deletedString(v bool) string { + if v { + return "(deleted)" + } + return "" +} + +func formatSize(v uint64) string { + denom := uint64(1) + var uom string + for _, uom = range []string{"b", "kb", "mb", "gb", "tb"} { + if denom*1024 > v { + break + } + denom *= 1024 + } + return fmt.Sprintf("%0.01f%s", float64(v)/float64(denom), uom) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go new file mode 100644 index 0000000..f1a278a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm.go @@ -0,0 +1,332 @@ +// Package dumptsm inspects low-level details about tsm1 files. +package dumptsm + +import ( + "encoding/binary" + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influxd dumptsm". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dumpIndex bool + dumpBlocks bool + dumpAll bool + filterKey string + path string +} + +// NewCommand returns a new instance of Command. 
+func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("file", flag.ExitOnError) + fs.BoolVar(&cmd.dumpIndex, "index", false, "Dump raw index data") + fs.BoolVar(&cmd.dumpBlocks, "blocks", false, "Dump raw block data") + fs.BoolVar(&cmd.dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") + fs.StringVar(&cmd.filterKey, "filter-key", "", "Only display index and block data match this key substring") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + if fs.Arg(0) == "" { + fmt.Printf("TSM file not specified\n\n") + fs.Usage() + return nil + } + cmd.path = fs.Args()[0] + cmd.dumpBlocks = cmd.dumpBlocks || cmd.dumpAll || cmd.filterKey != "" + cmd.dumpIndex = cmd.dumpIndex || cmd.dumpAll || cmd.filterKey != "" + return cmd.dump() +} + +func (cmd *Command) dump() error { + var errors []error + + f, err := os.Open(cmd.path) + if err != nil { + return err + } + + // Get the file size + stat, err := f.Stat() + if err != nil { + return err + } + b := make([]byte, 8) + + r, err := tsm1.NewTSMReader(f) + if err != nil { + return fmt.Errorf("Error opening TSM files: %s", err.Error()) + } + defer r.Close() + + minTime, maxTime := r.TimeRange() + keyCount := r.KeyCount() + + blockStats := &blockStats{} + + println("Summary:") + fmt.Printf(" File: %s\n", cmd.path) + fmt.Printf(" Time Range: %s - %s\n", + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), + ) + fmt.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) + fmt.Printf(" Series: %d ", keyCount) + fmt.Printf(" File Size: %d\n", stat.Size()) + println() + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + + if cmd.dumpIndex { + println("Index:") + tw.Flush() + println() + + fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t")) + var pos int + for i := 0; i < keyCount; i++ { + key, _ := r.KeyAt(i) + for _, e := range r.Entries(string(key)) { + pos++ + split := strings.Split(string(key), "#!~#") + + // Possible corruption? Try to read as much as we can and point to the problem. 
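+			// The composite key is expected to look like "cpu,host=a#!~#value";
+			// a key without the "#!~#" separator would leave split with a single
+			// element and make the field lookup below panic.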
+ measurement := split[0] + field := split[1] + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + continue + } + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(int64(pos), 10), + time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(e.Offset), 10), + strconv.FormatInt(int64(e.Size), 10), + measurement, + field, + }, "\t")) + tw.Flush() + } + } + } + + tw = tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) + + // Starting at 5 because the magic number is 4 bytes + 1 byte version + i := int64(5) + var blockCount, pointCount, blockSize int64 + indexSize := r.IndexSize() + + // Start at the beginning and read every block + for j := 0; j < keyCount; j++ { + key, _ := r.KeyAt(j) + for _, e := range r.Entries(string(key)) { + + f.Seek(int64(e.Offset), 0) + f.Read(b[:4]) + + chksum := binary.BigEndian.Uint32(b[:4]) + + buf := make([]byte, e.Size-4) + f.Read(buf) + + blockSize += int64(e.Size) + + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { + i += blockSize + blockCount++ + continue + } + + blockType := buf[0] + + encoded := buf[1:] + + var v []tsm1.Value + v, err := tsm1.DecodeBlock(buf, v) + if err != nil { + return err + } + startTime := time.Unix(0, v[0].UnixNano()) + + pointCount += int64(len(v)) + + // Length of the timestamp block + tsLen, j := binary.Uvarint(encoded) + + // Unpack the timestamp bytes + ts := encoded[int(j) : int(j)+int(tsLen)] + + // Unpack the value bytes + values := encoded[int(j)+int(tsLen):] + + tsEncoding := timeEnc[int(ts[0]>>4)] + vEncoding := encDescs[int(blockType+1)][values[0]>>4] + + typeDesc := blockTypes[blockType] + + blockStats.inc(0, ts[0]>>4) + blockStats.inc(int(blockType+1), values[0]>>4) + blockStats.size(len(buf)) + + if cmd.dumpBlocks { + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(blockCount, 10), + strconv.FormatUint(uint64(chksum), 10), + strconv.FormatInt(i, 10), + strconv.FormatInt(int64(len(buf)), 10), + typeDesc, + startTime.UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(len(v)), 10), + fmt.Sprintf("%s/%s", tsEncoding, vEncoding), + fmt.Sprintf("%d/%d", len(ts), len(values)), + }, "\t")) + } + + i += blockSize + blockCount++ + } + } + + if cmd.dumpBlocks { + println("Blocks:") + tw.Flush() + println() + } + + var blockSizeAvg int64 + if blockCount > 0 { + blockSizeAvg = blockSize / blockCount + } + fmt.Printf("Statistics\n") + fmt.Printf(" Blocks:\n") + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", + blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) + fmt.Printf(" Index:\n") + fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) + fmt.Printf(" Points:\n") + fmt.Printf(" Total: %d", pointCount) + println() + + println(" Encoding:") + for i, counts := range blockStats.counts { + if len(counts) == 0 { + continue + } + fmt.Printf(" %s: ", strings.Title(fieldType[i])) + for j, v := range counts { + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) + } + println() + } + fmt.Printf(" Compression:\n") + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) + + if len(errors) > 0 { + println() + fmt.Printf("Errors (%d):\n", len(errors)) 
+ for _, err := range errors { + fmt.Printf(" * %v\n", err) + } + println() + return fmt.Errorf("error count %d", len(errors)) + } + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Dumps low-level details about tsm1 files. + +Usage: influx_inspect dumptsm [flags] + Only display index and block data match this key substring +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +var ( + fieldType = []string{ + "timestamp", "float", "int", "bool", "string", + } + blockTypes = []string{ + "float64", "int64", "bool", "string", + } + timeEnc = []string{ + "none", "s8b", "rle", + } + floatEnc = []string{ + "none", "gor", + } + intEnc = []string{ + "none", "s8b", "rle", + } + boolEnc = []string{ + "none", "bp", + } + stringEnc = []string{ + "none", "snpy", + } + encDescs = [][]string{ + timeEnc, floatEnc, intEnc, boolEnc, stringEnc, + } +) + +type blockStats struct { + min, max int + counts [][]int +} + +func (b *blockStats) inc(typ int, enc byte) { + for len(b.counts) <= typ { + b.counts = append(b.counts, []int{}) + } + for len(b.counts[typ]) <= int(enc) { + b.counts[typ] = append(b.counts[typ], 0) + } + b.counts[typ][enc]++ +} + +func (b *blockStats) size(sz int) { + if b.min == 0 || sz < b.min { + b.min = sz + } + if b.min == 0 || sz > b.max { + b.max = sz + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go new file mode 100644 index 0000000..6a01a65 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm/dumptsm_test.go @@ -0,0 +1,3 @@ +package dumptsm_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go new file mode 100644 index 0000000..dc6d9d1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export.go @@ -0,0 +1,408 @@ +// Package export exports TSM files into InfluxDB line protocol format. +package export + +import ( + "bufio" + "compress/gzip" + "flag" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/escape" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect export". +type Command struct { + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + dataDir string + walDir string + out string + database string + retentionPolicy string + startTime int64 + endTime int64 + compress bool + + manifest map[string]struct{} + tsmFiles map[string][]string + walFiles map[string][]string +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + + manifest: make(map[string]struct{}), + tsmFiles: make(map[string][]string), + walFiles: make(map[string][]string), + } +} + +// Run executes the command. 
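+// An illustrative invocation (database name and output path are hypothetical)
+// that exports one database to a compressed line-protocol file:
+//
+//	cmd := NewCommand()
+//	err := cmd.Run("-database", "mydb", "-out", "/tmp/mydb.lp.gz", "-compress")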
+func (cmd *Command) Run(args ...string) error { + var start, end string + fs := flag.NewFlagSet("export", flag.ExitOnError) + fs.StringVar(&cmd.dataDir, "datadir", os.Getenv("HOME")+"/.influxdb/data", "Data storage path") + fs.StringVar(&cmd.walDir, "waldir", os.Getenv("HOME")+"/.influxdb/wal", "WAL storage path") + fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "Destination file to export to") + fs.StringVar(&cmd.database, "database", "", "Optional: the database to export") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to export (requires -database)") + fs.StringVar(&start, "start", "", "Optional: the start time to export (RFC3339 format)") + fs.StringVar(&end, "end", "", "Optional: the end time to export (RFC3339 format)") + fs.BoolVar(&cmd.compress, "compress", false, "Compress the output") + + fs.SetOutput(cmd.Stdout) + fs.Usage = func() { + fmt.Fprintf(cmd.Stdout, "Exports TSM files into InfluxDB line protocol format.\n\n") + fmt.Fprintf(cmd.Stdout, "Usage: %s export [flags]\n\n", filepath.Base(os.Args[0])) + fs.PrintDefaults() + } + + if err := fs.Parse(args); err != nil { + return err + } + + // set defaults + if start != "" { + s, err := time.Parse(time.RFC3339, start) + if err != nil { + return err + } + cmd.startTime = s.UnixNano() + } else { + cmd.startTime = math.MinInt64 + } + if end != "" { + e, err := time.Parse(time.RFC3339, end) + if err != nil { + return err + } + cmd.endTime = e.UnixNano() + } else { + // set end time to max if it is not set. + cmd.endTime = math.MaxInt64 + } + + if err := cmd.validate(); err != nil { + return err + } + + return cmd.export() +} + +func (cmd *Command) validate() error { + if cmd.retentionPolicy != "" && cmd.database == "" { + return fmt.Errorf("must specify a db") + } + if cmd.startTime != 0 && cmd.endTime != 0 && cmd.endTime < cmd.startTime { + return fmt.Errorf("end time before start time") + } + return nil +} + +func (cmd *Command) export() error { + if err := cmd.walkTSMFiles(); err != nil { + return err + } + if err := cmd.walkWALFiles(); err != nil { + return err + } + return cmd.write() +} + +func (cmd *Command) walkTSMFiles() error { + return filepath.Walk(cmd.dataDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a tsm file + if filepath.Ext(path) != "."+tsm1.TSMFileExtension { + return nil + } + + relPath, err := filepath.Rel(cmd.dataDir, path) + if err != nil { + return err + } + dirs := strings.Split(relPath, string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", path) + } + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + cmd.manifest[key] = struct{}{} + cmd.tsmFiles[key] = append(cmd.tsmFiles[key], path) + } + } + return nil + }) +} + +func (cmd *Command) walkWALFiles() error { + return filepath.Walk(cmd.walDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // check to see if this is a wal file + fileName := filepath.Base(path) + if filepath.Ext(path) != "."+tsm1.WALFileExtension || !strings.HasPrefix(fileName, tsm1.WALFilePrefix) { + return nil + } + + relPath, err := filepath.Rel(cmd.walDir, path) + if err != nil { + return err + } + dirs := strings.Split(relPath, string(byte(os.PathSeparator))) + if len(dirs) < 2 { + return fmt.Errorf("invalid directory structure for %s", path) + 
} + if dirs[0] == cmd.database || cmd.database == "" { + if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" { + key := filepath.Join(dirs[0], dirs[1]) + cmd.manifest[key] = struct{}{} + cmd.walFiles[key] = append(cmd.walFiles[key], path) + } + } + return nil + }) +} + +func (cmd *Command) write() error { + // open our output file and create an output buffer + f, err := os.Create(cmd.out) + if err != nil { + return err + } + defer f.Close() + + // Because calling (*os.File).Write is relatively expensive, + // and we don't *need* to sync to disk on every written line of export, + // use a sized buffered writer so that we only sync the file every megabyte. + bw := bufio.NewWriterSize(f, 1024*1024) + defer bw.Flush() + + var w io.Writer = bw + + if cmd.compress { + gzw := gzip.NewWriter(w) + defer gzw.Close() + w = gzw + } + + s, e := time.Unix(0, cmd.startTime).Format(time.RFC3339), time.Unix(0, cmd.endTime).Format(time.RFC3339) + fmt.Fprintf(w, "# INFLUXDB EXPORT: %s - %s\n", s, e) + + // Write out all the DDL + fmt.Fprintln(w, "# DDL") + for key := range cmd.manifest { + keys := strings.Split(key, string(os.PathSeparator)) + db, rp := influxql.QuoteIdent(keys[0]), influxql.QuoteIdent(keys[1]) + fmt.Fprintf(w, "CREATE DATABASE %s WITH NAME %s\n", db, rp) + } + + fmt.Fprintln(w, "# DML") + for key := range cmd.manifest { + keys := strings.Split(key, string(os.PathSeparator)) + fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", keys[0]) + fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1]) + if files, ok := cmd.tsmFiles[key]; ok { + fmt.Fprintf(cmd.Stdout, "writing out tsm file data for %s...", key) + if err := cmd.writeTsmFiles(w, files); err != nil { + return err + } + fmt.Fprintln(cmd.Stdout, "complete.") + } + if _, ok := cmd.walFiles[key]; ok { + fmt.Fprintf(cmd.Stdout, "writing out wal file data for %s...", key) + if err := cmd.writeWALFiles(w, cmd.walFiles[key], key); err != nil { + return err + } + fmt.Fprintln(cmd.Stdout, "complete.") + } + } + return nil +} + +func (cmd *Command) writeTsmFiles(w io.Writer, files []string) error { + fmt.Fprintln(w, "# writing tsm data") + + // we need to make sure we write the same order that the files were written + sort.Strings(files) + + for _, f := range files { + if err := cmd.exportTSMFile(f, w); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error { + f, err := os.Open(tsmFilePath) + if err != nil { + return err + } + defer f.Close() + + r, err := tsm1.NewTSMReader(f) + if err != nil { + fmt.Fprintf(cmd.Stderr, "unable to read %s, skipping: %s\n", tsmFilePath, err.Error()) + return nil + } + defer r.Close() + + if sgStart, sgEnd := r.TimeRange(); sgStart > cmd.endTime || sgEnd < cmd.startTime { + return nil + } + + for i := 0; i < r.KeyCount(); i++ { + key, _ := r.KeyAt(i) + values, err := r.ReadAll(string(key)) + if err != nil { + fmt.Fprintf(cmd.Stderr, "unable to read key %q in %s, skipping: %s\n", string(key), tsmFilePath, err.Error()) + continue + } + measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key) + field = escape.Bytes(field) + + if err := cmd.writeValues(w, measurement, string(field), values); err != nil { + // An error from writeValues indicates an IO error, which should be returned. 
+ return err + } + } + return nil +} + +func (cmd *Command) writeWALFiles(w io.Writer, files []string, key string) error { + fmt.Fprintln(w, "# writing wal data") + + // we need to make sure we write the same order that the wal received the data + sort.Strings(files) + + var once sync.Once + warnDelete := func() { + once.Do(func() { + msg := fmt.Sprintf(`WARNING: detected deletes in wal file. +Some series for %q may be brought back by replaying this data. +To resolve, you can either let the shard snapshot prior to exporting the data +or manually editing the exported file. + `, key) + fmt.Fprintln(cmd.Stderr, msg) + }) + } + + for _, f := range files { + if err := cmd.exportWALFile(f, w, warnDelete); err != nil { + return err + } + } + + return nil +} + +// exportWAL reads every WAL entry from r and exports it to w. +func (cmd *Command) exportWALFile(walFilePath string, w io.Writer, warnDelete func()) error { + f, err := os.Open(walFilePath) + if err != nil { + return err + } + defer f.Close() + + r := tsm1.NewWALSegmentReader(f) + defer r.Close() + + for r.Next() { + entry, err := r.Read() + if err != nil { + n := r.Count() + fmt.Fprintf(cmd.Stderr, "file %s corrupt at position %d", walFilePath, n) + break + } + + switch t := entry.(type) { + case *tsm1.DeleteWALEntry, *tsm1.DeleteRangeWALEntry: + warnDelete() + continue + case *tsm1.WriteWALEntry: + for key, values := range t.Values { + measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key)) + // measurements are stored escaped, field names are not + field = escape.Bytes(field) + + if err := cmd.writeValues(w, measurement, string(field), values); err != nil { + // An error from writeValues indicates an IO error, which should be returned. + return err + } + } + } + } + return nil +} + +// writeValues writes every value in values to w, using the given series key and field name. +// If any call to w.Write fails, that error is returned. +func (cmd *Command) writeValues(w io.Writer, seriesKey []byte, field string, values []tsm1.Value) error { + buf := []byte(string(seriesKey) + " " + field + "=") + prefixLen := len(buf) + + for _, value := range values { + ts := value.UnixNano() + if (ts < cmd.startTime) || (ts > cmd.endTime) { + continue + } + + // Re-slice buf to be " =". + buf = buf[:prefixLen] + + // Append the correct representation of the value. + switch v := value.Value().(type) { + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case int64: + buf = strconv.AppendInt(buf, v, 10) + buf = append(buf, 'i') + case bool: + buf = strconv.AppendBool(buf, v) + case string: + buf = append(buf, '"') + buf = append(buf, models.EscapeStringField(v)...) + buf = append(buf, '"') + default: + // This shouldn't be possible, but we'll format it anyway. + buf = append(buf, fmt.Sprintf("%v", v)...) + } + + // Now buf has " =". + // Append the timestamp and a newline, then write it. + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, ts, 10) + buf = append(buf, '\n') + if _, err := w.Write(buf); err != nil { + // Underlying IO error needs to be returned. 
+ return err + } + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go new file mode 100644 index 0000000..b6c024f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/export/export_test.go @@ -0,0 +1,340 @@ +package export + +import ( + "bytes" + "fmt" + "io/ioutil" + "math" + "math/rand" + "os" + "sort" + "strconv" + "strings" + "testing" + + "github.com/golang/snappy" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +type corpus map[string][]tsm1.Value + +var ( + basicCorpus = corpus{ + tsm1.SeriesFieldKey("floats,k=f", "f"): []tsm1.Value{ + tsm1.NewValue(1, float64(1.5)), + tsm1.NewValue(2, float64(3)), + }, + tsm1.SeriesFieldKey("ints,k=i", "i"): []tsm1.Value{ + tsm1.NewValue(10, int64(15)), + tsm1.NewValue(20, int64(30)), + }, + tsm1.SeriesFieldKey("bools,k=b", "b"): []tsm1.Value{ + tsm1.NewValue(100, true), + tsm1.NewValue(200, false), + }, + tsm1.SeriesFieldKey("strings,k=s", "s"): []tsm1.Value{ + tsm1.NewValue(1000, "1k"), + tsm1.NewValue(2000, "2k"), + }, + } + + basicCorpusExpLines = []string{ + "floats,k=f f=1.5 1", + "floats,k=f f=3 2", + "ints,k=i i=15i 10", + "ints,k=i i=30i 20", + "bools,k=b b=true 100", + "bools,k=b b=false 200", + `strings,k=s s="1k" 1000`, + `strings,k=s s="2k" 2000`, + } + + escapeStringCorpus = corpus{ + tsm1.SeriesFieldKey("t", "s"): []tsm1.Value{ + tsm1.NewValue(1, `1. "quotes"`), + tsm1.NewValue(2, `2. back\slash`), + tsm1.NewValue(3, `3. bs\q"`), + }, + } + + escCorpusExpLines = []string{ + `t s="1. \"quotes\"" 1`, + `t s="2. back\\slash" 2`, + `t s="3. bs\\q\"" 3`, + } +) + +func Test_exportWALFile(t *testing.T) { + for _, c := range []struct { + corpus corpus + lines []string + }{ + {corpus: basicCorpus, lines: basicCorpusExpLines}, + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, + } { + walFile := writeCorpusToWALFile(c.corpus) + defer os.Remove(walFile.Name()) + + var out bytes.Buffer + if err := newCommand().exportWALFile(walFile.Name(), &out, func() {}); err != nil { + t.Fatal(err) + } + + lines := strings.Split(out.String(), "\n") + for _, exp := range c.lines { + found := false + for _, l := range lines { + if exp == l { + found = true + break + } + } + + if !found { + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) + } + } + } +} + +func Test_exportTSMFile(t *testing.T) { + for _, c := range []struct { + corpus corpus + lines []string + }{ + {corpus: basicCorpus, lines: basicCorpusExpLines}, + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, + } { + tsmFile := writeCorpusToTSMFile(c.corpus) + defer os.Remove(tsmFile.Name()) + + var out bytes.Buffer + if err := newCommand().exportTSMFile(tsmFile.Name(), &out); err != nil { + t.Fatal(err) + } + + lines := strings.Split(out.String(), "\n") + for _, exp := range c.lines { + found := false + for _, l := range lines { + if exp == l { + found = true + break + } + } + + if !found { + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) + } + } + } +} + +var sink interface{} + +func benchmarkExportTSM(c corpus, b *testing.B) { + // Garbage collection is relatively likely to happen during export, so track allocations. 
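+	// ReportAllocs makes this benchmark report B/op and allocs/op when run with `go test -bench`.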
+ b.ReportAllocs() + + f := writeCorpusToTSMFile(c) + defer os.Remove(f.Name()) + + cmd := newCommand() + var out bytes.Buffer + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if err := cmd.exportTSMFile(f.Name(), &out); err != nil { + b.Fatal(err) + } + + sink = out.Bytes() + out.Reset() + } +} + +func BenchmarkExportTSMFloats_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeFloatsCorpus(100, 250), b) +} + +func BenchmarkExportTSMInts_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeIntsCorpus(100, 250), b) +} + +func BenchmarkExportTSMBools_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeBoolsCorpus(100, 250), b) +} + +func BenchmarkExportTSMStrings_100s_250vps(b *testing.B) { + benchmarkExportTSM(makeStringsCorpus(100, 250), b) +} + +func benchmarkExportWAL(c corpus, b *testing.B) { + // Garbage collection is relatively likely to happen during export, so track allocations. + b.ReportAllocs() + + f := writeCorpusToWALFile(c) + defer os.Remove(f.Name()) + + cmd := newCommand() + var out bytes.Buffer + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if err := cmd.exportWALFile(f.Name(), &out, func() {}); err != nil { + b.Fatal(err) + } + + sink = out.Bytes() + out.Reset() + } +} + +func BenchmarkExportWALFloats_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeFloatsCorpus(100, 250), b) +} + +func BenchmarkExportWALInts_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeIntsCorpus(100, 250), b) +} + +func BenchmarkExportWALBools_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeBoolsCorpus(100, 250), b) +} + +func BenchmarkExportWALStrings_100s_250vps(b *testing.B) { + benchmarkExportWAL(makeStringsCorpus(100, 250), b) +} + +// newCommand returns a command that discards its output and that accepts all timestamps. +func newCommand() *Command { + return &Command{ + Stderr: ioutil.Discard, + Stdout: ioutil.Discard, + startTime: math.MinInt64, + endTime: math.MaxInt64, + } +} + +// makeCorpus returns a new corpus filled with values generated by fn. +// The RNG passed to fn is seeded with numSeries * numValuesPerSeries, for predictable output. +func makeCorpus(numSeries, numValuesPerSeries int, fn func(*rand.Rand) interface{}) corpus { + rng := rand.New(rand.NewSource(int64(numSeries) * int64(numValuesPerSeries))) + var unixNano int64 + corpus := make(corpus, numSeries) + for i := 0; i < numSeries; i++ { + vals := make([]tsm1.Value, numValuesPerSeries) + for j := 0; j < numValuesPerSeries; j++ { + vals[j] = tsm1.NewValue(unixNano, fn(rng)) + unixNano++ + } + + k := fmt.Sprintf("m,t=%d", i) + corpus[tsm1.SeriesFieldKey(k, "x")] = vals + } + + return corpus +} + +func makeFloatsCorpus(numSeries, numFloatsPerSeries int) corpus { + return makeCorpus(numSeries, numFloatsPerSeries, func(rng *rand.Rand) interface{} { + return rng.Float64() + }) +} + +func makeIntsCorpus(numSeries, numIntsPerSeries int) corpus { + return makeCorpus(numSeries, numIntsPerSeries, func(rng *rand.Rand) interface{} { + // This will only return positive integers. That's probably okay. 
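+		// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.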
+ return rng.Int63() + }) +} + +func makeBoolsCorpus(numSeries, numBoolsPerSeries int) corpus { + return makeCorpus(numSeries, numBoolsPerSeries, func(rng *rand.Rand) interface{} { + return rand.Int63n(2) == 1 + }) +} + +func makeStringsCorpus(numSeries, numStringsPerSeries int) corpus { + return makeCorpus(numSeries, numStringsPerSeries, func(rng *rand.Rand) interface{} { + // The string will randomly have 2-6 parts + parts := make([]string, rand.Intn(4)+2) + + for i := range parts { + // Each part is a random base36-encoded number + parts[i] = strconv.FormatInt(rand.Int63(), 36) + } + + // Join the individual parts with underscores. + return strings.Join(parts, "_") + }) +} + +// writeCorpusToWALFile writes the given corpus as a WAL file, and returns a handle to that file. +// It is the caller's responsibility to remove the returned temp file. +// writeCorpusToWALFile will panic on any error that occurs. +func writeCorpusToWALFile(c corpus) *os.File { + walFile, err := ioutil.TempFile("", "export_test_corpus_wal") + if err != nil { + panic(err) + } + + e := &tsm1.WriteWALEntry{Values: c} + b, err := e.Encode(nil) + if err != nil { + panic(err) + } + + w := tsm1.NewWALSegmentWriter(walFile) + if err := w.Write(e.Type(), snappy.Encode(nil, b)); err != nil { + panic(err) + } + + if err := w.Flush(); err != nil { + panic(err) + } + // (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway. + if err := walFile.Sync(); err != nil { + panic(err) + } + + return walFile +} + +// writeCorpusToTSMFile writes the given corpus as a TSM file, and returns a handle to that file. +// It is the caller's responsibility to remove the returned temp file. +// writeCorpusToTSMFile will panic on any error that occurs. +func writeCorpusToTSMFile(c corpus) *os.File { + tsmFile, err := ioutil.TempFile("", "export_test_corpus_tsm") + if err != nil { + panic(err) + } + + w, err := tsm1.NewTSMWriter(tsmFile) + if err != nil { + panic(err) + } + + // Write the series in alphabetical order so that each test run is comparable, + // given an identical corpus. + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + if err := w.Write(k, c[k]); err != nil { + panic(err) + } + } + + if err := w.WriteIndex(); err != nil { + panic(err) + } + + if err := w.Close(); err != nil { + panic(err) + } + + return tsmFile +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go new file mode 100644 index 0000000..ff03389 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go @@ -0,0 +1,43 @@ +// Package help contains the help for the influx_inspect command. +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Usage: influx_inspect [[command] [arguments]] + +The commands are: + + dumptsm dumps low-level details about tsm1 files. 
+ export exports raw data from a shard to line protocol + help display this help message + report displays a shard level report + verify verifies integrity of TSM files + +"help" is the default command. + +Use "influx_inspect [command] -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go new file mode 100644 index 0000000..31d1632 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help_test.go @@ -0,0 +1,3 @@ +package help_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go new file mode 100644 index 0000000..8f2434e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go @@ -0,0 +1,90 @@ +// The influx_inspect command displays detailed information about InfluxDB data files. +package main + +import ( + "fmt" + "io" + "log" + "os" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi" + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm" + "github.com/influxdata/influxdb/cmd/influx_inspect/export" + "github.com/influxdata/influxdb/cmd/influx_inspect/help" + "github.com/influxdata/influxdb/cmd/influx_inspect/report" + "github.com/influxdata/influxdb/cmd/influx_inspect/verify" + _ "github.com/influxdata/influxdb/tsdb/engine" +) + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger *log.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: log.New(os.Stderr, "[influx_inspect] ", log.LstdFlags), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. 
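+	// Dispatch to the requested subcommand; each case below wraps any returned error with the subcommand's name for context.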
+ switch name { + case "", "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + case "dumptsi": + name := dumptsi.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsi: %s", err) + } + case "dumptsmdev": + fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n") + fallthrough + case "dumptsm": + name := dumptsm.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("dumptsm: %s", err) + } + case "export": + name := export.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("export: %s", err) + } + case "report": + name := report.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("report: %s", err) + } + case "verify": + name := verify.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("verify: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx_inspect help' for usage`+"\n\n", name) + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go new file mode 100644 index 0000000..86e9f5f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report.go @@ -0,0 +1,192 @@ +// Package report reports statistics about TSM files. +package report + +import ( + "flag" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/retailnext/hllpp" +) + +// Command represents the program execution for "influxd report". +type Command struct { + Stderr io.Writer + Stdout io.Writer + + dir string + pattern string + detailed bool +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("report", flag.ExitOnError) + fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern") + fs.BoolVar(&cmd.detailed, "detailed", false, "Report detailed cardinality estimates") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + cmd.dir = fs.Arg(0) + + start := time.Now() + + files, err := filepath.Glob(filepath.Join(cmd.dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) + if err != nil { + return err + } + + var filtered []string + if cmd.pattern != "" { + for _, f := range files { + if strings.Contains(f, cmd.pattern) { + filtered = append(filtered, f) + } + } + files = filtered + } + + if len(files) == 0 { + return fmt.Errorf("no tsm files at %v", cmd.dir) + } + + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t")) + + totalSeries := hllpp.New() + tagCardinalities := map[string]*hllpp.HLLPP{} + measCardinalities := map[string]*hllpp.HLLPP{} + fieldCardinalities := map[string]*hllpp.HLLPP{} + + for _, f := range files { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", f, err) + continue + } + + loadStart := time.Now() + reader, err := tsm1.NewTSMReader(file) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. 
Skipping.\n", file.Name(), err) + continue + } + loadTime := time.Since(loadStart) + + seriesCount := reader.KeyCount() + for i := 0; i < seriesCount; i++ { + key, _ := reader.KeyAt(i) + totalSeries.Add([]byte(key)) + + if cmd.detailed { + sep := strings.Index(string(key), "#!~#") + seriesKey, field := key[:sep], key[sep+4:] + measurement, tags := models.ParseKey(seriesKey) + + measCount, ok := measCardinalities[measurement] + if !ok { + measCount = hllpp.New() + measCardinalities[measurement] = measCount + } + measCount.Add([]byte(key)) + + fieldCount, ok := fieldCardinalities[measurement] + if !ok { + fieldCount = hllpp.New() + fieldCardinalities[measurement] = fieldCount + } + fieldCount.Add([]byte(field)) + + for _, t := range tags { + tagCount, ok := tagCardinalities[string(t.Key)] + if !ok { + tagCount = hllpp.New() + tagCardinalities[string(t.Key)] = tagCount + } + tagCount.Add(t.Value) + } + } + } + reader.Close() + + fmt.Fprintln(tw, strings.Join([]string{ + filepath.Base(file.Name()), + strconv.FormatInt(int64(seriesCount), 10), + loadTime.String(), + }, "\t")) + tw.Flush() + } + + tw.Flush() + println() + fmt.Printf("Statistics\n") + fmt.Printf("\tSeries:\n") + fmt.Printf("\t\tTotal (est): %d\n", totalSeries.Count()) + + if cmd.detailed { + fmt.Printf("\tMeasurements (est):\n") + for _, t := range sortKeys(measCardinalities) { + fmt.Printf("\t\t%v: %d (%d%%)\n", t, measCardinalities[t].Count(), int((float64(measCardinalities[t].Count())/float64(totalSeries.Count()))*100)) + } + + fmt.Printf("\tFields (est):\n") + for _, t := range sortKeys(fieldCardinalities) { + fmt.Printf("\t\t%v: %d\n", t, fieldCardinalities[t].Count()) + } + + fmt.Printf("\tTags (est):\n") + for _, t := range sortKeys(tagCardinalities) { + fmt.Printf("\t\t%v: %d\n", t, tagCardinalities[t].Count()) + } + } + + fmt.Printf("Completed in %s\n", time.Since(start)) + return nil +} + +// sortKeys is a quick helper to return the sorted set of a map's keys +func sortKeys(vals map[string]*hllpp.HLLPP) (keys []string) { + for k := range vals { + keys = append(keys, k) + } + sort.Strings(keys) + + return keys +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := `Displays shard level report. + +Usage: influx_inspect report [flags] + + -pattern + Include only files matching a pattern. + -detailed + Report detailed cardinality estimates. + Defaults to "false". +` + + fmt.Fprintf(cmd.Stdout, usage) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go new file mode 100644 index 0000000..3a6ba2c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/report/report_test.go @@ -0,0 +1,3 @@ +package report_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify.go new file mode 100644 index 0000000..90043d7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify.go @@ -0,0 +1,120 @@ +// Package verify verifies integrity of TSM files. +package verify + +import ( + "flag" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect verify". 
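+// It recomputes the CRC32 checksum of every block in every TSM file and reports any mismatches.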
+type Command struct { + Stderr io.Writer + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + var path string + fs := flag.NewFlagSet("verify", flag.ExitOnError) + fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + start := time.Now() + dataPath := filepath.Join(path, "data") + + brokenBlocks := 0 + totalBlocks := 0 + + // No need to do this in a loop + ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension) + + // Get all TSM files by walking through the data dir + files := []string{} + err := filepath.Walk(dataPath, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) == ext { + files = append(files, path) + } + return nil + }) + if err != nil { + panic(err) + } + + tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0) + + // Verify the checksums of every block in every file + for _, f := range files { + file, err := os.OpenFile(f, os.O_RDONLY, 0600) + if err != nil { + return err + } + + reader, err := tsm1.NewTSMReader(file) + if err != nil { + return err + } + + blockItr := reader.BlockIterator() + brokenFileBlocks := 0 + count := 0 + for blockItr.Next() { + totalBlocks++ + key, _, _, _, checksum, buf, err := blockItr.Read() + if err != nil { + brokenBlocks++ + fmt.Fprintf(tw, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err) + } else if expected := crc32.ChecksumIEEE(buf); checksum != expected { + brokenBlocks++ + fmt.Fprintf(tw, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count) + } + count++ + } + if brokenFileBlocks == 0 { + fmt.Fprintf(tw, "%s: healthy\n", f) + } + reader.Close() + } + + fmt.Fprintf(tw, "Broken Blocks: %d / %d, in %vs\n", brokenBlocks, totalBlocks, time.Since(start).Seconds()) + tw.Flush() + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + usage := fmt.Sprintf(`Verifies the integrity of TSM files. + +Usage: influx_inspect verify [flags] + + -dir + Root storage path + Defaults to "%[1]s/.influxdb". + `, os.Getenv("HOME")) + + fmt.Fprintf(cmd.Stdout, usage) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify_test.go new file mode 100644 index 0000000..c21ecdc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/verify/verify_test.go @@ -0,0 +1,3 @@ +package verify_test + +// TODO: write some tests diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md new file mode 100644 index 0000000..c403633 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md @@ -0,0 +1,43 @@ +# `influx_stress` + +If you run into any issues with this tool please mention @jackzampolin when you create an issue. + +## Ways to run + +### `influx_stress` +This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) For more information on the configuration file please see the default. 
+ +### `influx_stress -config someConfig.toml` +This runs the stress test with a valid configuration file located at `someConfig.tom` + +### `influx_stress -v2 -config someConfig.iql` +This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md). + +## Flags + +If flags are defined they overwrite the config from any file passed in. + +### `-addr` string +IP address and port of database where response times will persist (e.g., localhost:8086) + +`default` = "http://localhost:8086" + +### `-config` string +The relative path to the stress test configuration file. + +`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) + +### `-cpuprofile` filename +Writes the result of Go's cpu profile to filename + +`default` = no profiling + +### `-database` string +Name of database on `-addr` that `influx_stress` will persist write and query response times + +`default` = "stress" + +### `-tags` value +A comma separated list of tags to add to write and query response times. + +`default` = "" diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml new file mode 100644 index 0000000..08be339 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml @@ -0,0 +1,92 @@ +# This section can be removed +[provision] + # The basic provisioner simply deletes and creates database. + # If `reset_database` is false, it will not attempt to delete the database + [provision.basic] + # If enabled the provisioner will actually run + enabled = true + # Address of the instance that is to be provisioned + address = "localhost:8086" + # Database the will be created/deleted + database = "stress" + # Attempt to delete database + reset_database = true + +# This section cannot be commented out +# To prevent writes set `enabled=false` +# in [write.influx_client.basic] +[write] + [write.point_generator] + # The basic point generator will generate points of the form + # `cpu,host=server-%v,location=us-west value=234 123456` + [write.point_generator.basic] + # number of points that will be written for each of the series + point_count = 100 + # number of series + series_count = 100000 + # How much time between each timestamp + tick = "10s" + # Randomize timestamp a bit (not functional) + jitter = true + # Precision of points that are being written + precision = "s" + # name of the measurement that will be written + measurement = "cpu" + # The date for the first point that is written into influx + start_date = "2006-Jan-02" + # Defines a tag for a series + [[write.point_generator.basic.tag]] + key = "host" + value = "server" + [[write.point_generator.basic.tag]] + key = "location" + value = "us-west" + # Defines a field for a series + [[write.point_generator.basic.field]] + key = "value" + value = "float64" # supported types: float64, int, bool + + + [write.influx_client] + [write.influx_client.basic] + # If enabled the writer will actually write + enabled = true + # Addresses is an array of the Influxdb instances + addresses = ["localhost:8086"] # stress_test_server runs on port 1234 + # Database that is being written to + database = "stress" + # Precision of points that are being written + precision = "s" + # Size of batches that are sent to db + batch_size = 10000 + # Interval between each batch + 
batch_interval = "0s" + # How many concurrent writers to the db + concurrency = 10 + # ssl enabled? + ssl = false + # format of points that are written to influxdb + format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet) + +# This section can be removed +[read] + [read.query_generator] + [read.query_generator.basic] + # Template of the query that will be ran against the instance + template = "SELECT count(value) FROM cpu where host='server-%v'" + # How many times the templated query will be ran + query_count = 250 + + [read.query_client] + [read.query_client.basic] + # if enabled the reader will actually read + enabled = true + # Address of the instance that will be queried + addresses = ["localhost:8086"] + # Database that will be queried + database = "stress" + # Interval bewteen queries + query_interval = "100ms" + # Number of concurrent queriers + concurrency = 1 + diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go new file mode 100644 index 0000000..5186d18 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go @@ -0,0 +1,71 @@ +// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "runtime/pprof" + + "github.com/influxdata/influxdb/stress" + v2 "github.com/influxdata/influxdb/stress/v2" +) + +var ( + useV2 = flag.Bool("v2", false, "Use version 2 of stress tool") + config = flag.String("config", "", "The stress test file") + cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`") + db = flag.String("db", "", "target database within test system for write and query load") +) + +func main() { + o := stress.NewOutputConfig() + flag.Parse() + + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Println(err) + return + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + if *useV2 { + if *config != "" { + v2.RunStress(*config) + } else { + v2.RunStress("stress/v2/iql/file.iql") + } + } else { + + c, err := stress.NewConfig(*config) + if err != nil { + log.Fatal(err) + return + } + + if *db != "" { + c.Provision.Basic.Database = *db + c.Write.InfluxClients.Basic.Database = *db + c.Read.QueryClients.Basic.Database = *db + } + + w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic) + r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic) + s := stress.NewStressTest(&c.Provision.Basic, w, r) + + bw := stress.NewBroadcastChannel() + bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler) + bw.Register(o.HTTPHandler("write")) + + br := stress.NewBroadcastChannel() + br.Register(c.Read.QueryClients.Basic.BasicReadHandler) + br.Register(o.HTTPHandler("read")) + + s.Start(bw.Handle, br.Handle) + + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md new file mode 100644 index 0000000..d63c152 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md @@ -0,0 +1,152 @@ +# Converting b1 and bz1 shards to tsm1 + +`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1 +format. Converting shards to tsm1 format results in a very significant +reduction in disk usage, and significantly improved write-throughput, +when writing data into those shards. 
+
+Conversion can be controlled on a database-by-database basis. By
+default a database is backed up before it is converted, allowing you
+to roll back any changes. Because of the backup process, ensure the
+host system has at least as much free disk space as the disk space
+consumed by the _data_ directory of your InfluxDB system.
+
+The tool automatically ignores tsm1 shards, and can be run
+idempotently on any database.
+
+Conversion is an offline process, and the InfluxDB system must be
+stopped during conversion. However the conversion process reads and
+writes shards directly on disk and should be fast.
+
+## Steps
+
+Follow these steps to perform a conversion.
+
+* Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted.
+* Decide on parallel operation. By default the conversion operation performs each operation in a serial manner. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM usage will all increase).
+* Stop all write-traffic to your InfluxDB system.
+* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards.
+* Stop the InfluxDB service. It should not be restarted until conversion is complete.
+* Run the conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions.
+* Unless you ran the conversion tool as the same user that runs InfluxDB, you may need to set the correct read-and-write permissions on the new tsm1 directories.
+* Restart the node and ensure the data looks correct.
+* If everything looks OK, you may then wish to remove or archive the backed-up databases.
+* Restart write traffic.
+
+## Example session
+
+Below is an example session, showing a database being converted.
+
+```
+$ # Create a backup location that the `influxdb` user has full access to
+$ mkdir -m 0777 /path/to/influxdb_backup
+$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data
+
+b1 and bz1 shard conversion.
+-----------------------------------
+Data directory is:                  /var/lib/influxdb/data
+Backup directory is:                /path/to/influxdb_backup
+Databases specified:                all
+Database backups enabled:           yes
+Parallel mode enabled (GOMAXPROCS): yes (8)
+
+
+Found 1 shards that will be converted.
+
+Database        Retention       Path                                            Engine  Size
+_internal       monitor         /var/lib/influxdb/data/_internal/monitor/1      bz1     65536
+
+These shards will be converted. Proceed? y/N: y
+Conversion starting....
+Backing up 1 databases...
+2016/01/28 12:23:43.699266 Backup of database '_internal' started
+2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1
+2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs)
+2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1
+2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms)
+
+Summary statistics
+========================================
+Databases converted:                 1
+Shards converted:                    1
+TSM files created:                   1
+Points read:                         369
+Points written:                      369
+NaN filtered:                        0
+Inf filtered:                        0
+Points without fields filtered:      0
+Disk usage pre-conversion (bytes):   65536
+Disk usage post-conversion (bytes):  11000
+Reduction factor:                    83%
+Bytes per TSM point:                 29.81
+Total conversion time:               7.330443ms
+
+$ # restart node, verify data
+$ sudo rm -r /path/to/influxdb_backup
+```
+
+Note that the tool first lists the shards that will be converted,
+before asking for confirmation. You can abort the conversion process
+at this step if you just wish to see what would be converted, or if
+the list of shards does not look correct.
+
+__WARNING:__ If you run the `influx_tsm` tool as a user other than the
+`influxdb` user (or the user that the InfluxDB process runs under),
+please make sure to verify the shard permissions are correct prior to
+starting InfluxDB. If needed, shard permissions can be corrected with
+the `chown` command. For example:
+
+```
+sudo chown -R influxdb:influxdb /var/lib/influxdb
+```
+
+## Rolling back a conversion
+
+After a successful backup (the message `Database XYZ backed up` was
+logged), you have a duplicate of that database in the _backup_
+directory you provided on the command line. If, when checking your
+data after a successful conversion, you notice things missing or
+something just isn't right, you can "undo" the conversion:
+
+- Shut down your node (this is very important)
+- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations)
+- Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory.
+
+Using the same directories as above, and assuming a database named `stats`:
+
+```
+$ sudo rm -r /var/lib/influxdb/data/stats
+$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/
+$ # restart influxd node
+```
+
+#### How to avoid downtime when upgrading shards
+
+*Identify non-`tsm1` shards*
+
+Non-`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>`.
+
+`tsm1` shards are files of the form: `data/<database>/<retention policy>/<shard id>/<file>.tsm`.
+
+*Determine which `b1`/`bz1` shards are cold for writes*
+
+Run the `SHOW SHARDS` query to see the start and end dates for shards.
+If the date range for a shard does not span the current time then the shard is said to be cold for writes.
+This means that no new points are expected to be added to the shard.
+The shard whose date range spans now is said to be hot for writes.
+You can only safely convert cold shards without stopping the InfluxDB process.
+
+*Convert cold shards*
+
+1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention policy>/<shard id>`.
+2. Run the `influx_tsm` tool on the copied files:
+```
+influx_tsm -parallel /tmp/data/
+```
+3. Remove the existing cold `b1`/`bz1` shards from the production data directory.
+4.
Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do this simultaneously with step 3 to avoid any query errors. +5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards. + +> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard. +Nothing in InfluxDB will prevent writes to cold shards, they are merely unexpected, not impossible. +It is your responsibility to prevent writes to cold shards to prevent data loss. diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go new file mode 100644 index 0000000..0ed844f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go @@ -0,0 +1,270 @@ +// Package b1 reads data from b1 shards. +package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + +import ( + "encoding/binary" + "math" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// DefaultChunkSize is the size of chunks read from the b1 shard +const DefaultChunkSize int = 1000 + +var excludedBuckets = map[string]bool{ + "fields": true, + "meta": true, + "series": true, + "wal": true, +} + +// Reader is used to read all data from a b1 shard. +type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + values []tsm1.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the b1 shard at path. +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + r.values = make([]tsm1.Value, chunkSize) + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + // Load fields. + if err := r.db.View(func(tx *bolt.Tx) error { + meta := tx.Bucket([]byte("fields")) + c := meta.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + mf := &tsdb.MeasurementFields{} + if err := mf.UnmarshalBinary(v); err != nil { + return err + } + r.fields[string(k)] = mf + r.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields) + } + return nil + }); err != nil { + return err + } + + seriesSet := make(map[string]bool) + + // ignore series index and find all series in this shard + if err := r.db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + key := string(name) + if !excludedBuckets[key] { + seriesSet[key] = true + } + return nil + }) + return nil + }); err != nil { + return err + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. 
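+	// Series whose measurement has no stored field metadata cannot be decoded; count them as filtered and skip them.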
+ for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether any data remains to be read. It must be called before +// the next call to Read(). +func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. +// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.values[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. +type cursor struct { + // Bolt cursor and readahead buffer. + cursor *bolt.Cursor + keyBuf int64 + valBuf interface{} + + series string + field string + dec *tsdb.FieldCodec +} + +// Cursor returns an iterator for a key over a single field. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + cur := &cursor{ + keyBuf: -2, + series: series, + field: field, + dec: dec, + } + + // Retrieve series bucket. + b := tx.Bucket([]byte(series)) + if b != nil { + cur.cursor = b.Cursor() + } + + return cur +} + +// Seek moves the cursor to a position. +func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + k, v := c.cursor.Seek(seekBytes[:]) + c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v) +} + +// Next returns the next key/value pair from the cursor. +func (c *cursor) Next() (key int64, value interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + k, v := c.cursor.Next() + if k == nil { + return -1, nil + } + return tsdb.DecodeKeyValue(c.field, c.dec, k, v) + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// Sort b1 cursors in correct order for writing to TSM files. 
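+// The TSM writer expects keys in ascending order, so cursors are sorted by series key first and field name second.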
+ +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go new file mode 100644 index 0000000..b369aff --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go @@ -0,0 +1,371 @@ +// Package bz1 reads data from bz1 shards. +package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/golang/snappy" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// DefaultChunkSize is the size of chunks read from the bz1 shard +const DefaultChunkSize = 1000 + +// Reader is used to read all data from a bz1 shard. +type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + values []tsm1.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the bz1 shard at path. +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + r.values = make([]tsm1.Value, chunkSize) + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + seriesSet := make(map[string]bool) + + if err := r.db.View(func(tx *bolt.Tx) error { + var data []byte + + meta := tx.Bucket([]byte("meta")) + if meta == nil { + // No data in this shard. + return nil + } + + pointsBucket := tx.Bucket([]byte("points")) + if pointsBucket == nil { + return nil + } + + if err := pointsBucket.ForEach(func(key, _ []byte) error { + seriesSet[string(key)] = true + return nil + }); err != nil { + return err + } + + buf := meta.Get([]byte("fields")) + if buf == nil { + // No data in this shard. + return nil + } + + data, err = snappy.Decode(nil, buf) + if err != nil { + return err + } + if err := json.Unmarshal(data, &r.fields); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + // Build the codec for each measurement. + for k, v := range r.fields { + r.codecs[k] = tsdb.NewFieldCodec(v.Fields) + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. + for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + if c == nil { + continue + } + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether there is any more data to be read. 
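+// It must be called before each call to Read().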
+func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos] = tsm1.NewValue(k, v) + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. +// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.values[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. +type cursor struct { + cursor *bolt.Cursor + buf []byte // uncompressed buffer + off int // buffer offset + fieldIndices []int + index int + + series string + field string + dec *tsdb.FieldCodec + + keyBuf int64 + valBuf interface{} +} + +// newCursor returns an instance of a bz1 cursor. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + // Retrieve points bucket. Ignore if there is no bucket. + b := tx.Bucket([]byte("points")).Bucket([]byte(series)) + if b == nil { + return nil + } + + return &cursor{ + cursor: b.Cursor(), + series: series, + field: field, + dec: dec, + keyBuf: -2, + } +} + +// Seek moves the cursor to a position. +func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + + // Move cursor to appropriate block and set to buffer. + k, v := c.cursor.Seek(seekBytes[:]) + if v == nil { // get the last block, it might have this time + _, v = c.cursor.Last() + } else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check + _, v = c.cursor.Prev() + + // if the previous block max time is less than the seek value, reset to where we were originally + if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) { + _, v = c.cursor.Seek(seekBytes[:]) + } + } + c.setBuf(v) + + // Read current block up to seek position. + c.seekBuf(seekBytes[:]) + + // Return current entry. + c.keyBuf, c.valBuf = c.read() +} + +// seekBuf moves the cursor to a position within the current buffer. +func (c *cursor) seekBuf(seek []byte) (key, value []byte) { + for { + // Slice off the current entry. + buf := c.buf[c.off:] + + // Exit if current entry's timestamp is on or after the seek. + if len(buf) == 0 { + return + } + + if bytes.Compare(buf[0:8], seek) != -1 { + return + } + + c.off += entryHeaderSize + entryDataSize(buf) + } +} + +// Next returns the next key/value pair from the cursor. If there are no values +// remaining, -1 is returned. 
+func (c *cursor) Next() (int64, interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + // Ignore if there is no buffer. + if len(c.buf) == 0 { + return -1, nil + } + + // Move forward to next entry. + c.off += entryHeaderSize + entryDataSize(c.buf[c.off:]) + + // If no items left then read first item from next block. + if c.off >= len(c.buf) { + _, v := c.cursor.Next() + c.setBuf(v) + } + + return c.read() + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// setBuf saves a compressed block to the buffer. +func (c *cursor) setBuf(block []byte) { + // Clear if the block is empty. + if len(block) == 0 { + c.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0 + return + } + + // Otherwise decode block into buffer. + // Skip over the first 8 bytes since they are the max timestamp. + buf, err := snappy.Decode(nil, block[8:]) + if err != nil { + c.buf = c.buf[0:0] + fmt.Printf("block decode error: %s\n", err) + } + + c.buf, c.off = buf, 0 +} + +// read reads the current key and value from the current block. +func (c *cursor) read() (key int64, value interface{}) { + // Return nil if the offset is at the end of the buffer. + if c.off >= len(c.buf) { + return -1, nil + } + + // Otherwise read the current entry. + buf := c.buf[c.off:] + dataSize := entryDataSize(buf) + + return tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize]) +} + +// Sort bz1 cursors in correct order for writing to TSM files. + +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} + +// entryHeaderSize is the number of bytes required for the header. +const entryHeaderSize = 8 + 4 + +// entryDataSize returns the size of an entry's data field, in bytes. +func entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go new file mode 100644 index 0000000..3469af6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +const ( + maxBlocksPerKey = 65535 +) + +// KeyIterator is used to iterate over b* keys for conversion to tsm keys +type KeyIterator interface { + Next() bool + Read() (string, []tsm1.Value, error) +} + +// Converter encapsulates the logic for converting b*1 shards to tsm1 shards. +type Converter struct { + path string + maxTSMFileSize uint32 + sequence int + stats *stats.Stats +} + +// NewConverter returns a new instance of the Converter. +func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter { + return &Converter{ + path: path, + maxTSMFileSize: sz, + stats: stats, + } +} + +// Process writes the data provided by iter to a tsm1 shard. +func (c *Converter) Process(iter KeyIterator) error { + // Ensure the tsm1 directory exists. 
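+	// MkdirAll is a no-op if the directory already exists; the 0777 mode is further restricted by the process umask.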
+ if err := os.MkdirAll(c.path, 0777); err != nil { + return err + } + + // Iterate until no more data remains. + var w tsm1.TSMWriter + var keyCount map[string]int + + for iter.Next() { + k, v, err := iter.Read() + if err != nil { + return err + } + + if w == nil { + w, err = c.nextTSMWriter() + if err != nil { + return err + } + keyCount = map[string]int{} + } + if err := w.Write(k, v); err != nil { + return err + } + keyCount[k]++ + + c.stats.AddPointsRead(len(v)) + c.stats.AddPointsWritten(len(v)) + + // If we have a max file size configured and we're over it, start a new TSM file. + if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + w = nil + } + } + + if w != nil { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + } + + return nil +} + +// nextTSMWriter returns the next TSMWriter for the Converter. +func (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) { + c.sequence++ + fileName := filepath.Join(c.path, fmt.Sprintf("%09d-%09d.%s", 1, c.sequence, tsm1.TSMFileExtension)) + + fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + + // Create the writer for the new TSM file. + w, err := tsm1.NewTSMWriter(fd) + if err != nil { + return nil, err + } + + c.stats.IncrTSMFileCount() + return w, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go new file mode 100644 index 0000000..f852388 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go @@ -0,0 +1,415 @@ +// Command influx_tsm converts b1 or bz1 shards (from InfluxDB releases earlier than v0.11) +// to the current tsm1 format. +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strings" + "text/tabwriter" + "time" + + "net/http" + _ "net/http/pprof" + + "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// ShardReader reads b* shards and converts to tsm shards +type ShardReader interface { + KeyIterator + Open() error + Close() error +} + +const ( + tsmExt = "tsm" +) + +var description = ` +Convert a database from b1 or bz1 format to tsm1 format. + +This tool will backup the directories before conversion (if not disabled). +The backed-up files must be removed manually, generally after starting up the +node again to make sure all of data has been converted correctly. + +To restore a backup: + Shut down the node, remove the converted directory, and + copy the backed-up directory to the original location.` + +type options struct { + DataPath string + BackupPath string + DBs []string + DebugAddr string + TSMSize uint64 + Parallel bool + SkipBackup bool + UpdateInterval time.Duration + Yes bool + CPUFile string +} + +func (o *options) Parse() error { + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + + var dbs string + + fs.StringVar(&dbs, "dbs", "", "Comma-delimited list of databases to convert. 
Default is to convert all databases.") + fs.Uint64Var(&opts.TSMSize, "sz", maxTSMSz, "Maximum size of individual TSM files.") + fs.BoolVar(&opts.Parallel, "parallel", false, "Perform parallel conversion. (up to GOMAXPROCS shards at once)") + fs.BoolVar(&opts.SkipBackup, "nobackup", false, "Disable database backups. Not recommended.") + fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directory.") + fs.StringVar(&opts.DebugAddr, "debug", "", "If set, http debugging endpoints will be enabled on the given address") + fs.DurationVar(&opts.UpdateInterval, "interval", 5*time.Second, "How often status updates are printed.") + fs.BoolVar(&opts.Yes, "y", false, "Don't ask, just convert") + fs.StringVar(&opts.CPUFile, "profile", "", "CPU Profile location") + fs.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage: %v [options] \n", os.Args[0]) + fmt.Fprintf(os.Stderr, "%v\n\nOptions:\n", description) + fs.PrintDefaults() + fmt.Fprintf(os.Stderr, "\n") + } + + if err := fs.Parse(os.Args[1:]); err != nil { + return err + } + + if len(fs.Args()) < 1 { + return errors.New("no data directory specified") + } + var err error + if o.DataPath, err = filepath.Abs(fs.Args()[0]); err != nil { + return err + } + if o.DataPath, err = filepath.EvalSymlinks(filepath.Clean(o.DataPath)); err != nil { + return err + } + + if o.TSMSize > maxTSMSz { + return fmt.Errorf("bad TSM file size, maximum TSM file size is %d", maxTSMSz) + } + + // Check if specific databases were requested. + o.DBs = strings.Split(dbs, ",") + if len(o.DBs) == 1 && o.DBs[0] == "" { + o.DBs = nil + } + + if !o.SkipBackup { + if o.BackupPath == "" { + return errors.New("either -nobackup or -backup DIR must be set") + } + if o.BackupPath, err = filepath.Abs(o.BackupPath); err != nil { + return err + } + if o.BackupPath, err = filepath.EvalSymlinks(filepath.Clean(o.BackupPath)); err != nil { + if os.IsNotExist(err) { + return errors.New("backup directory must already exist") + } + return err + } + + if strings.HasPrefix(o.BackupPath, o.DataPath) { + fmt.Println(o.BackupPath, o.DataPath) + return errors.New("backup directory cannot be contained within data directory") + } + } + + if o.DebugAddr != "" { + log.Printf("Starting debugging server on http://%v", o.DebugAddr) + go func() { + log.Fatal(http.ListenAndServe(o.DebugAddr, nil)) + }() + } + + return nil +} + +var opts options + +const maxTSMSz uint64 = 2 * 1024 * 1024 * 1024 + +func init() { + log.SetOutput(os.Stderr) + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds) +} + +func main() { + if err := opts.Parse(); err != nil { + log.Fatal(err) + } + + // Determine the list of databases + dbs, err := ioutil.ReadDir(opts.DataPath) + if err != nil { + log.Fatalf("failed to access data directory at %v: %v\n", opts.DataPath, err) + } + fmt.Println() // Cleanly separate output from start of program. + + if opts.Parallel { + if !isEnvSet("GOMAXPROCS") { + // Only modify GOMAXPROCS if it wasn't set in the environment + // This means 'GOMAXPROCS=1 influx_tsm -parallel' will not actually + // run in parallel + runtime.GOMAXPROCS(runtime.NumCPU()) + } + } + + var badUser string + if opts.SkipBackup { + badUser = "(NOT RECOMMENDED)" + } + + // Dump summary of what is about to happen. 
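+	// Nothing has been modified yet; unless -y was given, the user is still asked to
+	// confirm before any conversion starts.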
+ fmt.Println("b1 and bz1 shard conversion.") + fmt.Println("-----------------------------------") + fmt.Println("Data directory is: ", opts.DataPath) + if !opts.SkipBackup { + fmt.Println("Backup directory is: ", opts.BackupPath) + } + fmt.Println("Databases specified: ", allDBs(opts.DBs)) + fmt.Println("Database backups enabled: ", yesno(!opts.SkipBackup), badUser) + fmt.Printf("Parallel mode enabled (GOMAXPROCS): %s (%d)\n", yesno(opts.Parallel), runtime.GOMAXPROCS(0)) + fmt.Println() + + shards := collectShards(dbs) + + // Anything to convert? + fmt.Printf("\nFound %d shards that will be converted.\n", len(shards)) + if len(shards) == 0 { + fmt.Println("Nothing to do.") + return + } + + // Display list of convertible shards. + fmt.Println() + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + fmt.Fprintln(w, "Database\tRetention\tPath\tEngine\tSize") + for _, si := range shards { + fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%d\n", si.Database, si.RetentionPolicy, si.FullPath(opts.DataPath), si.FormatAsString(), si.Size) + } + w.Flush() + + if !opts.Yes { + // Get confirmation from user. + fmt.Printf("\nThese shards will be converted. Proceed? y/N: ") + liner := bufio.NewReader(os.Stdin) + yn, err := liner.ReadString('\n') + if err != nil { + log.Fatalf("failed to read response: %v", err) + } + yn = strings.TrimRight(strings.ToLower(yn), "\n") + if yn != "y" { + log.Fatal("Conversion aborted.") + } + } + fmt.Println("Conversion starting....") + + if opts.CPUFile != "" { + f, err := os.Create(opts.CPUFile) + if err != nil { + log.Fatal(err) + } + if err = pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + defer pprof.StopCPUProfile() + } + + tr := newTracker(shards, opts) + + if err := tr.Run(); err != nil { + log.Fatalf("Error occurred preventing completion: %v\n", err) + } + + tr.PrintStats() +} + +func collectShards(dbs []os.FileInfo) tsdb.ShardInfos { + // Get the list of shards for conversion. + var shards tsdb.ShardInfos + for _, db := range dbs { + d := tsdb.NewDatabase(filepath.Join(opts.DataPath, db.Name())) + shs, err := d.Shards() + if err != nil { + log.Fatalf("Failed to access shards for database %v: %v\n", d.Name(), err) + } + shards = append(shards, shs...) + } + + sort.Sort(shards) + shards = shards.FilterFormat(tsdb.TSM1) + if len(dbs) > 0 { + shards = shards.ExclusiveDatabases(opts.DBs) + } + + return shards +} + +// backupDatabase backs up the database named db +func backupDatabase(db string) error { + copyFile := func(path string, info os.FileInfo, err error) error { + // Strip the DataPath from the path and replace with BackupPath. 
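+		// Only the first occurrence is replaced, so deeper path components that happen
+		// to match the data directory are left untouched.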
+ toPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1) + + if info.IsDir() { + return os.MkdirAll(toPath, info.Mode()) + } + + in, err := os.Open(path) + if err != nil { + return err + } + defer in.Close() + + srcInfo, err := os.Stat(path) + if err != nil { + return err + } + + out, err := os.OpenFile(toPath, os.O_CREATE|os.O_WRONLY, info.Mode()) + if err != nil { + return err + } + defer out.Close() + + dstInfo, err := os.Stat(toPath) + if err != nil { + return err + } + + if dstInfo.Size() == srcInfo.Size() { + log.Printf("Backup file already found for %v with correct size, skipping.", path) + return nil + } + + if dstInfo.Size() > srcInfo.Size() { + log.Printf("Invalid backup file found for %v, replacing with good copy.", path) + if err := out.Truncate(0); err != nil { + return err + } + if _, err := out.Seek(0, io.SeekStart); err != nil { + return err + } + } + + if dstInfo.Size() > 0 { + log.Printf("Resuming backup of file %v, starting at %v bytes", path, dstInfo.Size()) + } + + off, err := out.Seek(0, io.SeekEnd) + if err != nil { + return err + } + if _, err := in.Seek(off, io.SeekStart); err != nil { + return err + } + + log.Printf("Backing up file %v", path) + + _, err = io.Copy(out, in) + + return err + } + + return filepath.Walk(filepath.Join(opts.DataPath, db), copyFile) +} + +// convertShard converts the shard in-place. +func convertShard(si *tsdb.ShardInfo, tr *tracker) error { + src := si.FullPath(opts.DataPath) + dst := fmt.Sprintf("%v.%v", src, tsmExt) + + var reader ShardReader + switch si.Format { + case tsdb.BZ1: + reader = bz1.NewReader(src, &tr.Stats, 0) + case tsdb.B1: + reader = b1.NewReader(src, &tr.Stats, 0) + default: + return fmt.Errorf("Unsupported shard format: %v", si.FormatAsString()) + } + + // Open the shard, and create a converter. + if err := reader.Open(); err != nil { + return fmt.Errorf("Failed to open %v for conversion: %v", src, err) + } + defer reader.Close() + converter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats) + + // Perform the conversion. + if err := converter.Process(reader); err != nil { + return fmt.Errorf("Conversion of %v failed: %v", src, err) + } + + // Delete source shard, and rename new tsm1 shard. + if err := reader.Close(); err != nil { + return fmt.Errorf("Conversion of %v failed due to close: %v", src, err) + } + + if err := os.RemoveAll(si.FullPath(opts.DataPath)); err != nil { + return fmt.Errorf("Deletion of %v failed: %v", src, err) + } + if err := os.Rename(dst, src); err != nil { + return fmt.Errorf("Rename of %v to %v failed: %v", dst, src, err) + } + + return nil +} + +// ParallelGroup allows the maximum parrallelism of a set of operations to be controlled. +type ParallelGroup chan struct{} + +// NewParallelGroup returns a group which allows n operations to run in parallel. A value of 0 +// means no operations will ever run. +func NewParallelGroup(n int) ParallelGroup { + return make(chan struct{}, n) +} + +// Do executes one operation of the ParallelGroup +func (p ParallelGroup) Do(f func()) { + p <- struct{}{} // acquire working slot + defer func() { <-p }() + + f() +} + +// yesno returns "yes" for true, "no" for false. +func yesno(b bool) string { + if b { + return "yes" + } + return "no" +} + +// allDBs returns "all" if all databases are requested for conversion. 
+func allDBs(dbs []string) string { + if dbs == nil { + return "all" + } + return fmt.Sprintf("%v", dbs) +} + +// isEnvSet checks to see if a variable was set in the environment +func isEnvSet(name string) bool { + for _, s := range os.Environ() { + if strings.SplitN(s, "=", 2)[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go new file mode 100644 index 0000000..c3a7d3b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go @@ -0,0 +1,55 @@ +// Package stats contains statistics for converting non-TSM shards to TSM. +package stats + +import ( + "sync/atomic" + "time" +) + +// Stats are the statistics captured while converting non-TSM shards to TSM +type Stats struct { + NanFiltered uint64 + InfFiltered uint64 + FieldsFiltered uint64 + PointsWritten uint64 + PointsRead uint64 + TsmFilesCreated uint64 + TsmBytesWritten uint64 + CompletedShards uint64 + TotalTime time.Duration +} + +// AddPointsRead increments the number of read points. +func (s *Stats) AddPointsRead(n int) { + atomic.AddUint64(&s.PointsRead, uint64(n)) +} + +// AddPointsWritten increments the number of written points. +func (s *Stats) AddPointsWritten(n int) { + atomic.AddUint64(&s.PointsWritten, uint64(n)) +} + +// AddTSMBytes increments the number of TSM Bytes. +func (s *Stats) AddTSMBytes(n uint32) { + atomic.AddUint64(&s.TsmBytesWritten, uint64(n)) +} + +// IncrTSMFileCount increments the number of TSM files created. +func (s *Stats) IncrTSMFileCount() { + atomic.AddUint64(&s.TsmFilesCreated, 1) +} + +// IncrNaN increments the number of NaNs filtered. +func (s *Stats) IncrNaN() { + atomic.AddUint64(&s.NanFiltered, 1) +} + +// IncrInf increments the number of Infs filtered. +func (s *Stats) IncrInf() { + atomic.AddUint64(&s.InfFiltered, 1) +} + +// IncrFiltered increments the number of fields filtered. +func (s *Stats) IncrFiltered() { + atomic.AddUint64(&s.FieldsFiltered, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go new file mode 100644 index 0000000..b91d9b9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go @@ -0,0 +1,130 @@ +package main + +import ( + "fmt" + "log" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// tracker will orchestrate and track the conversions of non-TSM shards to TSM +type tracker struct { + Stats stats.Stats + + shards tsdb.ShardInfos + opts options + + pg ParallelGroup + wg sync.WaitGroup +} + +// newTracker will setup and return a clean tracker instance +func newTracker(shards tsdb.ShardInfos, opts options) *tracker { + t := &tracker{ + shards: shards, + opts: opts, + pg: NewParallelGroup(runtime.GOMAXPROCS(0)), + } + + return t +} + +func (t *tracker) Run() error { + conversionStart := time.Now() + + // Backup each directory. 
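+	// Backups share the tracker's ParallelGroup with shard conversions, so at most
+	// GOMAXPROCS database copies run at the same time.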
+ if !opts.SkipBackup { + databases := t.shards.Databases() + fmt.Printf("Backing up %d databases...\n", len(databases)) + t.wg.Add(len(databases)) + for i := range databases { + db := databases[i] + go t.pg.Do(func() { + defer t.wg.Done() + + start := time.Now() + log.Printf("Backup of database '%v' started", db) + err := backupDatabase(db) + if err != nil { + log.Fatalf("Backup of database %v failed: %v\n", db, err) + } + log.Printf("Database %v backed up (%v)\n", db, time.Since(start)) + }) + } + t.wg.Wait() + } else { + fmt.Println("Database backup disabled.") + } + + t.wg.Add(len(t.shards)) + for i := range t.shards { + si := t.shards[i] + go t.pg.Do(func() { + defer func() { + atomic.AddUint64(&t.Stats.CompletedShards, 1) + t.wg.Done() + }() + + start := time.Now() + log.Printf("Starting conversion of shard: %v", si.FullPath(opts.DataPath)) + if err := convertShard(si, t); err != nil { + log.Fatalf("Failed to convert %v: %v\n", si.FullPath(opts.DataPath), err) + } + log.Printf("Conversion of %v successful (%v)\n", si.FullPath(opts.DataPath), time.Since(start)) + }) + } + + done := make(chan struct{}) + go func() { + t.wg.Wait() + close(done) + }() + +WAIT_LOOP: + for { + select { + case <-done: + break WAIT_LOOP + case <-time.After(opts.UpdateInterval): + t.StatusUpdate() + } + } + + t.Stats.TotalTime = time.Since(conversionStart) + + return nil +} + +func (t *tracker) StatusUpdate() { + shardCount := atomic.LoadUint64(&t.Stats.CompletedShards) + pointCount := atomic.LoadUint64(&t.Stats.PointsRead) + pointWritten := atomic.LoadUint64(&t.Stats.PointsWritten) + + log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointCount, pointWritten) +} + +func (t *tracker) PrintStats() { + preSize := t.shards.Size() + postSize := int64(t.Stats.TsmBytesWritten) + + fmt.Printf("\nSummary statistics\n========================================\n") + fmt.Printf("Databases converted: %d\n", len(t.shards.Databases())) + fmt.Printf("Shards converted: %d\n", len(t.shards)) + fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated) + fmt.Printf("Points read: %d\n", t.Stats.PointsRead) + fmt.Printf("Points written: %d\n", t.Stats.PointsWritten) + fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered) + fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered) + fmt.Printf("Points without fields filtered: %d\n", t.Stats.FieldsFiltered) + fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize) + fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize) + fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize) + fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten)) + fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime) + fmt.Println() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go new file mode 100644 index 0000000..4c3a7b6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go @@ -0,0 +1,119 @@ +package tsdb + +import ( + "encoding/binary" + "errors" + "fmt" + "math" +) + +const ( + fieldFloat = 1 + fieldInteger = 2 + fieldBoolean = 3 + fieldString = 4 +) + +var ( + // ErrFieldNotFound is returned when a field cannot be found. + ErrFieldNotFound = errors.New("field not found") + + // ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID + // there is no mapping for. 
+	ErrFieldUnmappedID = errors.New("field ID not mapped")
+)
+
+// FieldCodec provides encoding and decoding functionality for the fields of a given
+// Measurement.
+type FieldCodec struct {
+	fieldsByID   map[uint8]*Field
+	fieldsByName map[string]*Field
+}
+
+// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with
+// an RLock that protects the Measurement.
+func NewFieldCodec(fields map[string]*Field) *FieldCodec {
+	fieldsByID := make(map[uint8]*Field, len(fields))
+	fieldsByName := make(map[string]*Field, len(fields))
+	for _, f := range fields {
+		fieldsByID[f.ID] = f
+		fieldsByName[f.Name] = f
+	}
+	return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName}
+}
+
+// FieldIDByName returns the ID for the given field.
+func (f *FieldCodec) FieldIDByName(s string) (uint8, error) {
+	fi := f.fieldsByName[s]
+	if fi == nil {
+		return 0, ErrFieldNotFound
+	}
+	return fi.ID, nil
+}
+
+// DecodeByID scans a byte slice for a field with the given ID, converts it to its
+// expected type, and returns that value.
+func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) {
+	var value interface{}
+	for {
+		if len(b) == 0 {
+			// No more bytes.
+			return nil, ErrFieldNotFound
+		}
+
+		field := f.fieldsByID[b[0]]
+		if field == nil {
+			// This can happen, though is very unlikely. If this node receives encoded data, to be written
+			// to disk, and is queried for that data before its metastore is updated, there will be no field
+			// mapping for the data during decode. All this can happen because data is encoded by the node
+			// that first received the write request, not the node that actually writes the data to disk.
+			// So if this happens, the read must be aborted.
+			return nil, ErrFieldUnmappedID
+		}
+
+		switch field.Type {
+		case fieldFloat:
+			if field.ID == targetID {
+				value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9]))
+			}
+			b = b[9:]
+		case fieldInteger:
+			if field.ID == targetID {
+				value = int64(binary.BigEndian.Uint64(b[1:9]))
+			}
+			b = b[9:]
+		case fieldBoolean:
+			if field.ID == targetID {
+				value = b[1] == 1
+			}
+			b = b[2:]
+		case fieldString:
+			length := binary.BigEndian.Uint16(b[1:3])
+			if field.ID == targetID {
+				value = string(b[3 : 3+length])
+			}
+			b = b[3+length:]
+		default:
+			panic(fmt.Sprintf("unsupported value type during decode by id: %T", field.Type))
+		}
+
+		if value != nil {
+			return value, nil
+		}
+	}
+}
+
+// DecodeByName scans a byte slice for a field with the given name, converts it to its
+// expected type, and returns that value.
+func (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) {
+	fi := f.FieldByName(name)
+	if fi == nil {
+		return 0, ErrFieldNotFound
+	}
+	return f.DecodeByID(fi.ID, b)
+}
+
+// FieldByName returns the field by its name. It returns nil if the field is not found.
+func (f *FieldCodec) FieldByName(name string) *Field {
+	return f.fieldsByName[name]
+}
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go
new file mode 100644
index 0000000..94003d7
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go
@@ -0,0 +1,244 @@
+// Package tsdb abstracts the various shard types supported by the influx_tsm command.
+package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/influxdata/influxdb/pkg/slices" +) + +// Flags for differentiating between engines +const ( + B1 = iota + BZ1 + TSM1 +) + +// EngineFormat holds the flag for the engine +type EngineFormat int + +// String returns the string format of the engine. +func (e EngineFormat) String() string { + switch e { + case TSM1: + return "tsm1" + case B1: + return "b1" + case BZ1: + return "bz1" + default: + panic("unrecognized shard engine format") + } +} + +// ShardInfo is the description of a shard on disk. +type ShardInfo struct { + Database string + RetentionPolicy string + Path string + Format EngineFormat + Size int64 +} + +// FormatAsString returns the format of the shard as a string. +func (s *ShardInfo) FormatAsString() string { + return s.Format.String() +} + +// FullPath returns the full path to the shard, given the data directory root. +func (s *ShardInfo) FullPath(dataPath string) string { + return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path) +} + +// ShardInfos is an array of ShardInfo +type ShardInfos []*ShardInfo + +func (s ShardInfos) Len() int { return len(s) } +func (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ShardInfos) Less(i, j int) bool { + if s[i].Database == s[j].Database { + if s[i].RetentionPolicy == s[j].RetentionPolicy { + return s[i].Path < s[j].Path + } + + return s[i].RetentionPolicy < s[j].RetentionPolicy + } + + return s[i].Database < s[j].Database +} + +// Databases returns the sorted unique set of databases for the shards. +func (s ShardInfos) Databases() []string { + dbm := make(map[string]bool) + for _, ss := range s { + dbm[ss.Database] = true + } + + var dbs []string + for k := range dbm { + dbs = append(dbs, k) + } + sort.Strings(dbs) + return dbs +} + +// FilterFormat returns a copy of the ShardInfos, with shards of the given +// format removed. +func (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos { + var a ShardInfos + for _, si := range s { + if si.Format != fmt { + a = append(a, si) + } + } + return a +} + +// Size returns the space on disk consumed by the shards. +func (s ShardInfos) Size() int64 { + var sz int64 + for _, si := range s { + sz += si.Size + } + return sz +} + +// ExclusiveDatabases returns a copy of the ShardInfo, with shards associated +// with the given databases present. If the given set is empty, all databases +// are returned. +func (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos { + var a ShardInfos + + // Empty set? Return everything. + if len(exc) == 0 { + a = make(ShardInfos, len(s)) + copy(a, s) + return a + } + + for _, si := range s { + if slices.Exists(exc, si.Database) { + a = append(a, si) + } + } + return a +} + +// Database represents an entire database on disk. +type Database struct { + path string +} + +// NewDatabase creates a database instance using data at path. +func NewDatabase(path string) *Database { + return &Database{path: path} +} + +// Name returns the name of the database. +func (d *Database) Name() string { + return path.Base(d.path) +} + +// Path returns the path to the database. +func (d *Database) Path() string { + return d.path +} + +// Shards returns information for every shard in the database. +func (d *Database) Shards() ([]*ShardInfo, error) { + fd, err := os.Open(d.path) + if err != nil { + return nil, err + } + + // Get each retention policy. 
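+	// Each entry in the database directory is treated as a retention policy directory.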
+ rps, err := fd.Readdirnames(-1) + if err != nil { + return nil, err + } + + // Process each retention policy. + var shardInfos []*ShardInfo + for _, rp := range rps { + rpfd, err := os.Open(filepath.Join(d.path, rp)) + if err != nil { + return nil, err + } + + // Process each shard + shards, err := rpfd.Readdirnames(-1) + if err != nil { + return nil, err + } + + for _, sh := range shards { + fmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh)) + if err != nil { + return nil, err + } + + si := &ShardInfo{ + Database: d.Name(), + RetentionPolicy: path.Base(rp), + Path: sh, + Format: fmt, + Size: sz, + } + shardInfos = append(shardInfos, si) + } + } + + sort.Sort(ShardInfos(shardInfos)) + return shardInfos, nil +} + +// shardFormat returns the format and size on disk of the shard at path. +func shardFormat(path string) (EngineFormat, int64, error) { + // If it's a directory then it's a tsm1 engine + fi, err := os.Stat(path) + if err != nil { + return 0, 0, err + } + if fi.Mode().IsDir() { + return TSM1, fi.Size(), nil + } + + // It must be a BoltDB-based engine. + db, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return 0, 0, err + } + defer db.Close() + + var format EngineFormat + err = db.View(func(tx *bolt.Tx) error { + // Retrieve the meta bucket. + b := tx.Bucket([]byte("meta")) + + // If no format is specified then it must be an original b1 database. + if b == nil { + format = B1 + return nil + } + + // There is an actual format indicator. + switch f := string(b.Get([]byte("format"))); f { + case "b1", "v1": + format = B1 + case "bz1": + format = BZ1 + default: + return fmt.Errorf("unrecognized engine format: %s", f) + } + + return nil + }) + + return format, fi.Size(), err +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go new file mode 100644 index 0000000..c580f4d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-gogo. +// source: internal/meta.proto +// DO NOT EDIT! + +/* +Package internal is a generated protocol buffer package. + +It is generated from these files: + internal/meta.proto + +It has these top-level messages: + Series + Tag + MeasurementFields + Field +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Series struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} + +func (m *Series) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Series) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +type Tag struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} + +func (m *Tag) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Tag) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type MeasurementFields struct { + Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MeasurementFields) Reset() { *m = MeasurementFields{} } +func (m *MeasurementFields) String() string { return proto.CompactTextString(m) } +func (*MeasurementFields) ProtoMessage() {} + +func (m *MeasurementFields) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +type Field struct { + ID *int32 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` + Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` + Type *int32 `protobuf:"varint,3,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} + +func (m *Field) GetID() int32 { + if m != nil && m.ID != nil { + return *m.ID + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Field) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go new file mode 100644 index 0000000..c0d0010 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go @@ -0,0 +1,60 @@ +package tsdb + +import ( + "encoding/binary" + "strings" + + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal" + "github.com/influxdata/influxdb/influxql" + + "github.com/gogo/protobuf/proto" +) + +// Field represents an encoded field. +type Field struct { + ID uint8 `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type influxql.DataType `json:"type,omitempty"` +} + +// MeasurementFields is a mapping from measurements to its fields. +type MeasurementFields struct { + Fields map[string]*Field `json:"fields"` + Codec *FieldCodec +} + +// UnmarshalBinary decodes the object from a binary format. 
+func (m *MeasurementFields) UnmarshalBinary(buf []byte) error { + var pb internal.MeasurementFields + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + m.Fields = make(map[string]*Field) + for _, f := range pb.Fields { + m.Fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())} + } + return nil +} + +// Series represents a series in the shard. +type Series struct { + Key string + Tags map[string]string +} + +// MeasurementFromSeriesKey returns the Measurement name for a given series. +func MeasurementFromSeriesKey(key string) string { + return strings.SplitN(key, ",", 2)[0] +} + +// DecodeKeyValue decodes the key and value from bytes. +func DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) { + // Convert key to a timestamp. + key := int64(binary.BigEndian.Uint64(k[0:8])) + + decValue, err := dec.DecodeByName(field, v) + if err != nil { + return key, nil + } + return key, decValue +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go new file mode 100644 index 0000000..448d0ac --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go @@ -0,0 +1,387 @@ +// Package backup is the backup subcommand for the influxd command. +package backup + +import ( + "encoding/binary" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/tcp" +) + +const ( + // Suffix is a suffix added to the backup while it's in-process. + Suffix = ".pending" + + // Metafile is the base name given to the metastore backups. + Metafile = "meta" + + // BackupFilePattern is the beginning of the pattern for a backup + // file. They follow the scheme ... + BackupFilePattern = "%s.%s.%05d" +) + +// Command represents the program execution for "influxd backup". +type Command struct { + // The logger passed to the ticker during execution. + Logger *log.Logger + + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + host string + path string + database string +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + // Set up logger. + cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags) + + // Parse command line arguments. + retentionPolicy, shardID, since, err := cmd.parseFlags(args) + if err != nil { + return err + } + + // based on the arguments passed in we only backup the minimum + if shardID != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupShard(retentionPolicy, shardID, since) + } else if retentionPolicy != "" { + err = cmd.backupRetentionPolicy(retentionPolicy, since) + } else if cmd.database != "" { + err = cmd.backupDatabase(since) + } else { + err = cmd.backupMetastore() + } + + if err != nil { + cmd.Logger.Printf("backup failed: %v", err) + return err + } + + cmd.Logger.Println("backup complete") + + return nil +} + +// parseFlags parses and validates the command line arguments into a request object. 
+func (cmd *Command) parseFlags(args []string) (retentionPolicy, shardID string, since time.Time, err error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + + fs.StringVar(&cmd.host, "host", "localhost:8088", "") + fs.StringVar(&cmd.database, "database", "", "") + fs.StringVar(&retentionPolicy, "retention", "", "") + fs.StringVar(&shardID, "shard", "", "") + var sinceArg string + fs.StringVar(&sinceArg, "since", "", "") + + fs.SetOutput(cmd.Stderr) + fs.Usage = cmd.printUsage + + err = fs.Parse(args) + if err != nil { + return + } + if sinceArg != "" { + since, err = time.Parse(time.RFC3339, sinceArg) + if err != nil { + return + } + } + + // Ensure that only one arg is specified. + if fs.NArg() == 0 { + return "", "", time.Unix(0, 0), errors.New("backup destination path required") + } else if fs.NArg() != 1 { + return "", "", time.Unix(0, 0), errors.New("only one backup path allowed") + } + cmd.path = fs.Arg(0) + + err = os.MkdirAll(cmd.path, 0700) + + return +} + +// backupShard will write a tar archive of the passed in shard with any TSM files that have been +// created since the time passed in +func (cmd *Command) backupShard(retentionPolicy string, shardID string, since time.Time) error { + id, err := strconv.ParseUint(shardID, 10, 64) + if err != nil { + return err + } + + shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(BackupFilePattern, cmd.database, retentionPolicy, id))) + if err != nil { + return err + } + + cmd.Logger.Printf("backing up db=%v rp=%v shard=%v to %s since %s", + cmd.database, retentionPolicy, shardID, shardArchivePath, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestShardBackup, + Database: cmd.database, + RetentionPolicy: retentionPolicy, + ShardID: id, + Since: since, + } + + // TODO: verify shard backup data + return cmd.downloadAndVerify(req, shardArchivePath, nil) +} + +// backupDatabase will request the database information from the server and then backup the metastore and +// every shard in every retention policy in the database. Each shard will be written to a separate tar. +func (cmd *Command) backupDatabase(since time.Time) error { + cmd.Logger.Printf("backing up db=%s since %s", cmd.database, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestDatabaseInfo, + Database: cmd.database, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response, since) +} + +// backupRetentionPolicy will request the retention policy information from the server and then backup +// the metastore and every shard in the retention policy. Each shard will be written to a separate tar. 
+func (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Time) error { + cmd.Logger.Printf("backing up rp=%s since %s", retentionPolicy, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestRetentionPolicyInfo, + Database: cmd.database, + RetentionPolicy: retentionPolicy, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response, since) +} + +// backupResponsePaths will backup the metastore and all shard paths in the response struct +func (cmd *Command) backupResponsePaths(response *snapshotter.Response, since time.Time) error { + if err := cmd.backupMetastore(); err != nil { + return err + } + + // loop through the returned paths and back up each shard + for _, path := range response.Paths { + rp, id, err := retentionAndShardFromPath(path) + if err != nil { + return err + } + + if err := cmd.backupShard(rp, id, since); err != nil { + return err + } + } + + return nil +} + +// backupMetastore will backup the metastore on the host to the passed in path. Database and retention policy backups +// will force a backup of the metastore as well as requesting a specific shard backup from the command line +func (cmd *Command) backupMetastore() error { + metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, Metafile)) + if err != nil { + return err + } + + cmd.Logger.Printf("backing up metastore to %s", metastoreArchivePath) + + req := &snapshotter.Request{ + Type: snapshotter.RequestMetastoreBackup, + } + + return cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error { + binData, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + magic := binary.BigEndian.Uint64(binData[:8]) + if magic != snapshotter.BackupMagicHeader { + cmd.Logger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)") + return errors.New("invalid metadata received") + } + + return nil + }) +} + +// nextPath returns the next file to write to. +func (cmd *Command) nextPath(path string) (string, error) { + // Iterate through incremental files until one is available. + for i := 0; ; i++ { + s := fmt.Sprintf(path+".%02d", i) + if _, err := os.Stat(s); os.IsNotExist(err) { + return s, nil + } else if err != nil { + return "", err + } + } +} + +// downloadAndVerify will download either the metastore or shard to a temp file and then +// rename it to a good backup file name after complete +func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error { + tmppath := path + Suffix + if err := cmd.download(req, tmppath); err != nil { + return err + } + + if validator != nil { + if err := validator(tmppath); err != nil { + if rmErr := os.Remove(tmppath); rmErr != nil { + cmd.Logger.Printf("Error cleaning up temporary file: %v", rmErr) + } + return err + } + } + + f, err := os.Stat(tmppath) + if err != nil { + return err + } + + // There was nothing downloaded, don't create an empty backup file. + if f.Size() == 0 { + return os.Remove(tmppath) + } + + // Rename temporary file to final path. + if err := os.Rename(tmppath, path); err != nil { + return fmt.Errorf("rename: %s", err) + } + + return nil +} + +// download downloads a snapshot of either the metastore or a shard from a host to a given path. +func (cmd *Command) download(req *snapshotter.Request, path string) error { + // Create local file to write to. 
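+	// The same file handle is reused across the retry attempts below.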
+ f, err := os.Create(path) + if err != nil { + return fmt.Errorf("open temp file: %s", err) + } + defer f.Close() + + for i := 0; i < 10; i++ { + if err = func() error { + // Connect to snapshotter service. + conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return err + } + defer conn.Close() + + // Write the request + if err := json.NewEncoder(conn).Encode(req); err != nil { + return fmt.Errorf("encode snapshot request: %s", err) + } + + // Read snapshot from the connection + if n, err := io.Copy(f, conn); err != nil || n == 0 { + return fmt.Errorf("copy backup to file: err=%v, n=%d", err, n) + } + return nil + }(); err == nil { + break + } else if err != nil { + cmd.Logger.Printf("Download shard %v failed %s. Retrying (%d)...\n", req.ShardID, err, i) + time.Sleep(time.Second) + } + } + + return err +} + +// requestInfo will request the database or retention policy information from the host +func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) { + // Connect to snapshotter service. + conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return nil, err + } + defer conn.Close() + + // Write the request + if err := json.NewEncoder(conn).Encode(request); err != nil { + return nil, fmt.Errorf("encode snapshot request: %s", err) + } + + // Read the response + var r snapshotter.Response + if err := json.NewDecoder(conn).Decode(&r); err != nil { + return nil, err + } + + return &r, nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, `Downloads a snapshot of a data node and saves it to disk. + +Usage: influxd backup [flags] PATH + + -host + The host to connect to snapshot. Defaults to 127.0.0.1:8088. + -database + The database to backup. + -retention + Optional. The retention policy to backup. + -shard + Optional. The shard id to backup. If specified, retention is required. + -since <2015-12-24T08:12:23> + Optional. Do an incremental backup since the passed in RFC3339 + formatted time. + +`) +} + +// retentionAndShardFromPath will take the shard relative path and split it into the +// retention policy name and shard ID. The first part of the path should be the database name. +func retentionAndShardFromPath(path string) (retention, shard string, err error) { + a := strings.Split(path, string(filepath.Separator)) + if len(a) != 3 { + return "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) + } + + return a[1], a[2], nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go new file mode 100644 index 0000000..67c8cc9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go @@ -0,0 +1,46 @@ +// Package help is the help subcommand of the influxd command. +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Configure and start an InfluxDB server. 
+ +Usage: influxd [[command] [arguments]] + +The commands are: + + backup downloads a snapshot of a data node and saves it to disk + config display the default configuration + help display this help message + restore uses a snapshot of a data node to rebuild a cluster + run run node with existing configuration + version displays the InfluxDB version + +"run" is the default command. + +Use "influxd [command] -help" for more information about a command. +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go new file mode 100644 index 0000000..84c97c2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go @@ -0,0 +1,177 @@ +// Command influxd is the InfluxDB server. +package main + +import ( + "flag" + "fmt" + "io" + "math/rand" + "os" + "os/signal" + "syscall" + "time" + + "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/help" + "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/uber-go/zap" +) + +// These variables are populated via the Go linker. +var ( + version string + commit string + branch string +) + +func init() { + // If commit, branch, or build time are not set, make that clear. + if version == "" { + version = "unknown" + } + if commit == "" { + commit = "unknown" + } + if branch == "" { + branch = "unknown" + } +} + +func main() { + rand.Seed(time.Now().UnixNano()) + + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger zap.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain return a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: zap.New( + zap.NewTextEncoder(), + zap.Output(os.Stderr), + ), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := cmd.ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "run": + cmd := run.NewCommand() + + // Tell the server the build details. + cmd.Version = version + cmd.Commit = commit + cmd.Branch = branch + cmd.Logger = m.Logger + + if err := cmd.Run(args...); err != nil { + return fmt.Errorf("run: %s", err) + } + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + m.Logger.Info("Listening for signals") + + // Block until one of the signals above is received + <-signalCh + m.Logger.Info("Signal received, initializing clean shutdown...") + go cmd.Close() + + // Block again until another signal is received, a shutdown timeout elapses, + // or the Command is gracefully closed + m.Logger.Info("Waiting for clean shutdown...") + select { + case <-signalCh: + m.Logger.Info("second signal received, initializing hard shutdown") + case <-time.After(time.Second * 30): + m.Logger.Info("time limit reached, initializing hard shutdown") + case <-cmd.Closed: + m.Logger.Info("server shutdown completed") + } + + // goodbye. 
+ + case "backup": + name := backup.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("backup: %s", err) + } + case "restore": + name := restore.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("restore: %s", err) + } + case "config": + if err := run.NewPrintConfigCommand().Run(args...); err != nil { + return fmt.Errorf("config: %s", err) + } + case "version": + if err := NewVersionCommand().Run(args...); err != nil { + return fmt.Errorf("version: %s", err) + } + case "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) + } + + return nil +} + +// VersionCommand represents the command executed by "influxd version". +type VersionCommand struct { + Stdout io.Writer + Stderr io.Writer +} + +// NewVersionCommand return a new instance of VersionCommand. +func NewVersionCommand() *VersionCommand { + return &VersionCommand{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run prints the current version and commit info. +func (cmd *VersionCommand) Run(args ...string) error { + // Parse flags in case -h is specified. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, versionUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Print version info. + fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) + + return nil +} + +var versionUsage = `Displays the InfluxDB version, build branch and git commit hash. + +Usage: influxd version +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go new file mode 100644 index 0000000..932aeb7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go @@ -0,0 +1,355 @@ +// Package restore is the restore subcommand for the influxd command, +// for restoring from a backup. +package restore + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/snapshotter" +) + +// Command represents the program execution for "influxd restore". +type Command struct { + Stdout io.Writer + Stderr io.Writer + + backupFilesPath string + metadir string + datadir string + database string + retention string + shard string + + // TODO: when the new meta stuff is done this should not be exported or be gone + MetaConfig *meta.Config +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + MetaConfig: meta.NewConfig(), + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + if err := cmd.parseFlags(args); err != nil { + return err + } + + if cmd.metadir != "" { + if err := cmd.unpackMeta(); err != nil { + return err + } + } + + if cmd.shard != "" { + return cmd.unpackShard(cmd.shard) + } else if cmd.retention != "" { + return cmd.unpackRetention() + } else if cmd.datadir != "" { + return cmd.unpackDatabase() + } + return nil +} + +// parseFlags parses and validates the command line arguments. 
+func (cmd *Command) parseFlags(args []string) error {
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	fs.StringVar(&cmd.metadir, "metadir", "", "")
+	fs.StringVar(&cmd.datadir, "datadir", "", "")
+	fs.StringVar(&cmd.database, "database", "", "")
+	fs.StringVar(&cmd.retention, "retention", "", "")
+	fs.StringVar(&cmd.shard, "shard", "", "")
+	fs.SetOutput(cmd.Stdout)
+	fs.Usage = cmd.printUsage
+	if err := fs.Parse(args); err != nil {
+		return err
+	}
+
+	cmd.MetaConfig = meta.NewConfig()
+	cmd.MetaConfig.Dir = cmd.metadir
+
+	// Require output path.
+	cmd.backupFilesPath = fs.Arg(0)
+	if cmd.backupFilesPath == "" {
+		return fmt.Errorf("path with backup files required")
+	}
+
+	// validate the arguments
+	if cmd.metadir == "" && cmd.database == "" {
+		return fmt.Errorf("-metadir or -database are required to restore")
+	}
+
+	if cmd.database != "" && cmd.datadir == "" {
+		return fmt.Errorf("-datadir is required to restore")
+	}
+
+	if cmd.shard != "" {
+		if cmd.database == "" {
+			return fmt.Errorf("-database is required to restore shard")
+		}
+		if cmd.retention == "" {
+			return fmt.Errorf("-retention is required to restore shard")
+		}
+	} else if cmd.retention != "" && cmd.database == "" {
+		return fmt.Errorf("-database is required to restore retention policy")
+	}
+
+	return nil
+}
+
+// unpackMeta reads the metadata from the backup directory and initializes a raft
+// cluster and replaces the root metadata.
+func (cmd *Command) unpackMeta() error {
+	// find the meta file
+	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
+	if err != nil {
+		return err
+	}
+
+	if len(metaFiles) == 0 {
+		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
+	}
+
+	latest := metaFiles[len(metaFiles)-1]
+
+	fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
+	// Read the metastore backup
+	f, err := os.Open(latest)
+	if err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, f); err != nil {
+		return fmt.Errorf("copy: %s", err)
+	}
+
+	b := buf.Bytes()
+	var i int
+
+	// Make sure the file is actually a meta store backup file
+	magic := binary.BigEndian.Uint64(b[:8])
+	if magic != snapshotter.BackupMagicHeader {
+		return fmt.Errorf("invalid metadata file")
+	}
+	i += 8
+
+	// Size of the meta store bytes
+	length := int(binary.BigEndian.Uint64(b[i : i+8]))
+	i += 8
+	metaBytes := b[i : i+length]
+	i += int(length)
+
+	// Size of the node.json bytes
+	length = int(binary.BigEndian.Uint64(b[i : i+8]))
+	i += 8
+	nodeBytes := b[i : i+length]
+
+	// Unpack into metadata.
+	var data meta.Data
+	if err := data.UnmarshalBinary(metaBytes); err != nil {
+		return fmt.Errorf("unmarshal: %s", err)
+	}
+
+	// Copy meta config and remove peers so it starts in single mode.
+	c := cmd.MetaConfig
+	c.Dir = cmd.metadir
+
+	// Create the meta dir
+	if err := os.MkdirAll(c.Dir, 0700); err != nil {
+		return err
+	}
+
+	// Write node.json back to meta dir
+	if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil {
+		return err
+	}
+
+	client := meta.NewClient(c)
+	if err := client.Open(); err != nil {
+		return err
+	}
+	defer client.Close()
+
+	// Force set the full metadata.
+ if err := client.SetData(&data); err != nil { + return fmt.Errorf("set data: %s", err) + } + + // remove the raft.db file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "raft.db")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // remove the node.json file if it exists + err = os.Remove(filepath.Join(cmd.metadir, "node.json")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return nil +} + +// unpackShard will look for all backup files in the path matching this shard ID +// and restore them to the data dir +func (cmd *Command) unpackShard(shardID string) error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("shard already present: %s", restorePath) + } + + id, err := strconv.ParseUint(shardID, 10, 64) + if err != nil { + return err + } + + // find the shard backup files + pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup.BackupFilePattern, cmd.database, cmd.retention, id)) + return cmd.unpackFiles(pat + ".*") +} + +// unpackDatabase will look for all backup files in the path matching this database +// and restore them to the data dir +func (cmd *Command) unpackDatabase() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("database already present: %s", restorePath) + } + + // find the database backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.database) + return cmd.unpackFiles(pat + ".*") +} + +// unpackRetention will look for all backup files in the path matching this retention +// and restore them to the data dir +func (cmd *Command) unpackRetention() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("retention already present: %s", restorePath) + } + + // find the retention backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.database) + return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.retention)) +} + +// unpackFiles will look for backup files matching the pattern and restore them to the data dir +func (cmd *Command) unpackFiles(pat string) error { + fmt.Printf("Restoring from backup %s\n", pat) + + backupFiles, err := filepath.Glob(pat) + if err != nil { + return err + } + + if len(backupFiles) == 0 { + return fmt.Errorf("no backup files for %s in %s", pat, cmd.backupFilesPath) + } + + for _, fn := range backupFiles { + if err := cmd.unpackTar(fn); err != nil { + return err + } + } + + return nil +} + +// unpackTar will restore a single tar archive to the data dir +func (cmd *Command) unpackTar(tarFile string) error { + f, err := os.Open(tarFile) + if err != nil { + return err + } + defer f.Close() + + tr := tar.NewReader(f) + + for { + hdr, err := tr.Next() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := cmd.unpackFile(tr, hdr.Name); err != nil { + return err + } + } +} + +// unpackFile will copy the current file from the tar archive to the data dir +func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error { + nativeFileName := 
filepath.FromSlash(fileName) + fn := filepath.Join(cmd.datadir, nativeFileName) + fmt.Printf("unpacking %s\n", fn) + + if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil { + return fmt.Errorf("error making restore dir: %s", err.Error()) + } + + ff, err := os.Create(fn) + if err != nil { + return err + } + defer ff.Close() + + if _, err := io.Copy(ff, tr); err != nil { + return err + } + + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases, +retention policies, or specific shards. The InfluxDB process must not be +running during a restore. + +Usage: influxd restore [flags] PATH + + -metadir + Optional. If set the metastore will be recovered to the given path. + -datadir + Optional. If set the restore process will recover the specified + database, retention policy or shard to the given directory. + -database + Optional. Required if no metadir given. Will restore the database + TSM files. + -retention + Optional. If given, database is required. Will restore the retention policy's + TSM files. + -shard + Optional. If given, database and retention are required. Will restore the shard's + TSM files. + +`) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go new file mode 100644 index 0000000..a9def5d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go @@ -0,0 +1,261 @@ +// Package run is the run (default) subcommand for the influxd command. +package run + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strconv" + "time" + + "github.com/uber-go/zap" +) + +const logo = ` + 8888888 .d888 888 8888888b. 888888b. + 888 d88P" 888 888 "Y88b 888 "88b + 888 888 888 888 888 888 .88P + 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. + 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b + 888 888 888 888 888 888 888 X88K 888 888 888 888 + 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P + 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" + +` + +// Command represents the command executed by "influxd run". +type Command struct { + Version string + Branch string + Commit string + BuildTime string + + closing chan struct{} + Closed chan struct{} + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Logger zap.Logger + + Server *Server +} + +// NewCommand return a new instance of Command. +func NewCommand() *Command { + return &Command{ + closing: make(chan struct{}), + Closed: make(chan struct{}), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + Logger: zap.New(zap.NullEncoder()), + } +} + +// Run parses the config from args and runs the server. +func (cmd *Command) Run(args ...string) error { + // Parse the command line flags. + options, err := cmd.ParseFlags(args...) + if err != nil { + return err + } + + // Print sweet InfluxDB logo. + fmt.Print(logo) + + // Mark start-up in log. + cmd.Logger.Info(fmt.Sprintf("InfluxDB starting, version %s, branch %s, commit %s", + cmd.Version, cmd.Branch, cmd.Commit)) + cmd.Logger.Info(fmt.Sprintf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0))) + + // Write the PID file. 
+ if err := cmd.writePIDFile(options.PIDFile); err != nil { + return fmt.Errorf("write pid file: %s", err) + } + + // Parse config + config, err := cmd.ParseConfig(options.GetConfigPath()) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + if config.HTTPD.PprofEnabled { + // Turn on block profiling to debug stuck databases + runtime.SetBlockProfileRate(int(1 * time.Second)) + } + + // Create server from config and start it. + buildInfo := &BuildInfo{ + Version: cmd.Version, + Commit: cmd.Commit, + Branch: cmd.Branch, + Time: cmd.BuildTime, + } + s, err := NewServer(config, buildInfo) + if err != nil { + return fmt.Errorf("create server: %s", err) + } + s.Logger = cmd.Logger + s.CPUProfile = options.CPUProfile + s.MemProfile = options.MemProfile + if err := s.Open(); err != nil { + return fmt.Errorf("open server: %s", err) + } + cmd.Server = s + + // Begin monitoring the server's error channel. + go cmd.monitorServerErrors() + + return nil +} + +// Close shuts down the server. +func (cmd *Command) Close() error { + defer close(cmd.Closed) + close(cmd.closing) + if cmd.Server != nil { + return cmd.Server.Close() + } + return nil +} + +func (cmd *Command) monitorServerErrors() { + logger := log.New(cmd.Stderr, "", log.LstdFlags) + for { + select { + case err := <-cmd.Server.Err(): + logger.Println(err) + case <-cmd.closing: + return + } + } +} + +// ParseFlags parses the command line flags from args and returns an options set. +func (cmd *Command) ParseFlags(args ...string) (Options, error) { + var options Options + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ConfigPath, "config", "", "") + fs.StringVar(&options.PIDFile, "pidfile", "", "") + // Ignore hostname option. + _ = fs.String("hostname", "", "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } + if err := fs.Parse(args); err != nil { + return Options{}, err + } + return options, nil +} + +// writePIDFile writes the process ID to path. +func (cmd *Command) writePIDFile(path string) error { + // Ignore if path is not set. + if path == "" { + return nil + } + + // Ensure the required directory structure exists. + err := os.MkdirAll(filepath.Dir(path), 0777) + if err != nil { + return fmt.Errorf("mkdir: %s", err) + } + + // Retrieve the PID and write it. + pid := strconv.Itoa(os.Getpid()) + if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { + return fmt.Errorf("write file: %s", err) + } + + return nil +} + +// ParseConfig parses the config at path. +// It returns a demo configuration if path is blank. +func (cmd *Command) ParseConfig(path string) (*Config, error) { + // Use demo configuration if no config path is specified. + if path == "" { + cmd.Logger.Info("no configuration provided, using default settings") + return NewDemoConfig() + } + + cmd.Logger.Info(fmt.Sprintf("Using configuration at: %s", path)) + + config := NewConfig() + if err := config.FromTomlFile(path); err != nil { + return nil, err + } + + return config, nil +} + +const usage = `Runs the InfluxDB server. 
+ +Usage: influxd run [flags] + + -config + Set the path to the configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). + -pidfile + Write process ID to a file. + -cpuprofile + Write CPU profiling information to a file. + -memprofile + Write memory usage information to a file. +` + +// Options represents the command line options that can be parsed. +type Options struct { + ConfigPath string + PIDFile string + CPUProfile string + MemProfile string +} + +// GetConfigPath returns the config path from the options. +// It will return a path by searching in this order: +// 1. The CLI option in ConfigPath +// 2. The environment variable INFLUXDB_CONFIG_PATH +// 3. The first influxdb.conf file on the path: +// - ~/.influxdb +// - /etc/influxdb +func (opt *Options) GetConfigPath() string { + if opt.ConfigPath != "" { + if opt.ConfigPath == os.DevNull { + return "" + } + return opt.ConfigPath + } else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" { + return envVar + } + + for _, path := range []string{ + os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"), + "/etc/influxdb/influxdb.conf", + } { + if _, err := os.Stat(path); err == nil { + return path + } + } + return "" +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go new file mode 100644 index 0000000..116b43c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go @@ -0,0 +1,363 @@ +package run + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "os/user" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tsdb" +) + +const ( + // DefaultBindAddress is the default address for various RPC services. + DefaultBindAddress = "127.0.0.1:8088" +) + +// Config represents the configuration format for the influxd binary. 
+type Config struct { + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` + Coordinator coordinator.Config `toml:"coordinator"` + Retention retention.Config `toml:"retention"` + Precreator precreator.Config `toml:"shard-precreation"` + + Monitor monitor.Config `toml:"monitor"` + Subscriber subscriber.Config `toml:"subscriber"` + HTTPD httpd.Config `toml:"http"` + GraphiteInputs []graphite.Config `toml:"graphite"` + CollectdInputs []collectd.Config `toml:"collectd"` + OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"` + UDPInputs []udp.Config `toml:"udp"` + + ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` + + // Server reporting + ReportingDisabled bool `toml:"reporting-disabled"` + + // BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.) + BindAddress string `toml:"bind-address"` +} + +// NewConfig returns an instance of Config with reasonable defaults. +func NewConfig() *Config { + c := &Config{} + c.Meta = meta.NewConfig() + c.Data = tsdb.NewConfig() + c.Coordinator = coordinator.NewConfig() + c.Precreator = precreator.NewConfig() + + c.Monitor = monitor.NewConfig() + c.Subscriber = subscriber.NewConfig() + c.HTTPD = httpd.NewConfig() + + c.GraphiteInputs = []graphite.Config{graphite.NewConfig()} + c.CollectdInputs = []collectd.Config{collectd.NewConfig()} + c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()} + c.UDPInputs = []udp.Config{udp.NewConfig()} + + c.ContinuousQuery = continuous_querier.NewConfig() + c.Retention = retention.NewConfig() + c.BindAddress = DefaultBindAddress + + return c +} + +// NewDemoConfig returns the config that runs when no config is specified. +func NewDemoConfig() (*Config, error) { + c := NewConfig() + + var homeDir string + // By default, store meta and data files in current users home directory + u, err := user.Current() + if err == nil { + homeDir = u.HomeDir + } else if os.Getenv("HOME") != "" { + homeDir = os.Getenv("HOME") + } else { + return nil, fmt.Errorf("failed to determine current user for storage") + } + + c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") + c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") + c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") + + return c, nil +} + +// trimBOM trims the Byte-Order-Marks from the beginning of the file. +// This is for Windows compatability only. +// See https://github.com/influxdata/telegraf/issues/1378. +func trimBOM(f []byte) []byte { + return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf")) +} + +// FromTomlFile loads the config from a TOML file. +func (c *Config) FromTomlFile(fpath string) error { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return err + } + bs = trimBOM(bs) + return c.FromToml(string(bs)) +} + +// FromToml loads the config from TOML. +func (c *Config) FromToml(input string) error { + // Replace deprecated [cluster] with [coordinator] + re := regexp.MustCompile(`(?m)^\s*\[cluster\]`) + input = re.ReplaceAllStringFunc(input, func(in string) string { + in = strings.TrimSpace(in) + out := "[coordinator]" + log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in) + return out + }) + + _, err := toml.Decode(input, c) + return err +} + +// Validate returns an error if the config is invalid. 
+func (c *Config) Validate() error { + if err := c.Meta.Validate(); err != nil { + return err + } + + if err := c.Data.Validate(); err != nil { + return err + } + + if err := c.Monitor.Validate(); err != nil { + return err + } + + if err := c.ContinuousQuery.Validate(); err != nil { + return err + } + + if err := c.Retention.Validate(); err != nil { + return err + } + + if err := c.Precreator.Validate(); err != nil { + return err + } + + if err := c.Subscriber.Validate(); err != nil { + return err + } + + for _, graphite := range c.GraphiteInputs { + if err := graphite.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) + } + } + + for _, collectd := range c.CollectdInputs { + if err := collectd.Validate(); err != nil { + return fmt.Errorf("invalid collectd config: %v", err) + } + } + + return nil +} + +// ApplyEnvOverrides apply the environment configuration on top of the config. +func (c *Config) ApplyEnvOverrides() error { + return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c), "") +} + +func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value, structKey string) error { + // If we have a pointer, dereference it + element := spec + if spec.Kind() == reflect.Ptr { + element = spec.Elem() + } + + value := os.Getenv(prefix) + + switch element.Kind() { + case reflect.String: + if len(value) == 0 { + return nil + } + element.SetString(value) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var intValue int64 + + // Handle toml.Duration + if element.Type().Name() == "Duration" { + dur, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value) + } + intValue = dur.Nanoseconds() + } else { + var err error + intValue, err = strconv.ParseInt(value, 0, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value) + } + } + element.SetInt(intValue) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + intValue, err := strconv.ParseUint(value, 0, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value) + } + element.SetUint(intValue) + case reflect.Bool: + boolValue, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value) + } + element.SetBool(boolValue) + case reflect.Float32, reflect.Float64: + floatValue, err := strconv.ParseFloat(value, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value) + } + element.SetFloat(floatValue) + case reflect.Slice: + // If the type is s slice, apply to each using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES="item1,item2" + for j := 0; j < element.Len(); j++ { + f := element.Index(j) + if err := c.applyEnvOverrides(prefix, f, structKey); err != nil { + return err + } + + if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil { + return err + } + } + + // If the type is s slice but have value not parsed as slice e.g. 
GRAPHITE_0_TEMPLATES="item1,item2" + if element.Len() == 0 && len(value) > 0 { + rules := strings.Split(value, ",") + + for _, rule := range rules { + element.Set(reflect.Append(element, reflect.ValueOf(rule))) + } + } + case reflect.Struct: + typeOfSpec := element.Type() + for i := 0; i < element.NumField(); i++ { + field := element.Field(i) + + // Skip any fields that we cannot set + if !field.CanSet() && field.Kind() != reflect.Slice { + continue + } + + fieldName := typeOfSpec.Field(i).Name + + configName := typeOfSpec.Field(i).Tag.Get("toml") + // Replace hyphens with underscores to avoid issues with shells + configName = strings.Replace(configName, "-", "_", -1) + + envKey := strings.ToUpper(configName) + if prefix != "" { + envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) + } + + // If it's a sub-config, recursively apply + if field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr || + field.Kind() == reflect.Slice || field.Kind() == reflect.Array { + if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil { + return err + } + continue + } + + value := os.Getenv(envKey) + // Skip any fields we don't have a value to set + if len(value) == 0 { + continue + } + + if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil { + return err + } + } + } + return nil +} + +// Diagnostics returns a diagnostics representation of Config. +func (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) { + return diagnostics.RowFromMap(map[string]interface{}{ + "reporting-disabled": c.ReportingDisabled, + "bind-address": c.BindAddress, + }), nil +} + +func (c *Config) diagnosticsClients() map[string]diagnostics.Client { + // Config settings that are always present. + m := map[string]diagnostics.Client{ + "config": c, + + "config-data": c.Data, + "config-meta": c.Meta, + "config-coordinator": c.Coordinator, + "config-retention": c.Retention, + "config-precreator": c.Precreator, + + "config-monitor": c.Monitor, + "config-subscriber": c.Subscriber, + "config-httpd": c.HTTPD, + + "config-cqs": c.ContinuousQuery, + } + + // Config settings that can be repeated and can be disabled. + if g := graphite.Configs(c.GraphiteInputs); g.Enabled() { + m["config-graphite"] = g + } + if cc := collectd.Configs(c.CollectdInputs); cc.Enabled() { + m["config-collectd"] = cc + } + if t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() { + m["config-opentsdb"] = t + } + if u := udp.Configs(c.UDPInputs); u.Enabled() { + m["config-udp"] = u + } + + return m +} + +// registerDiagnostics registers the config settings with the Monitor. +func (c *Config) registerDiagnostics(m *monitor.Monitor) { + for name, dc := range c.diagnosticsClients() { + m.RegisterDiagnosticsClient(name, dc) + } +} + +// registerDiagnostics deregisters the config settings from the Monitor. +func (c *Config) deregisterDiagnostics(m *monitor.Monitor) { + for name := range c.diagnosticsClients() { + m.DeregisterDiagnosticsClient(name) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go new file mode 100644 index 0000000..de945c5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go @@ -0,0 +1,92 @@ +package run + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/BurntSushi/toml" +) + +// PrintConfigCommand represents the command executed by "influxd config". 
+type PrintConfigCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPrintConfigCommand return a new instance of PrintConfigCommand. +func NewPrintConfigCommand() *PrintConfigCommand { + return &PrintConfigCommand{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses and prints the current config loaded. +func (cmd *PrintConfigCommand) Run(args ...string) error { + // Parse command flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + configPath := fs.String("config", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Parse config from path. + opt := Options{ConfigPath: *configPath} + config, err := cmd.parseConfig(opt.GetConfigPath()) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + toml.NewEncoder(cmd.Stdout).Encode(config) + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. +func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { + config, err := NewDemoConfig() + if err != nil { + config = NewConfig() + } + + if path == "" { + return config, nil + } + + fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path) + + if err := config.FromTomlFile(path); err != nil { + return nil, err + } + return config, nil +} + +var printConfigUsage = `Displays the default configuration. + +Usage: influxd config [flags] + + -config + Set the path to the initial configuration file. + This defaults to the environment variable INFLUXDB_CONFIG_PATH, + ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file + is present at any of these locations. + Disable the automatic loading of a configuration file using + the null device (such as /dev/null). +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go new file mode 100644 index 0000000..bf656fa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go @@ -0,0 +1,312 @@ +package run_test + +import ( + "fmt" + "os" + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +// Ensure the configuration can be parsed. +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c run.Config + if err := c.FromToml(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[opentsdb]] +bind-address = ":2020" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[subscriber] +enabled = true + +[continuous_queries] +enabled = true +`); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+ if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.GraphiteInputs) != 2 { + t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs)) + } else if c.GraphiteInputs[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol) + } else if c.GraphiteInputs[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol) + } else if c.CollectdInputs[0].BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress) + } else if c.CollectdInputs[1].BindAddress != ":1010" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } else if c.OpenTSDBInputs[0].BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } else if c.OpenTSDBInputs[1].BindAddress != ":2010" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress) + } else if c.OpenTSDBInputs[2].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) + } else if c.UDPInputs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } else if c.Subscriber.Enabled != true { + t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) + } else if c.ContinuousQuery.Enabled != true { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } +} + +// Ensure the configuration can be parsed. +func TestConfig_Parse_EnvOverride(t *testing.T) { + // Parse configuration. 
+ var c run.Config + if _, err := toml.Decode(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[coordinator] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" +templates = [ + "default.* .template.in.config" +] + +[[graphite]] +protocol = "tcp" + +[[collectd]] +bind-address = ":1000" + +[[collectd]] +bind-address = ":1010" + +[[opentsdb]] +bind-address = ":2000" + +[[opentsdb]] +bind-address = ":2010" + +[[udp]] +bind-address = ":4444" + +[[udp]] + +[monitoring] +enabled = true + +[continuous_queries] +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_UDP_0_BIND_ADDRESS", ":5555"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_GRAPHITE_0_TEMPLATES_0", "overide.* .template.0"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_GRAPHITE_1_TEMPLATES", "overide.* .template.1.1,overide.* .template.1.2"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_COLLECTD_1_BIND_ADDRESS", ":1020"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_OPENTSDB_0_BIND_ADDRESS", ":2020"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + // uint64 type + if err := os.Setenv("INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE", "1000"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := c.ApplyEnvOverrides(); err != nil { + t.Fatalf("failed to apply env overrides: %v", err) + } + + if c.UDPInputs[0].BindAddress != ":5555" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) + } + + if c.UDPInputs[1].BindAddress != ":1234" { + t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress) + } + + if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "overide.* .template.0" { + t.Fatalf("unexpected graphite 0 templates: %+v", c.GraphiteInputs[0].Templates) + } + + if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "overide.* .template.1.2" { + t.Fatalf("unexpected graphite 1 templates: %+v", c.GraphiteInputs[1].Templates) + } + + if c.GraphiteInputs[1].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol: %s", c.GraphiteInputs[1].Protocol) + } + + if c.CollectdInputs[1].BindAddress != ":1020" { + t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress) + } + + if c.OpenTSDBInputs[0].BindAddress != ":2020" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress) + } + + if c.Data.CacheMaxMemorySize != 1000 { + t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize) + } +} + +func TestConfig_ValidateNoServiceConfigured(t *testing.T) { + var c run.Config + if _, err := toml.Decode(` +[meta] +enabled = false + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if e := c.Validate(); e == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) { + c := run.NewConfig() + if _, err := toml.Decode(` +[monitor] +store-enabled = true + +[meta] +dir = "foo" + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) 
+ } + + if err := c.Validate(); err == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_DeprecatedOptions(t *testing.T) { + // Parse configuration. + var c run.Config + if err := c.FromToml(` +[cluster] +max-select-point = 100 +`); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if c.Coordinator.MaxSelectPointN != 100 { + t.Fatalf("unexpected coordinator max select points: %d", c.Coordinator.MaxSelectPointN) + + } +} + +// Ensure that Config.Validate correctly validates the individual subsections. +func TestConfig_InvalidSubsections(t *testing.T) { + // Precondition: NewDemoConfig must validate correctly. + c, err := run.NewDemoConfig() + if err != nil { + t.Fatalf("error creating demo config: %s", err) + } + if err := c.Validate(); err != nil { + t.Fatalf("new demo config failed validation: %s", err) + } + + // For each subsection, load a config with a single invalid setting. + for _, tc := range []struct { + section string + kv string + }{ + {"meta", `dir = ""`}, + {"data", `dir = ""`}, + {"monitor", `store-database = ""`}, + {"continuous_queries", `run-interval = "0s"`}, + {"subscriber", `http-timeout = "0s"`}, + {"retention", `check-interval = "0s"`}, + {"shard-precreation", `advance-period = "0s"`}, + } { + c, err := run.NewDemoConfig() + if err != nil { + t.Fatalf("error creating demo config: %s", err) + } + + s := fmt.Sprintf("\n[%s]\n%s\n", tc.section, tc.kv) + if err := c.FromToml(s); err != nil { + t.Fatalf("error loading toml %q: %s", s, err) + } + + if err := c.Validate(); err == nil { + t.Fatalf("expected error but got nil for config: %s", s) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go new file mode 100644 index 0000000..2f6d4ab --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go @@ -0,0 +1,616 @@ +package run + +import ( + "fmt" + "io" + "log" + "net" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tcp" + "github.com/influxdata/influxdb/tsdb" + client "github.com/influxdata/usage-client/v1" + "github.com/uber-go/zap" + + // Initialize the engine & index packages + _ "github.com/influxdata/influxdb/tsdb/engine" + _ "github.com/influxdata/influxdb/tsdb/index" +) + +var startTime time.Time + +func init() { + startTime = time.Now().UTC() +} + +// BuildInfo represents the build details for the server code. +type BuildInfo struct { + Version string + Commit string + Branch string + Time string +} + +// Server represents a container for the metadata and storage data and services. 
+// It is built using a Config and it manages the startup and shutdown of all +// services in the proper order. +type Server struct { + buildInfo BuildInfo + + err chan error + closing chan struct{} + + BindAddress string + Listener net.Listener + + Logger zap.Logger + + MetaClient *meta.Client + + TSDBStore *tsdb.Store + QueryExecutor *influxql.QueryExecutor + PointsWriter *coordinator.PointsWriter + Subscriber *subscriber.Service + + Services []Service + + // These references are required for the tcp muxer. + SnapshotterService *snapshotter.Service + + Monitor *monitor.Monitor + + // Server reporting and registration + reportingDisabled bool + + // Profiling + CPUProfile string + MemProfile string + + // httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data + httpAPIAddr string + + // httpUseTLS specifies if we should use a TLS connection to the http servers + httpUseTLS bool + + // tcpAddr is the host:port combination for the TCP listener that services mux onto + tcpAddr string + + config *Config +} + +// NewServer returns a new instance of Server built from a config. +func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { + // We need to ensure that a meta directory always exists even if + // we don't start the meta store. node.json is always stored under + // the meta directory. + if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil { + return nil, fmt.Errorf("mkdir all: %s", err) + } + + // 0.10-rc1 and prior would sometimes put the node.json at the root + // dir which breaks backup/restore and restarting nodes. This moves + // the file from the root so it's always under the meta dir. + oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json") + newPath := filepath.Join(c.Meta.Dir, "node.json") + + if _, err := os.Stat(oldPath); err == nil { + if err := os.Rename(oldPath, newPath); err != nil { + return nil, err + } + } + + _, err := influxdb.LoadNode(c.Meta.Dir) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } + + if err := raftDBExists(c.Meta.Dir); err != nil { + return nil, err + } + + // In 0.10.0 bind-address got moved to the top level. Check + // The old location to keep things backwards compatible + bind := c.BindAddress + + s := &Server{ + buildInfo: *buildInfo, + err: make(chan error), + closing: make(chan struct{}), + + BindAddress: bind, + + Logger: zap.New( + zap.NewTextEncoder(), + zap.Output(os.Stderr), + ), + + MetaClient: meta.NewClient(c.Meta), + + reportingDisabled: c.ReportingDisabled, + + httpAPIAddr: c.HTTPD.BindAddress, + httpUseTLS: c.HTTPD.HTTPSEnabled, + tcpAddr: bind, + + config: c, + } + s.Monitor = monitor.New(s, c.Monitor) + s.config.registerDiagnostics(s.Monitor) + + if err := s.MetaClient.Open(); err != nil { + return nil, err + } + + s.TSDBStore = tsdb.NewStore(c.Data.Dir) + s.TSDBStore.EngineOptions.Config = c.Data + + // Copy TSDB configuration. + s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine + s.TSDBStore.EngineOptions.IndexVersion = c.Data.Index + + // Create the Subscriber service + s.Subscriber = subscriber.NewService(c.Subscriber) + + // Initialize points writer. + s.PointsWriter = coordinator.NewPointsWriter() + s.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout) + s.PointsWriter.TSDBStore = s.TSDBStore + s.PointsWriter.Subscriber = s.Subscriber + + // Initialize query executor. 
+ s.QueryExecutor = influxql.NewQueryExecutor() + s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: s.MetaClient, + TaskManager: s.QueryExecutor.TaskManager, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: s.MetaClient, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + }, + Monitor: s.Monitor, + PointsWriter: s.PointsWriter, + MaxSelectPointN: c.Coordinator.MaxSelectPointN, + MaxSelectSeriesN: c.Coordinator.MaxSelectSeriesN, + MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN, + } + s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout) + s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter) + s.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries + + // Initialize the monitor + s.Monitor.Version = s.buildInfo.Version + s.Monitor.Commit = s.buildInfo.Commit + s.Monitor.Branch = s.buildInfo.Branch + s.Monitor.BuildTime = s.buildInfo.Time + s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter) + return s, nil +} + +// Statistics returns statistics for the services running in the Server. +func (s *Server) Statistics(tags map[string]string) []models.Statistic { + var statistics []models.Statistic + statistics = append(statistics, s.QueryExecutor.Statistics(tags)...) + statistics = append(statistics, s.TSDBStore.Statistics(tags)...) + statistics = append(statistics, s.PointsWriter.Statistics(tags)...) + statistics = append(statistics, s.Subscriber.Statistics(tags)...) + for _, srv := range s.Services { + if m, ok := srv.(monitor.Reporter); ok { + statistics = append(statistics, m.Statistics(tags)...) + } + } + return statistics +} + +func (s *Server) appendSnapshotterService() { + srv := snapshotter.NewService() + srv.TSDBStore = s.TSDBStore + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + s.SnapshotterService = srv +} + +// SetLogOutput sets the logger used for all messages. It must not be called +// after the Open method has been called. 
+func (s *Server) SetLogOutput(w io.Writer) { + s.Logger = zap.New(zap.NewTextEncoder(), zap.Output(zap.AddSync(w))) +} + +func (s *Server) appendMonitorService() { + s.Services = append(s.Services, s.Monitor) +} + +func (s *Server) appendRetentionPolicyService(c retention.Config) { + if !c.Enabled { + return + } + srv := retention.NewService(c) + srv.MetaClient = s.MetaClient + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) +} + +func (s *Server) appendHTTPDService(c httpd.Config) { + if !c.Enabled { + return + } + srv := httpd.NewService(c) + srv.Handler.MetaClient = s.MetaClient + srv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient) + srv.Handler.WriteAuthorizer = meta.NewWriteAuthorizer(s.MetaClient) + srv.Handler.QueryExecutor = s.QueryExecutor + srv.Handler.Monitor = s.Monitor + srv.Handler.PointsWriter = s.PointsWriter + srv.Handler.Version = s.buildInfo.Version + + s.Services = append(s.Services, srv) +} + +func (s *Server) appendCollectdService(c collectd.Config) { + if !c.Enabled { + return + } + srv := collectd.NewService(c) + srv.MetaClient = s.MetaClient + srv.PointsWriter = s.PointsWriter + s.Services = append(s.Services, srv) +} + +func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { + if !c.Enabled { + return nil + } + srv, err := opentsdb.NewService(c) + if err != nil { + return err + } + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendGraphiteService(c graphite.Config) error { + if !c.Enabled { + return nil + } + srv, err := graphite.NewService(c) + if err != nil { + return err + } + + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + srv.Monitor = s.Monitor + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendPrecreatorService(c precreator.Config) error { + if !c.Enabled { + return nil + } + srv, err := precreator.NewService(c) + if err != nil { + return err + } + + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendUDPService(c udp.Config) { + if !c.Enabled { + return + } + srv := udp.NewService(c) + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) +} + +func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { + if !c.Enabled { + return + } + srv := continuous_querier.NewService(c) + srv.MetaClient = s.MetaClient + srv.QueryExecutor = s.QueryExecutor + s.Services = append(s.Services, srv) +} + +// Err returns an error channel that multiplexes all out of band errors received from all services. +func (s *Server) Err() <-chan error { return s.err } + +// Open opens the meta and data store and all services. +func (s *Server) Open() error { + // Start profiling, if set. + startProfile(s.CPUProfile, s.MemProfile) + + // Open shared TCP connection. + ln, err := net.Listen("tcp", s.BindAddress) + if err != nil { + return fmt.Errorf("listen: %s", err) + } + s.Listener = ln + + // Multiplex listener. + mux := tcp.NewMux() + go mux.Serve(ln) + + // Append services. 
+ s.appendMonitorService() + s.appendPrecreatorService(s.config.Precreator) + s.appendSnapshotterService() + s.appendContinuousQueryService(s.config.ContinuousQuery) + s.appendHTTPDService(s.config.HTTPD) + s.appendRetentionPolicyService(s.config.Retention) + for _, i := range s.config.GraphiteInputs { + if err := s.appendGraphiteService(i); err != nil { + return err + } + } + for _, i := range s.config.CollectdInputs { + s.appendCollectdService(i) + } + for _, i := range s.config.OpenTSDBInputs { + if err := s.appendOpenTSDBService(i); err != nil { + return err + } + } + for _, i := range s.config.UDPInputs { + s.appendUDPService(i) + } + + s.Subscriber.MetaClient = s.MetaClient + s.Subscriber.MetaClient = s.MetaClient + s.PointsWriter.MetaClient = s.MetaClient + s.Monitor.MetaClient = s.MetaClient + + s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) + + // Configure logging for all services and clients. + if s.config.Meta.LoggingEnabled { + s.MetaClient.WithLogger(s.Logger) + } + s.TSDBStore.WithLogger(s.Logger) + if s.config.Data.QueryLogEnabled { + s.QueryExecutor.WithLogger(s.Logger) + } + s.PointsWriter.WithLogger(s.Logger) + s.Subscriber.WithLogger(s.Logger) + for _, svc := range s.Services { + svc.WithLogger(s.Logger) + } + s.SnapshotterService.WithLogger(s.Logger) + s.Monitor.WithLogger(s.Logger) + + // Open TSDB store. + if err := s.TSDBStore.Open(); err != nil { + return fmt.Errorf("open tsdb store: %s", err) + } + + // Open the subcriber service + if err := s.Subscriber.Open(); err != nil { + return fmt.Errorf("open subscriber: %s", err) + } + + // Open the points writer service + if err := s.PointsWriter.Open(); err != nil { + return fmt.Errorf("open points writer: %s", err) + } + + for _, service := range s.Services { + if err := service.Open(); err != nil { + return fmt.Errorf("open service: %s", err) + } + } + + // Start the reporting service, if not disabled. + if !s.reportingDisabled { + go s.startServerReporting() + } + + return nil +} + +// Close shuts down the meta and data stores and all services. +func (s *Server) Close() error { + stopProfile() + + // Close the listener first to stop any new connections + if s.Listener != nil { + s.Listener.Close() + } + + // Close services to allow any inflight requests to complete + // and prevent new requests from being accepted. + for _, service := range s.Services { + service.Close() + } + + s.config.deregisterDiagnostics(s.Monitor) + + if s.PointsWriter != nil { + s.PointsWriter.Close() + } + + if s.QueryExecutor != nil { + s.QueryExecutor.Close() + } + + // Close the TSDBStore, no more reads or writes at this point + if s.TSDBStore != nil { + s.TSDBStore.Close() + } + + if s.Subscriber != nil { + s.Subscriber.Close() + } + + if s.MetaClient != nil { + s.MetaClient.Close() + } + + close(s.closing) + return nil +} + +// startServerReporting starts periodic server reporting. +func (s *Server) startServerReporting() { + s.reportServer() + + ticker := time.NewTicker(24 * time.Hour) + defer ticker.Stop() + for { + select { + case <-s.closing: + return + case <-ticker.C: + s.reportServer() + } + } +} + +// reportServer reports usage statistics about the system. 
+func (s *Server) reportServer() { + dbs := s.MetaClient.Databases() + numDatabases := len(dbs) + + var ( + numMeasurements int64 + numSeries int64 + ) + + for _, db := range dbs { + name := db.Name + n, err := s.TSDBStore.SeriesCardinality(name) + if err != nil { + s.Logger.Error(fmt.Sprintf("Unable to get series cardinality for database %s: %v", name, err)) + } else { + numSeries += n + } + + n, err = s.TSDBStore.MeasurementsCardinality(name) + if err != nil { + s.Logger.Error(fmt.Sprintf("Unable to get measurement cardinality for database %s: %v", name, err)) + } else { + numMeasurements += n + } + } + + clusterID := s.MetaClient.ClusterID() + cl := client.New("") + usage := client.Usage{ + Product: "influxdb", + Data: []client.UsageData{ + { + Values: client.Values{ + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "version": s.buildInfo.Version, + "cluster_id": fmt.Sprintf("%v", clusterID), + "num_series": numSeries, + "num_measurements": numMeasurements, + "num_databases": numDatabases, + "uptime": time.Since(startTime).Seconds(), + }, + }, + }, + } + + s.Logger.Info("Sending usage statistics to usage.influxdata.com") + + go cl.Save(usage) +} + +// Service represents a service attached to the server. +type Service interface { + WithLogger(log zap.Logger) + Open() error + Close() error +} + +// prof stores the file locations of active profiles. +var prof struct { + cpu *os.File + mem *os.File +} + +// StartProfile initializes the cpu and memory profile, if specified. +func startProfile(cpuprofile, memprofile string) { + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatalf("cpuprofile: %v", err) + } + log.Printf("writing CPU profile to: %s\n", cpuprofile) + prof.cpu = f + pprof.StartCPUProfile(prof.cpu) + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatalf("memprofile: %v", err) + } + log.Printf("writing mem profile to: %s\n", memprofile) + prof.mem = f + runtime.MemProfileRate = 4096 + } + +} + +// StopProfile closes the cpu and memory profiles if they are running. +func stopProfile() { + if prof.cpu != nil { + pprof.StopCPUProfile() + prof.cpu.Close() + log.Println("CPU profile stopped") + } + if prof.mem != nil { + pprof.Lookup("heap").WriteTo(prof.mem, 0) + prof.mem.Close() + log.Println("mem profile stopped") + } +} + +// monitorPointsWriter is a wrapper around `coordinator.PointsWriter` that helps +// to prevent a circular dependency between the `cluster` and `monitor` packages. +type monitorPointsWriter coordinator.PointsWriter + +func (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error { + return (*coordinator.PointsWriter)(pw).WritePointsPrivileged(database, retentionPolicy, models.ConsistencyLevelAny, points) +} + +func raftDBExists(dir string) error { + // Check to see if there is a raft db, if so, error out with a message + // to downgrade, export, and then import the meta data + raftFile := filepath.Join(dir, "raft.db") + if _, err := os.Stat(raftFile); err == nil { + return fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. 
For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/parse.go b/vendor/github.com/influxdata/influxdb/cmd/parse.go new file mode 100644 index 0000000..7b140ed --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/parse.go @@ -0,0 +1,29 @@ +// Package cmd is the root package of the various command-line utilities for InfluxDB. +package cmd + +import "strings" + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. + var name string + if len(args) > 0 { + if !strings.HasPrefix(args[0], "-") { + name = args[0] + } else if args[0] == "-h" || args[0] == "-help" || args[0] == "--help" { + // Special case -h immediately following binary name + name = "help" + } + } + + // If command is "help" and has an argument then rewrite args to use "-h". + if name == "help" && len(args) > 2 && !strings.HasPrefix(args[1], "-") { + return args[1], []string{"-h"} + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/config.go b/vendor/github.com/influxdata/influxdb/coordinator/config.go new file mode 100644 index 0000000..da37731 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/config.go @@ -0,0 +1,63 @@ +// Package coordinator contains abstractions for writing points, executing statements, +// and accessing meta data. +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultWriteTimeout is the default timeout for a complete write to succeed. + DefaultWriteTimeout = 10 * time.Second + + // DefaultMaxConcurrentQueries is the maximum number of running queries. + // A value of zero will make the maximum query limit unlimited. + DefaultMaxConcurrentQueries = 0 + + // DefaultMaxSelectPointN is the maximum number of points a SELECT can process. + // A value of zero will make the maximum point count unlimited. + DefaultMaxSelectPointN = 0 + + // DefaultMaxSelectSeriesN is the maximum number of series a SELECT can run. + // A value of zero will make the maximum series count unlimited. + DefaultMaxSelectSeriesN = 0 +) + +// Config represents the configuration for the coordinator service. +type Config struct { + WriteTimeout toml.Duration `toml:"write-timeout"` + MaxConcurrentQueries int `toml:"max-concurrent-queries"` + QueryTimeout toml.Duration `toml:"query-timeout"` + LogQueriesAfter toml.Duration `toml:"log-queries-after"` + MaxSelectPointN int `toml:"max-select-point"` + MaxSelectSeriesN int `toml:"max-select-series"` + MaxSelectBucketsN int `toml:"max-select-buckets"` +} + +// NewConfig returns an instance of Config with defaults. +func NewConfig() Config { + return Config{ + WriteTimeout: toml.Duration(DefaultWriteTimeout), + QueryTimeout: toml.Duration(influxql.DefaultQueryTimeout), + MaxConcurrentQueries: DefaultMaxConcurrentQueries, + MaxSelectPointN: DefaultMaxSelectPointN, + MaxSelectSeriesN: DefaultMaxSelectSeriesN, + } +} + +// Diagnostics returns a diagnostics representation of a subset of the Config. 
+func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { + return diagnostics.RowFromMap(map[string]interface{}{ + "write-timeout": c.WriteTimeout, + "max-concurrent-queries": c.MaxConcurrentQueries, + "query-timeout": c.QueryTimeout, + "log-queries-after": c.LogQueriesAfter, + "max-select-point": c.MaxSelectPointN, + "max-select-series": c.MaxSelectSeriesN, + "max-select-buckets": c.MaxSelectBucketsN, + }), nil +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/config_test.go b/vendor/github.com/influxdata/influxdb/coordinator/config_test.go new file mode 100644 index 0000000..2f21436 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/config_test.go @@ -0,0 +1,24 @@ +package coordinator_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/coordinator" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c coordinator.Config + if _, err := toml.Decode(` +write-timeout = "20s" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if time.Duration(c.WriteTimeout) != 20*time.Second { + t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go new file mode 100644 index 0000000..1fb825f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go @@ -0,0 +1,35 @@ +package coordinator + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is an interface for accessing meta data. +type MetaClient interface { + CreateContinuousQuery(database, name, query string) error + CreateDatabase(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + CreateSubscription(database, rp, name, mode string, destinations []string) error + CreateUser(name, password string, admin bool) (meta.User, error) + Database(name string) *meta.DatabaseInfo + Databases() []meta.DatabaseInfo + DropShard(id uint64) error + DropContinuousQuery(database, name string) error + DropDatabase(name string) error + DropRetentionPolicy(database, name string) error + DropSubscription(database, rp, name string) error + DropUser(name string) error + RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilege(username string, admin bool) error + SetPrivilege(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUser(name, password string) error + UserPrivilege(username, database string) (*influxql.Privilege, error) + UserPrivileges(username string) (map[string]influxql.Privilege, error) + Users() []meta.UserInfo +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go new file mode 100644 index 0000000..fd4064d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go @@ -0,0 +1,160 @@ +package coordinator_test + +import ( + "time" + + 
"github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is a mockable implementation of cluster.MetaClient. +type MetaClient struct { + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (meta.User, error) + DatabaseFn func(name string) *meta.DatabaseInfo + DatabasesFn func() []meta.DatabaseInfo + DataNodeFn func(id uint64) (*meta.NodeInfo, error) + DataNodesFn func() ([]meta.NodeInfo, error) + DeleteDataNodeFn func(id uint64) error + DeleteMetaNodeFn func(id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropShardFn func(id uint64) error + DropUserFn func(name string) error + MetaNodesFn func() ([]meta.NodeInfo, error) + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilegeFn func(username string, admin bool) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClient) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (c *MetaClient) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, spec, makeDefault) +} + +func (c *MetaClient) DropShard(id uint64) error { + return c.DropShardFn(id) +} + +func (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClient) CreateUser(name, password string, admin bool) (meta.User, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClient) Database(name string) *meta.DatabaseInfo { + return c.DatabaseFn(name) +} + +func (c *MetaClient) Databases() []meta.DatabaseInfo { + return c.DatabasesFn() +} + +func (c *MetaClient) DataNode(id uint64) (*meta.NodeInfo, error) { + return c.DataNodeFn(id) +} + +func (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) { + return c.DataNodesFn() +} + +func (c *MetaClient) DeleteDataNode(id uint64) error { 
+ return c.DeleteDataNodeFn(id) +} + +func (c *MetaClient) DeleteMetaNode(id uint64) error { + return c.DeleteMetaNodeFn(id) +} + +func (c *MetaClient) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) +} + +func (c *MetaClient) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClient) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClient) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClient) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) { + return c.MetaNodesFn() +} + +func (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return c.ShardGroupsByTimeRangeFn(database, policy, min, max) +} + +func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { + return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) +} + +func (c *MetaClient) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClient) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClient) Users() []meta.UserInfo { + return c.UsersFn() +} + +// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy. +func DefaultMetaClientDatabaseFn(name string) *meta.DatabaseInfo { + return &meta.DatabaseInfo{ + Name: DefaultDatabase, + DefaultRetentionPolicy: DefaultRetentionPolicy, + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go new file mode 100644 index 0000000..8779e43 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go @@ -0,0 +1,393 @@ +package coordinator + +import ( + "errors" + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/uber-go/zap" +) + +// The keys for statistics generated by the "write" module. +const ( + statWriteReq = "req" + statPointWriteReq = "pointReq" + statPointWriteReqLocal = "pointReqLocal" + statWriteOK = "writeOk" + statWriteDrop = "writeDrop" + statWriteTimeout = "writeTimeout" + statWriteErr = "writeError" + statSubWriteOK = "subWriteOk" + statSubWriteDrop = "subWriteDrop" +) + +var ( + // ErrTimeout is returned when a write times out. + ErrTimeout = errors.New("timeout") + + // ErrPartialWrite is returned when a write partially succeeds but does + // not meet the requested consistency level. 
+ ErrPartialWrite = errors.New("partial write") + + // ErrWriteFailed is returned when no writes succeeded. + ErrWriteFailed = errors.New("write failed") +) + +// PointsWriter handles writes across multiple local and remote data nodes. +type PointsWriter struct { + mu sync.RWMutex + closing chan struct{} + WriteTimeout time.Duration + Logger zap.Logger + + Node *influxdb.Node + + MetaClient interface { + Database(name string) (di *meta.DatabaseInfo) + RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) + CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + } + + TSDBStore interface { + CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + } + + Subscriber interface { + Points() chan<- *WritePointsRequest + } + subPoints chan<- *WritePointsRequest + + stats *WriteStatistics +} + +// WritePointsRequest represents a request to write point data to the cluster. +type WritePointsRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// AddPoint adds a point to the WritePointRequest with field key 'value' +func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + pt, err := models.NewPoint( + name, models.NewTags(tags), map[string]interface{}{"value": value}, timestamp, + ) + if err != nil { + return + } + w.Points = append(w.Points, pt) +} + +// NewPointsWriter returns a new instance of PointsWriter for a node. +func NewPointsWriter() *PointsWriter { + return &PointsWriter{ + closing: make(chan struct{}), + WriteTimeout: DefaultWriteTimeout, + Logger: zap.New(zap.NullEncoder()), + stats: &WriteStatistics{}, + } +} + +// ShardMapping contains a mapping of shards to points. +type ShardMapping struct { + n int + Points map[uint64][]models.Point // The points associated with a shard ID + Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID + Dropped []models.Point // Points that were dropped +} + +// NewShardMapping creates an empty ShardMapping. +func NewShardMapping(n int) *ShardMapping { + return &ShardMapping{ + n: n, + Points: map[uint64][]models.Point{}, + Shards: map[uint64]*meta.ShardInfo{}, + } +} + +// MapPoint adds the point to the ShardMapping, associated with the given shardInfo. +func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { + if cap(s.Points[shardInfo.ID]) < s.n { + s.Points[shardInfo.ID] = make([]models.Point, 0, s.n) + } + s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p) + s.Shards[shardInfo.ID] = shardInfo +} + +// Open opens the communication channel with the point writer. +func (w *PointsWriter) Open() error { + w.mu.Lock() + defer w.mu.Unlock() + w.closing = make(chan struct{}) + if w.Subscriber != nil { + w.subPoints = w.Subscriber.Points() + } + return nil +} + +// Close closes the communication channel with the point writer. +func (w *PointsWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing != nil { + close(w.closing) + } + if w.subPoints != nil { + // 'nil' channels always block so this makes the + // select statement in WritePoints hit its default case + // dropping any in-flight writes. + w.subPoints = nil + } + return nil +} + +// WithLogger sets the Logger on w. 
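+// Every log entry subsequently written through w.Logger is tagged with the
+// "write" service name.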
+func (w *PointsWriter) WithLogger(log zap.Logger) { + w.Logger = log.With(zap.String("service", "write")) +} + +// WriteStatistics keeps statistics related to the PointsWriter. +type WriteStatistics struct { + WriteReq int64 + PointWriteReq int64 + PointWriteReqLocal int64 + WriteOK int64 + WriteDropped int64 + WriteTimeout int64 + WriteErr int64 + SubWriteOK int64 + SubWriteDrop int64 +} + +// Statistics returns statistics for periodic monitoring. +func (w *PointsWriter) Statistics(tags map[string]string) []models.Statistic { + return []models.Statistic{{ + Name: "write", + Tags: tags, + Values: map[string]interface{}{ + statWriteReq: atomic.LoadInt64(&w.stats.WriteReq), + statPointWriteReq: atomic.LoadInt64(&w.stats.PointWriteReq), + statPointWriteReqLocal: atomic.LoadInt64(&w.stats.PointWriteReqLocal), + statWriteOK: atomic.LoadInt64(&w.stats.WriteOK), + statWriteDrop: atomic.LoadInt64(&w.stats.WriteDropped), + statWriteTimeout: atomic.LoadInt64(&w.stats.WriteTimeout), + statWriteErr: atomic.LoadInt64(&w.stats.WriteErr), + statSubWriteOK: atomic.LoadInt64(&w.stats.SubWriteOK), + statSubWriteDrop: atomic.LoadInt64(&w.stats.SubWriteDrop), + }, + }} +} + +// MapShards maps the points contained in wp to a ShardMapping. If a point +// maps to a shard group or shard that does not currently exist, it will be +// created before returning the mapping. +func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { + rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) + if err != nil { + return nil, err + } else if rp == nil { + return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) + } + + // Holds all the shard groups and shards that are required for writes. + list := make(sgList, 0, 8) + min := time.Unix(0, models.MinNanoTime) + if rp.Duration > 0 { + min = time.Now().Add(-rp.Duration) + } + + for _, p := range wp.Points { + // Either the point is outside the scope of the RP, or we already have + // a suitable shard group for the point. + if p.Time().Before(min) || list.Covers(p.Time()) { + continue + } + + // No shard groups overlap with the point's time, so we will create + // a new shard group for this point. + sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, p.Time()) + if err != nil { + return nil, err + } + + if sg == nil { + return nil, errors.New("nil shard group") + } + list = list.Append(*sg) + } + + mapping := NewShardMapping(len(wp.Points)) + for _, p := range wp.Points { + sg := list.ShardGroupAt(p.Time()) + if sg == nil { + // We didn't create a shard group because the point was outside the + // scope of the RP. + mapping.Dropped = append(mapping.Dropped, p) + atomic.AddInt64(&w.stats.WriteDropped, 1) + continue + } + + sh := sg.ShardFor(p.HashID()) + mapping.MapPoint(&sh, p) + } + return mapping, nil +} + +// sgList is a wrapper around a meta.ShardGroupInfos where we can also check +// if a given time is covered by any of the shard groups in the list. +type sgList meta.ShardGroupInfos + +func (l sgList) Covers(t time.Time) bool { + if len(l) == 0 { + return false + } + return l.ShardGroupAt(t) != nil +} + +// ShardGroupAt attempts to find a shard group that could contain a point +// at the given time. +// +// Shard groups are sorted first according to end time, and then according +// to start time. 
Therefore, if there are multiple shard groups that match +// this point's time they will be preferred in this order: +// +// - a shard group with the earliest end time; +// - (assuming identical end times) the shard group with the earliest start time. +func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { + idx := sort.Search(len(l), func(i int) bool { return l[i].EndTime.After(t) }) + + // We couldn't find a shard group the point falls into. + if idx == len(l) || t.Before(l[idx].StartTime) { + return nil + } + return &l[idx] +} + +// Append appends a shard group to the list, and returns a sorted list. +func (l sgList) Append(sgi meta.ShardGroupInfo) sgList { + next := append(l, sgi) + sort.Sort(meta.ShardGroupInfos(next)) + return next +} + +// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of +// a cluster structure for information. This is to avoid a circular dependency. +func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error { + return w.WritePointsPrivileged(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points) +} + +// WritePoints writes the data to the underlying storage. consitencyLevel and user are only used for clustered scenarios +func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error { + return w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points) +} + +// WritePointsPrivileged writes the data to the underlying storage, consitencyLevel is only used for clustered scenarios +func (w *PointsWriter) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error { + atomic.AddInt64(&w.stats.WriteReq, 1) + atomic.AddInt64(&w.stats.PointWriteReq, int64(len(points))) + + if retentionPolicy == "" { + db := w.MetaClient.Database(database) + if db == nil { + return influxdb.ErrDatabaseNotFound(database) + } + retentionPolicy = db.DefaultRetentionPolicy + } + + shardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}) + if err != nil { + return err + } + + // Write each shard in it's own goroutine and return as soon as one fails. + ch := make(chan error, len(shardMappings.Points)) + for shardID, points := range shardMappings.Points { + go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { + ch <- w.writeToShard(shard, database, retentionPolicy, points) + }(shardMappings.Shards[shardID], database, retentionPolicy, points) + } + + // Send points to subscriptions if possible. 
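+	// The send below is non-blocking: if the subscriber channel is full (or
+	// nil after Close), the select falls through to the default case and the
+	// write is counted as a dropped subscription write instead of stalling
+	// the caller.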
+ ok := false + // We need to lock just in case the channel is about to be nil'ed + w.mu.RLock() + select { + case w.subPoints <- &WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}: + ok = true + default: + } + w.mu.RUnlock() + if ok { + atomic.AddInt64(&w.stats.SubWriteOK, 1) + } else { + atomic.AddInt64(&w.stats.SubWriteDrop, 1) + } + + if err == nil && len(shardMappings.Dropped) > 0 { + err = tsdb.PartialWriteError{Reason: "points beyond retention policy", Dropped: len(shardMappings.Dropped)} + + } + timeout := time.NewTimer(w.WriteTimeout) + defer timeout.Stop() + for range shardMappings.Points { + select { + case <-w.closing: + return ErrWriteFailed + case <-timeout.C: + atomic.AddInt64(&w.stats.WriteTimeout, 1) + // return timeout error to caller + return ErrTimeout + case err := <-ch: + if err != nil { + return err + } + } + } + return err +} + +// writeToShards writes points to a shard. +func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error { + atomic.AddInt64(&w.stats.PointWriteReqLocal, int64(len(points))) + + err := w.TSDBStore.WriteToShard(shard.ID, points) + if err == nil { + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil + } + + // If this is a partial write error, that is also ok. + if _, ok := err.(tsdb.PartialWriteError); ok { + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + + // If we've written to shard that should exist on the current node, but the store has + // not actually created this shard, tell it to create it and retry the write + if err == tsdb.ErrShardNotFound { + err = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true) + if err != nil { + w.Logger.Info(fmt.Sprintf("write failed for shard %d: %v", shard.ID, err)) + + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + } + err = w.TSDBStore.WriteToShard(shard.ID, points) + if err != nil { + w.Logger.Info(fmt.Sprintf("write failed for shard %d: %v", shard.ID, err)) + atomic.AddInt64(&w.stats.WriteErr, 1) + return err + } + + atomic.AddInt64(&w.stats.WriteOK, 1) + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go new file mode 100644 index 0000000..ec6a6ca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go @@ -0,0 +1,46 @@ +package coordinator + +import ( + "testing" + "time" +) + +func TestSgList_ShardGroupAt(t *testing.T) { + base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC) + day := func(n int) time.Time { + return base.Add(time.Duration(24*n) * time.Hour) + } + + list := sgList{ + {ID: 1, StartTime: day(0), EndTime: day(1)}, + {ID: 2, StartTime: day(1), EndTime: day(2)}, + {ID: 3, StartTime: day(2), EndTime: day(3)}, + // SG day 3 to day 4 missing... 
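+		// Lookups falling inside that gap should return nil.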
+ {ID: 4, StartTime: day(4), EndTime: day(5)}, + {ID: 5, StartTime: day(5), EndTime: day(6)}, + } + + examples := []struct { + T time.Time + ShardGroupID uint64 // 0 will indicate we don't expect a shard group + }{ + {T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG + {T: day(0), ShardGroupID: 1}, + {T: day(0).Add(time.Minute), ShardGroupID: 1}, + {T: day(1), ShardGroupID: 2}, + {T: day(3).Add(time.Minute), ShardGroupID: 0}, // No matching SG + {T: day(5).Add(time.Hour), ShardGroupID: 5}, + } + + for i, example := range examples { + sg := list.ShardGroupAt(example.T) + var id uint64 + if sg != nil { + id = sg.ID + } + + if got, exp := id, example.ShardGroupID; got != exp { + t.Errorf("[Example %d] got %v, expected %v", i+1, got, exp) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go new file mode 100644 index 0000000..dafa520 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go @@ -0,0 +1,683 @@ +package coordinator_test + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient. + +// Ensures the points writer maps a single point to a single shard. +func TestPointsWriter_MapShards_One(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.PointsWriter{MetaClient: ms} + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, time.Now(), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 1; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } +} + +// Ensures the points writer maps to a new shard group when the shard duration +// is changed. 
+func TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + var ( + i int + now = time.Now() + ) + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + sg := []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now, EndTime: now.Add(rp.Duration).Add(-1), + }, + meta.ShardGroupInfo{ + Shards: make([]meta.ShardInfo, 1), + StartTime: now.Add(time.Hour), EndTime: now.Add(3 * time.Hour).Add(rp.Duration).Add(-1), + }, + }[i] + i++ + return &sg, nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now, nil) + pr.AddPoint("cpu", 2.0, now.Add(2*time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if got, exp := len(shardMappings.Points[0]), 2; got != exp { + t.Fatalf("got %d point(s), expected %d", got, exp) + } + + if got, exp := len(shardMappings.Shards), 1; got != exp { + t.Errorf("got %d shard(s), expected %d", got, exp) + } + + // Now we alter the retention policy duration. + rp.ShardGroupDuration = 3 * time.Hour + + pr = &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + pr.AddPoint("cpu", 1.0, now.Add(2*time.Hour), nil) + + // Point is beyond previous shard group so a new shard group should be + // created. + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + // We can check value of i since it's only incremeneted when a shard group + // is created. + if got, exp := i, 2; got != exp { + t.Fatal("new shard group was not created, expected it to be") + } +} + +// Ensures the points writer maps a multiple points across shard group boundaries. 
+func TestPointsWriter_MapShards_Multiple(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + rp.ShardGroupDuration = time.Hour + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 2; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } + + for _, points := range shardMappings.Points { + // First shard should have 1 point w/ first point added + if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) + } + + // Second shard should have the last two points added + if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) + } + + if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) + } + } +} + +// Ensures the points writer does not map points beyond the retention policy. +func TestPointsWriter_MapShards_Invalid(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + defer c.Close() + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Add a point that goes beyond the current retention policy. + pr.AddPoint("cpu", 1.0, time.Now().Add(-2*time.Hour), nil) + + var ( + shardMappings *coordinator.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if got, exp := len(shardMappings.Points), 0; got != exp { + t.Errorf("MapShards() len mismatch. 
got %v, exp %v", got, exp) + } + + if got, exp := len(shardMappings.Dropped), 1; got != exp { + t.Fatalf("MapShard() dropped mismatch: got %v, exp %v", got, exp) + } +} + +func TestPointsWriter_WritePoints(t *testing.T) { + tests := []struct { + name string + database string + retentionPolicy string + + // the responses returned by each shard write call. node ID 1 = pos 0 + err []error + expErr error + }{ + { + name: "write one success", + database: "mydb", + retentionPolicy: "myrp", + err: []error{nil, nil, nil}, + expErr: nil, + }, + + // Write to non-existent database + { + name: "write to non-existent database", + database: "doesnt_exist", + retentionPolicy: "", + err: []error{nil, nil, nil}, + expErr: fmt.Errorf("database not found: doesnt_exist"), + }, + } + + for _, test := range tests { + + pr := &coordinator.WritePointsRequest{ + Database: test.database, + RetentionPolicy: test.retentionPolicy, + } + + // Ensure that the test shard groups are created before the points + // are created. + ms := NewPointsWriterMetaClient() + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now(), nil) + pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) + + // copy to prevent data race + theTest := test + sm := coordinator.NewShardMapping(16) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[0]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[1]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[2]) + + // Local coordinator.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[0] + }, + } + + ms.DatabaseFn = func(database string) *meta.DatabaseInfo { + return nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *coordinator.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *coordinator.WritePointsRequest { + return subPoints + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + c.TSDBStore = store + c.Subscriber = sub + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) + if err == nil && test.expErr != nil { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + + if err != nil && test.expErr == nil { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if test.expErr == nil { + select { + case p := <-subPoints: + if !reflect.DeepEqual(p, pr) { + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) + } + default: + t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: Subscriber.Points not called", test.name) + } + } + } 
+} + +func TestPointsWriter_WritePoints_Dropped(t *testing.T) { + pr := &coordinator.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + } + + // Ensure that the test shard groups are created before the points + // are created. + ms := NewPointsWriterMetaClient() + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Now().Add(-24*time.Hour), nil) + + // copy to prevent data race + sm := coordinator.NewShardMapping(16) + + // ShardMapper dropped this point + sm.Dropped = append(sm.Dropped, pr.Points[0]) + + // Local coordinator.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return nil + }, + } + + ms.DatabaseFn = func(database string) *meta.DatabaseInfo { + return nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *coordinator.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *coordinator.WritePointsRequest { + return subPoints + } + + c := coordinator.NewPointsWriter() + c.MetaClient = ms + c.TSDBStore = store + c.Subscriber = sub + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) + if _, ok := err.(tsdb.PartialWriteError); !ok { + t.Errorf("PointsWriter.WritePoints(): got %v, exp %v", err, tsdb.PartialWriteError{}) + } +} + +type fakePointsWriter struct { + WritePointsIntoFn func(*coordinator.IntoWriteRequest) error +} + +func (f *fakePointsWriter) WritePointsInto(req *coordinator.IntoWriteRequest) error { + return f.WritePointsIntoFn(req) +} + +func TestBufferedPointsWriter(t *testing.T) { + db := "db0" + rp := "rp0" + capacity := 10000 + + writePointsIntoCnt := 0 + pointsWritten := []models.Point{} + + reset := func() { + writePointsIntoCnt = 0 + pointsWritten = pointsWritten[:0] + } + + fakeWriter := &fakePointsWriter{ + WritePointsIntoFn: func(req *coordinator.IntoWriteRequest) error { + writePointsIntoCnt++ + pointsWritten = append(pointsWritten, req.Points...) + return nil + }, + } + + w := coordinator.NewBufferedPointsWriter(fakeWriter, db, rp, capacity) + + // Test that capacity and length are correct for new buffered writer. + if w.Cap() != capacity { + t.Fatalf("exp %d, got %d", capacity, w.Cap()) + } else if w.Len() != 0 { + t.Fatalf("exp %d, got %d", 0, w.Len()) + } + + // Test flushing an empty buffer. + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } + + // Test writing zero points. + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: []models.Point{}, + }); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt > 0 { + t.Fatalf("exp 0, got %d", writePointsIntoCnt) + } else if w.Len() > 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } + + // Test writing single large bunch of points points. 
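+	// Writing 5.5x the buffer capacity should force five full flushes and
+	// leave half a buffer's worth of points unflushed.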
+ req := coordinator.WritePointsRequest{ + Database: db, + RetentionPolicy: rp, + } + + numPoints := int(float64(capacity) * 5.5) + for i := 0; i < numPoints; i++ { + req.AddPoint("cpu", float64(i), time.Now().Add(time.Duration(i)*time.Second), nil) + } + + r := coordinator.IntoWriteRequest(req) + if err := w.WritePointsInto(&r); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 5 { + t.Fatalf("exp 5, got %d", writePointsIntoCnt) + } else if w.Len() != capacity/2 { + t.Fatalf("exp %d, got %d", capacity/2, w.Len()) + } else if len(pointsWritten) != numPoints-capacity/2 { + t.Fatalf("exp %d, got %d", numPoints-capacity/2, len(pointsWritten)) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } + + reset() + + // Test writing points one at a time. + for i, _ := range r.Points { + if err := w.WritePointsInto(&coordinator.IntoWriteRequest{ + Database: db, + RetentionPolicy: rp, + Points: r.Points[i : i+1], + }); err != nil { + t.Fatal(err) + } + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } else if writePointsIntoCnt != 6 { + t.Fatalf("exp 6, got %d", writePointsIntoCnt) + } else if w.Len() != 0 { + t.Fatalf("exp 0, got %d", w.Len()) + } else if len(pointsWritten) != numPoints { + t.Fatalf("exp %d, got %d", numPoints, len(pointsWritten)) + } else if !reflect.DeepEqual(r.Points, pointsWritten) { + t.Fatal("points don't match") + } +} + +var shardID uint64 + +type fakeStore struct { + WriteFn func(shardID uint64, points []models.Point) error + CreateShardfn func(database, retentionPolicy string, shardID uint64, enabled bool) error +} + +func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { + return f.WriteFn(shardID, points) +} + +func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error { + return f.CreateShardfn(database, retentionPolicy, shardID, enabled) +} + +func NewPointsWriterMetaClient() *PointsWriterMetaClient { + ms := &PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + return ms +} + +type PointsWriterMetaClient struct { + NodeIDFn func() uint64 + RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + DatabaseFn func(database string) *meta.DatabaseInfo + ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) +} + +func (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() } + +func (m PointsWriterMetaClient) 
RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { + return m.RetentionPolicyFn(database, name) +} + +func (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) +} + +func (m PointsWriterMetaClient) Database(database string) *meta.DatabaseInfo { + return m.DatabaseFn(database) +} + +func (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { + return m.ShardOwnerFn(shardID) +} + +type Subscriber struct { + PointsFn func() chan<- *coordinator.WritePointsRequest +} + +func (s Subscriber) Points() chan<- *coordinator.WritePointsRequest { + return s.PointsFn() +} + +func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { + shards := []meta.ShardInfo{} + owners := []meta.ShardOwner{} + for i := 1; i <= nodeCount; i++ { + owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) + } + + // each node is fully replicated with each other + shards = append(shards, meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }) + + start := time.Now() + rp := &meta.RetentionPolicyInfo{ + Name: "myrp", + ReplicaN: nodeCount, + Duration: duration, + ShardGroupDuration: duration, + ShardGroups: []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + ID: nextShardID(), + StartTime: start, + EndTime: start.Add(duration).Add(-1), + Shards: shards, + }, + }, + } + return rp +} + +func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { + var startTime, endTime time.Time + if len(rp.ShardGroups) == 0 { + startTime = time.Now() + } else { + startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) + } + endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) + + sh := meta.ShardGroupInfo{ + ID: uint64(len(rp.ShardGroups) + 1), + StartTime: startTime, + EndTime: endTime, + Shards: []meta.ShardInfo{ + meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }, + }, + } + rp.ShardGroups = append(rp.ShardGroups, sh) +} + +func nextShardID() uint64 { + return atomic.AddUint64(&shardID, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go new file mode 100644 index 0000000..1143def --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go @@ -0,0 +1,196 @@ +package coordinator + +import ( + "io" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// IteratorCreator is an interface that combines mapping fields and creating iterators. +type IteratorCreator interface { + influxql.IteratorCreator + influxql.FieldMapper + io.Closer +} + +// ShardMapper retrieves and maps shards into an IteratorCreator that can later be +// used for executing queries. +type ShardMapper interface { + MapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error) +} + +// LocalShardMapper implements a ShardMapper for local shards. +type LocalShardMapper struct { + MetaClient interface { + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + } + + TSDBStore interface { + ShardGroup(ids []uint64) tsdb.ShardGroup + } +} + +// MapShards maps the sources to the appropriate shards into an IteratorCreator. 
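+// Shards are looked up once per database/retention-policy pair and cached in
+// the returned LocalShardMapping; subquery sources are mapped recursively.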
+func (e *LocalShardMapper) MapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error) { + a := &LocalShardMapping{ + ShardMap: make(map[Source]tsdb.ShardGroup), + } + + if err := e.mapShards(a, sources, opt); err != nil { + return nil, err + } + return a, nil +} + +func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, opt *influxql.SelectOptions) error { + for _, s := range sources { + switch s := s.(type) { + case *influxql.Measurement: + source := Source{ + Database: s.Database, + RetentionPolicy: s.RetentionPolicy, + } + + // Retrieve the list of shards for this database. This list of + // shards is always the same regardless of which measurement we are + // using. + if _, ok := a.ShardMap[source]; !ok { + groups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, opt.MinTime, opt.MaxTime) + if err != nil { + return err + } + + if len(groups) == 0 { + a.ShardMap[source] = nil + continue + } + + shardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups)) + for _, g := range groups { + for _, si := range g.Shards { + shardIDs = append(shardIDs, si.ID) + } + } + a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs) + } + case *influxql.SubQuery: + if err := e.mapShards(a, s.Statement.Sources, opt); err != nil { + return err + } + } + } + return nil +} + +// ShardMapper maps data sources to a list of shard information. +type LocalShardMapping struct { + ShardMap map[Source]tsdb.ShardGroup +} + +func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return + } + + fields = make(map[string]influxql.DataType) + dimensions = make(map[string]struct{}) + + var measurements []string + if m.Regex != nil { + measurements = sg.MeasurementsByRegex(m.Regex.Val) + } else { + measurements = []string{m.Name} + } + + f, d, err := sg.FieldDimensions(measurements) + if err != nil { + return nil, nil, err + } + for k, typ := range f { + fields[k] = typ + } + for k := range d { + dimensions[k] = struct{}{} + } + return +} + +func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return influxql.Unknown + } + + var names []string + if m.Regex != nil { + names = sg.MeasurementsByRegex(m.Regex.Val) + } else { + names = []string{m.Name} + } + + var typ influxql.DataType + for _, name := range names { + t := sg.MapType(name, field) + if typ.LessThan(t) { + typ = t + } + } + return typ +} + +func (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return nil, nil + } + + if m.Regex != nil { + measurements := sg.MeasurementsByRegex(m.Regex.Val) + inputs := make([]influxql.Iterator, 0, len(measurements)) + if err := func() error { + for _, measurement := range measurements { + input, err := sg.CreateIterator(measurement, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + } + return nil + }(); err != nil { + influxql.Iterators(inputs).Close() + return nil, err + } + return 
influxql.Iterators(inputs).Merge(opt) + } + return sg.CreateIterator(m.Name, opt) +} + +// Close does nothing for a LocalShardMapping. +func (a *LocalShardMapping) Close() error { + return nil +} + +// Source contains the database and retention policy source for data. +type Source struct { + Database string + RetentionPolicy string +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go new file mode 100644 index 0000000..81dd356 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go @@ -0,0 +1,102 @@ +package coordinator_test + +import ( + "reflect" + "testing" + "time" + + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +func TestLocalShardMapper(t *testing.T) { + var metaClient MetaClient + metaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) ([]meta.ShardGroupInfo, error) { + if database != "db0" { + t.Errorf("unexpected database: %s", database) + } + if policy != "rp0" { + t.Errorf("unexpected retention policy: %s", policy) + } + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 1, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 2, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + {ID: 2, Shards: []meta.ShardInfo{ + {ID: 3, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 4, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + var tsdbStore TSDBStore + tsdbStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{1, 2, 3, 4}) { + t.Errorf("unexpected shard ids: %#v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if measurement != "cpu" { + t.Errorf("unexpected measurement: %s", measurement) + } + return &FloatIterator{}, nil + } + return &sh + } + + // Initialize the shard mapper. + shardMapper := &coordinator.LocalShardMapper{ + MetaClient: &metaClient, + TSDBStore: &tsdbStore, + } + + // Normal measurement. + measurement := &influxql.Measurement{ + Database: "db0", + RetentionPolicy: "rp0", + Name: "cpu", + } + ic, err := shardMapper.MapShards([]influxql.Source{measurement}, &influxql.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. + m, ok := ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Subquery. + subquery := &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Sources: []influxql.Source{measurement}, + }, + } + ic, err = shardMapper.MapShards([]influxql.Source{subquery}, &influxql.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. 
+ m, ok = ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go new file mode 100644 index 0000000..21ece23 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go @@ -0,0 +1,1192 @@ +package coordinator + +import ( + "bytes" + "errors" + "fmt" + "io" + "sort" + "strconv" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// ErrDatabaseNameRequired is returned when executing statements that require a database, +// when a database has not been provided. +var ErrDatabaseNameRequired = errors.New("database name required") + +type pointsWriter interface { + WritePointsInto(*IntoWriteRequest) error +} + +// StatementExecutor executes a statement in the query. +type StatementExecutor struct { + MetaClient MetaClient + + // TaskManager holds the StatementExecutor that handles task-related commands. + TaskManager influxql.StatementExecutor + + // TSDB storage for local node. + TSDBStore TSDBStore + + // ShardMapper for mapping shards when executing a SELECT statement. + ShardMapper ShardMapper + + // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. + Monitor *monitor.Monitor + + // Used for rewriting points back into system for SELECT INTO statements. + PointsWriter pointsWriter + + // Select statement limits + MaxSelectPointN int + MaxSelectSeriesN int + MaxSelectBucketsN int +} + +// ExecuteStatement executes the given statement with the given execution context. +func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error { + // Select statements are handled separately so that they can be streamed. 
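+	// Other statement types are dispatched via the switch below; most of them
+	// collect their rows/messages and send a single Result once the switch
+	// completes.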
+ if stmt, ok := stmt.(*influxql.SelectStatement); ok { + return e.executeSelectStatement(stmt, &ctx) + } + + var rows models.Rows + var messages []*influxql.Message + var err error + switch stmt := stmt.(type) { + case *influxql.AlterRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeAlterRetentionPolicyStatement(stmt) + case *influxql.CreateContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateContinuousQueryStatement(stmt) + case *influxql.CreateDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateDatabaseStatement(stmt) + case *influxql.CreateRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateRetentionPolicyStatement(stmt) + case *influxql.CreateSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateSubscriptionStatement(stmt) + case *influxql.CreateUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeCreateUserStatement(stmt) + case *influxql.DeleteSeriesStatement: + err = e.executeDeleteSeriesStatement(stmt, ctx.Database) + case *influxql.DropContinuousQueryStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropContinuousQueryStatement(stmt) + case *influxql.DropDatabaseStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropDatabaseStatement(stmt) + case *influxql.DropMeasurementStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropMeasurementStatement(stmt, ctx.Database) + case *influxql.DropSeriesStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSeriesStatement(stmt, ctx.Database) + case *influxql.DropRetentionPolicyStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropRetentionPolicyStatement(stmt) + case *influxql.DropShardStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropShardStatement(stmt) + case *influxql.DropSubscriptionStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropSubscriptionStatement(stmt) + case *influxql.DropUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeDropUserStatement(stmt) + case *influxql.GrantStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantStatement(stmt) + case *influxql.GrantAdminStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeGrantAdminStatement(stmt) + case *influxql.RevokeStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeRevokeStatement(stmt) + case *influxql.RevokeAdminStatement: + if ctx.ReadOnly { + messages = append(messages, 
influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeRevokeAdminStatement(stmt) + case *influxql.ShowContinuousQueriesStatement: + rows, err = e.executeShowContinuousQueriesStatement(stmt) + case *influxql.ShowDatabasesStatement: + rows, err = e.executeShowDatabasesStatement(stmt, &ctx) + case *influxql.ShowDiagnosticsStatement: + rows, err = e.executeShowDiagnosticsStatement(stmt) + case *influxql.ShowGrantsForUserStatement: + rows, err = e.executeShowGrantsForUserStatement(stmt) + case *influxql.ShowMeasurementsStatement: + return e.executeShowMeasurementsStatement(stmt, &ctx) + case *influxql.ShowRetentionPoliciesStatement: + rows, err = e.executeShowRetentionPoliciesStatement(stmt) + case *influxql.ShowShardsStatement: + rows, err = e.executeShowShardsStatement(stmt) + case *influxql.ShowShardGroupsStatement: + rows, err = e.executeShowShardGroupsStatement(stmt) + case *influxql.ShowStatsStatement: + rows, err = e.executeShowStatsStatement(stmt) + case *influxql.ShowSubscriptionsStatement: + rows, err = e.executeShowSubscriptionsStatement(stmt) + case *influxql.ShowTagValuesStatement: + return e.executeShowTagValues(stmt, &ctx) + case *influxql.ShowUsersStatement: + rows, err = e.executeShowUsersStatement(stmt) + case *influxql.SetPasswordUserStatement: + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + err = e.executeSetPasswordUserStatement(stmt) + case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: + // Send query related statements to the task manager. + return e.TaskManager.ExecuteStatement(stmt, ctx) + default: + return influxql.ErrInvalidQuery + } + + if err != nil { + return err + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: rows, + Messages: messages, + }) +} + +func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error { + rpu := &meta.RetentionPolicyUpdate{ + Duration: stmt.Duration, + ReplicaN: stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Update the retention policy. + if err := e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default); err != nil { + return err + } + return nil +} + +func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { + // Verify that retention policies exist. + var err error + verifyRPFn := func(n influxql.Node) { + if err != nil { + return + } + switch m := n.(type) { + case *influxql.Measurement: + var rp *meta.RetentionPolicyInfo + if rp, err = e.MetaClient.RetentionPolicy(m.Database, m.RetentionPolicy); err != nil { + return + } else if rp == nil { + err = fmt.Errorf("%s: %s.%s", meta.ErrRetentionPolicyNotFound, m.Database, m.RetentionPolicy) + } + default: + return + } + } + + influxql.WalkFunc(q, verifyRPFn) + + if err != nil { + return err + } + + return e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String()) +} + +func (e *StatementExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error { + if !meta.ValidName(stmt.Name) { + // TODO This should probably be in `(*meta.Data).CreateDatabase` + // but can't go there until 1.1 is used everywhere + return meta.ErrInvalidName + } + + if !stmt.RetentionPolicyCreate { + _, err := e.MetaClient.CreateDatabase(stmt.Name) + return err + } + + // If we're doing, for example, CREATE DATABASE "db" WITH DURATION 1d then + // the name will not yet be set. 
We only need to validate non-empty + // retention policy names, such as in the statement: + // CREATE DATABASE "db" WITH DURATION 1d NAME "xyz" + if stmt.RetentionPolicyName != "" && !meta.ValidName(stmt.RetentionPolicyName) { + return meta.ErrInvalidName + } + + spec := meta.RetentionPolicySpec{ + Name: stmt.RetentionPolicyName, + Duration: stmt.RetentionPolicyDuration, + ReplicaN: stmt.RetentionPolicyReplication, + ShardGroupDuration: stmt.RetentionPolicyShardGroupDuration, + } + _, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, &spec) + return err +} + +func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error { + if !meta.ValidName(stmt.Name) { + // TODO This should probably be in `(*meta.Data).CreateRetentionPolicy` + // but can't go there until 1.1 is used everywhere + return meta.ErrInvalidName + } + + spec := meta.RetentionPolicySpec{ + Name: stmt.Name, + Duration: &stmt.Duration, + ReplicaN: &stmt.Replication, + ShardGroupDuration: stmt.ShardGroupDuration, + } + + // Create new retention policy. + _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec, stmt.Default) + if err != nil { + return err + } + + return nil +} + +func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { + return e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations) +} + +func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error { + _, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin) + return err +} + +func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Convert "now()" to current time. + stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) + + // Locally delete the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error { + return e.MetaClient.DropContinuousQuery(q.Database, q.Name) +} + +// executeDropDatabaseStatement drops a database from the cluster. +// It does not return an error if the database was not found on any of +// the nodes, or in the Meta store. +func (e *StatementExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error { + if e.MetaClient.Database(stmt.Name) == nil { + return nil + } + + // Locally delete the datababse. + if err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil { + return err + } + + // Remove the database from the Meta Store. + return e.MetaClient.DropDatabase(stmt.Name) +} + +func (e *StatementExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Locally drop the measurement + return e.TSDBStore.DeleteMeasurement(database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error { + if dbi := e.MetaClient.Database(database); dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Check for time in WHERE clause (not supported). 
+ if influxql.HasTimeExpr(stmt.Condition) { + return errors.New("DROP SERIES doesn't support time in WHERE clause") + } + + // Locally drop the series. + return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) +} + +func (e *StatementExecutor) executeDropShardStatement(stmt *influxql.DropShardStatement) error { + // Locally delete the shard. + if err := e.TSDBStore.DeleteShard(stmt.ID); err != nil { + return err + } + + // Remove the shard reference from the Meta Store. + return e.MetaClient.DropShard(stmt.ID) +} + +func (e *StatementExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error { + dbi := e.MetaClient.Database(stmt.Database) + if dbi == nil { + return nil + } + + if dbi.RetentionPolicy(stmt.Name) == nil { + return nil + } + + // Locally drop the retention policy. + if err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + + return e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name) +} + +func (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error { + return e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name) +} + +func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error { + return e.MetaClient.DropUser(q.Name) +} + +func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) +} + +func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, true) +} + +func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error { + priv := influxql.NoPrivileges + + // Revoking all privileges means there's no need to look at existing user privileges. + if stmt.Privilege != influxql.AllPrivileges { + p, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On) + if err != nil { + return err + } + // Bit clear (AND NOT) the user's privilege with the revoked privilege. + priv = *p &^ stmt.Privilege + } + + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv) +} + +func (e *StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, false) +} + +func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error { + return e.MetaClient.UpdateUser(q.Name, q.Password) +} + +func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error { + itrs, stmt, err := e.createIterators(stmt, ctx) + if err != nil { + return err + } + + // Generate a row emitter from the iterator set. + em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize) + em.Columns = stmt.ColumnNames() + if stmt.Location != nil { + em.Location = stmt.Location + } + em.OmitTime = stmt.OmitTime + defer em.Close() + + // Emit rows to the results channel. + var writeN int64 + var emitted bool + + var pointsWriter *BufferedPointsWriter + if stmt.Target != nil { + pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000) + } + + for { + row, partial, err := em.Emit() + if err != nil { + return err + } else if row == nil { + // Check if the query was interrupted while emitting. 
+ select { + case <-ctx.InterruptCh: + return influxql.ErrQueryInterrupted + default: + } + break + } + + // Write points back into system for INTO statements. + if stmt.Target != nil { + if err := e.writeInto(pointsWriter, stmt, row); err != nil { + return err + } + writeN += int64(len(row.Values)) + continue + } + + result := &influxql.Result{ + StatementID: ctx.StatementID, + Series: []*models.Row{row}, + Partial: partial, + } + + // Send results or exit if closing. + if err := ctx.Send(result); err != nil { + return err + } + + emitted = true + } + + // Flush remaining points and emit write count if an INTO statement. + if stmt.Target != nil { + if err := pointsWriter.Flush(); err != nil { + return err + } + + var messages []*influxql.Message + if ctx.ReadOnly { + messages = append(messages, influxql.ReadOnlyWarning(stmt.String())) + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Messages: messages, + Series: []*models.Row{{ + Name: "result", + Columns: []string{"time", "written"}, + Values: [][]interface{}{{time.Unix(0, 0).UTC(), writeN}}, + }}, + }) + } + + // Always emit at least one result. + if !emitted { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: make([]*models.Row, 0), + }) + } + + return nil +} + +func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) ([]influxql.Iterator, *influxql.SelectStatement, error) { + // It is important to "stamp" this time so that everywhere we evaluate `now()` in the statement is EXACTLY the same `now` + now := time.Now().UTC() + opt := influxql.SelectOptions{ + InterruptCh: ctx.InterruptCh, + NodeID: ctx.ExecutionOptions.NodeID, + MaxSeriesN: e.MaxSelectSeriesN, + Authorizer: ctx.Authorizer, + } + + // Replace instances of "now()" with the current time, and check the resultant times. + nowValuer := influxql.NowValuer{Now: now, Location: stmt.Location} + stmt = stmt.Reduce(&nowValuer) + + var err error + opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition, stmt.Location) + if err != nil { + return nil, stmt, err + } + + if opt.MaxTime.IsZero() { + opt.MaxTime = time.Unix(0, influxql.MaxTime) + } + if opt.MinTime.IsZero() { + opt.MinTime = time.Unix(0, influxql.MinTime).UTC() + } + + // Convert DISTINCT into a call. + stmt.RewriteDistinct() + + // Remove "time" from fields list. + stmt.RewriteTimeFields() + + // Rewrite time condition. + if err := stmt.RewriteTimeCondition(now); err != nil { + return nil, stmt, err + } + + // Rewrite any regex conditions that could make use of the index. + stmt.RewriteRegexConditions() + + // Create an iterator creator based on the shards in the cluster. + ic, err := e.ShardMapper.MapShards(stmt.Sources, &opt) + if err != nil { + return nil, stmt, err + } + defer ic.Close() + + // Rewrite wildcards, if any exist. + tmp, err := stmt.RewriteFields(ic) + if err != nil { + return nil, stmt, err + } + stmt = tmp + + if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery { + interval, err := stmt.GroupByInterval() + if err != nil { + return nil, stmt, err + } + + if interval > 0 { + // Determine the start and end time matched to the interval (may not match the actual times). + min := opt.MinTime.Truncate(interval) + max := opt.MaxTime.Truncate(interval).Add(interval) + + // Determine the number of buckets by finding the time span and dividing by the interval. 
+ buckets := int64(max.Sub(min)) / int64(interval) + if int(buckets) > e.MaxSelectBucketsN { + return nil, stmt, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, e.MaxSelectBucketsN) + } + } + } + + // Create a set of iterators from a selection. + itrs, err := influxql.Select(stmt, ic, &opt) + if err != nil { + return nil, stmt, err + } + + if e.MaxSelectPointN > 0 { + monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN) + ctx.Query.Monitor(monitor) + } + return itrs, stmt, nil +} + +func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} + for _, cqi := range di.ContinuousQueries { + row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement, ctx *influxql.ExecutionContext) (models.Rows, error) { + dis := e.MetaClient.Databases() + a := ctx.ExecutionOptions.Authorizer + + row := &models.Row{Name: "databases", Columns: []string{"name"}} + for _, di := range dis { + // Only include databases that the user is authorized to read or write. + if a.AuthorizeDatabase(influxql.ReadPrivilege, di.Name) || a.AuthorizeDatabase(influxql.WritePrivilege, di.Name) { + row.Values = append(row.Values, []interface{}{di.Name}) + } + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) { + diags, err := e.Monitor.Diagnostics() + if err != nil { + return nil, err + } + + // Get a sorted list of diagnostics keys. 
+ sortedKeys := make([]string, 0, len(diags)) + for k := range diags { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + rows := make([]*models.Row, 0, len(diags)) + for _, k := range sortedKeys { + if stmt.Module != "" && k != stmt.Module { + continue + } + + row := &models.Row{Name: k} + + row.Columns = diags[k].Columns + row.Values = diags[k].Rows + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) { + priv, err := e.MetaClient.UserPrivileges(q.Name) + if err != nil { + return nil, err + } + + row := &models.Row{Columns: []string{"database", "privilege"}} + for d, p := range priv { + row.Values = append(row.Values, []interface{}{d, p.String()}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *influxql.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + names, err := e.TSDBStore.MeasurementNames(q.Database, q.Condition) + if err != nil || len(names) == 0 { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Err: err, + }) + } + + if q.Offset > 0 { + if q.Offset >= len(names) { + names = nil + } else { + names = names[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(names) { + names = names[:q.Limit] + } + } + + values := make([][]interface{}, len(names)) + for i, name := range names { + values[i] = []interface{}{string(name)} + } + + if len(values) == 0 { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + }) + } + + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Series: []*models.Row{{ + Name: "measurements", + Columns: []string{"name"}, + Values: values, + }}, + }) +} + +func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) { + if q.Database == "" { + return nil, ErrDatabaseNameRequired + } + + di := e.MetaClient.Database(q.Database) + if di == nil { + return nil, influxdb.ErrDatabaseNotFound(q.Database) + } + + row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} + for _, rpi := range di.RetentionPolicies { + row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ShardGroupDuration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) + } + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. 
+ if sgi.Deleted() { + continue + } + + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + di.Name, + rpi.Name, + sgi.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "start_time", "end_time", "expiry_time"}, Name: "shard groups"} + for _, di := range dis { + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. + if sgi.Deleted() { + continue + } + + row.Values = append(row.Values, []interface{}{ + sgi.ID, + di.Name, + rpi.Name, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + }) + } + } + } + + return []*models.Row{row}, nil +} + +func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) { + stats, err := e.Monitor.Statistics(nil) + if err != nil { + return nil, err + } + + var rows []*models.Row + for _, stat := range stats { + if stmt.Module != "" && stat.Name != stmt.Module { + continue + } + row := &models.Row{Name: stat.Name, Tags: stat.Tags} + + values := make([]interface{}, 0, len(stat.Values)) + for _, k := range stat.ValueNames() { + row.Columns = append(row.Columns, k) + values = append(values, stat.Values[k]) + } + row.Values = [][]interface{}{values} + rows = append(rows, row) + } + return rows, nil +} + +func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) { + dis := e.MetaClient.Databases() + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, si := range rpi.Subscriptions { + row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations}) + } + } + if len(row.Values) > 0 { + rows = append(rows, row) + } + } + return rows, nil +} + +func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *influxql.ExecutionContext) error { + if q.Database == "" { + return ErrDatabaseNameRequired + } + + tagValues, err := e.TSDBStore.TagValues(q.Database, q.Condition) + if err != nil { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + Err: err, + }) + } + + emitted := false + for _, m := range tagValues { + values := m.Values + + if q.Offset > 0 { + if q.Offset >= len(values) { + values = nil + } else { + values = values[q.Offset:] + } + } + + if q.Limit > 0 { + if q.Limit < len(values) { + values = values[:q.Limit] + } + } + + if len(values) == 0 { + continue + } + + row := &models.Row{ + Name: m.Measurement, + Columns: []string{"key", "value"}, + Values: make([][]interface{}, len(values)), + } + for i, v := range values { + row.Values[i] = []interface{}{v.Key, v.Value} + } + + if err := ctx.Send(&influxql.Result{ + StatementID: 
ctx.StatementID, + Series: []*models.Row{row}, + }); err != nil { + return err + } + emitted = true + } + + // Ensure at least one result is emitted. + if !emitted { + return ctx.Send(&influxql.Result{ + StatementID: ctx.StatementID, + }) + } + return nil +} + +func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) { + row := &models.Row{Columns: []string{"user", "admin"}} + for _, ui := range e.MetaClient.Users() { + row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) + } + return []*models.Row{row}, nil +} + +// BufferedPointsWriter adds buffering to a pointsWriter so that SELECT INTO queries +// write their points to the destination in batches. +type BufferedPointsWriter struct { + w pointsWriter + buf []models.Point + database string + retentionPolicy string +} + +// NewBufferedPointsWriter returns a new BufferedPointsWriter. +func NewBufferedPointsWriter(w pointsWriter, database, retentionPolicy string, capacity int) *BufferedPointsWriter { + return &BufferedPointsWriter{ + w: w, + buf: make([]models.Point, 0, capacity), + database: database, + retentionPolicy: retentionPolicy, + } +} + +// WritePointsInto implements pointsWriter for BufferedPointsWriter. +func (w *BufferedPointsWriter) WritePointsInto(req *IntoWriteRequest) error { + // Make sure we're buffering points only for the expected destination. + if req.Database != w.database || req.RetentionPolicy != w.retentionPolicy { + return fmt.Errorf("writer for %s.%s can't write into %s.%s", w.database, w.retentionPolicy, req.Database, req.RetentionPolicy) + } + + for i := 0; i < len(req.Points); { + // Get the available space in the buffer. + avail := cap(w.buf) - len(w.buf) + + // Calculate number of points to copy into the buffer. + n := len(req.Points[i:]) + if n > avail { + n = avail + } + + // Copy points into buffer. + w.buf = append(w.buf, req.Points[i:n+i]...) + + // Advance the index by number of points copied. + i += n + + // If buffer is full, flush points to underlying writer. + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + } + + return nil +} + +// Flush writes all buffered points to the underlying writer. +func (w *BufferedPointsWriter) Flush() error { + if len(w.buf) == 0 { + return nil + } + + if err := w.w.WritePointsInto(&IntoWriteRequest{ + Database: w.database, + RetentionPolicy: w.retentionPolicy, + Points: w.buf, + }); err != nil { + return err + } + + // Clear the buffer. + w.buf = w.buf[:0] + + return nil +} + +// Len returns the number of points buffered. +func (w *BufferedPointsWriter) Len() int { return len(w.buf) } + +// Cap returns the capacity (in points) of the buffer. +func (w *BufferedPointsWriter) Cap() int { return cap(w.buf) } + +func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) error { + if stmt.Target.Measurement.Database == "" { + return errNoDatabaseInTarget + } + + // It might seem a bit weird that this is where we do this, since we will have to + // convert rows back to points. The Executors (both aggregate and raw) are complex + // enough that changing them to write back to the DB is going to be clumsy + // + // it might seem weird to have the write be in the QueryExecutor, but the interweaving of + // limitedRowWriter and ExecuteAggregate/Raw makes it ridiculously hard to make sure that the + // results will be the same as when queried normally. 
+ name := stmt.Target.Measurement.Name + if name == "" { + name = row.Name + } + + points, err := convertRowToPoints(name, row) + if err != nil { + return err + } + + if err := w.WritePointsInto(&IntoWriteRequest{ + Database: stmt.Target.Measurement.Database, + RetentionPolicy: stmt.Target.Measurement.RetentionPolicy, + Points: points, + }); err != nil { + return err + } + + return nil +} + +var errNoDatabaseInTarget = errors.New("no database in target") + +// convertRowToPoints will convert a query result Row into Points that can be written back in. +func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) { + // figure out which parts of the result are the time and which are the fields + timeIndex := -1 + fieldIndexes := make(map[string]int) + for i, c := range row.Columns { + if c == "time" { + timeIndex = i + } else { + fieldIndexes[c] = i + } + } + + if timeIndex == -1 { + return nil, errors.New("error finding time index in result") + } + + points := make([]models.Point, 0, len(row.Values)) + for _, v := range row.Values { + vals := make(map[string]interface{}) + for fieldName, fieldIndex := range fieldIndexes { + val := v[fieldIndex] + if val != nil { + vals[fieldName] = v[fieldIndex] + } + } + + p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time)) + if err != nil { + // Drop points that can't be stored + continue + } + + points = append(points, p) + } + + return points, nil +} + +// NormalizeStatement adds a default database and policy to the measurements in statement. +func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultDatabase string) (err error) { + influxql.WalkFunc(stmt, func(node influxql.Node) { + if err != nil { + return + } + switch node := node.(type) { + case *influxql.ShowRetentionPoliciesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowMeasurementsStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.ShowTagValuesStatement: + if node.Database == "" { + node.Database = defaultDatabase + } + case *influxql.Measurement: + switch stmt.(type) { + case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement: + // DB and RP not supported by these statements so don't rewrite into invalid + // statements + default: + err = e.normalizeMeasurement(node, defaultDatabase) + } + } + }) + return +} + +func (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase string) error { + // Targets (measurements in an INTO clause) can have blank names, which means it will be + // the same as the measurement name it came from in the FROM clause. + if !m.IsTarget && m.Name == "" && m.Regex == nil { + return errors.New("invalid measurement") + } + + // Measurement does not have an explicit database? Insert default. + if m.Database == "" { + m.Database = defaultDatabase + } + + // The database must now be specified by this point. + if m.Database == "" { + return ErrDatabaseNameRequired + } + + // Find database. + di := e.MetaClient.Database(m.Database) + if di == nil { + return influxdb.ErrDatabaseNotFound(m.Database) + } + + // If no retention policy was specified, use the default. 
+ if m.RetentionPolicy == "" { + if di.DefaultRetentionPolicy == "" { + return fmt.Errorf("default retention policy not set for: %s", di.Name) + } + m.RetentionPolicy = di.DefaultRetentionPolicy + } + + return nil +} + +// IntoWriteRequest is a partial copy of cluster.WriteRequest +type IntoWriteRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// TSDBStore is an interface for accessing the time series data store. +type TSDBStore interface { + CreateShard(database, policy string, shardID uint64, enabled bool) error + WriteToShard(shardID uint64, points []models.Point) error + + RestoreShard(id uint64, r io.Reader) error + BackupShard(id uint64, since time.Time, w io.Writer) error + + DeleteDatabase(name string) error + DeleteMeasurement(database, name string) error + DeleteRetentionPolicy(database, name string) error + DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error + DeleteShard(id uint64) error + + MeasurementNames(database string, cond influxql.Expr) ([][]byte, error) + TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) +} + +var _ TSDBStore = LocalTSDBStore{} + +// LocalTSDBStore embeds a tsdb.Store and implements IteratorCreator +// to satisfy the TSDBStore interface. +type LocalTSDBStore struct { + *tsdb.Store +} + +// ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard. +type ShardIteratorCreator interface { + ShardIteratorCreator(id uint64) influxql.IteratorCreator +} + +// joinUint64 returns a comma-delimited string of uint64 numbers. +func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go new file mode 100644 index 0000000..3ae3a99 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go @@ -0,0 +1,462 @@ +package coordinator_test + +import ( + "bytes" + "errors" + "io" + "os" + "reflect" + "regexp" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" + "github.com/uber-go/zap" +) + +const ( + // DefaultDatabase is the default database name used in tests. + DefaultDatabase = "db0" + + // DefaultRetentionPolicy is the default retention policy name used in tests. + DefaultRetentionPolicy = "rp0" +) + +// Ensure query executor can execute a simple SELECT statement. +func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { + e := DefaultQueryExecutor() + + // The meta client should return a single shard owned by the local node. + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + // The TSDB store should return an IteratorCreator for shard. + // This IteratorCreator returns a single iterator with "value" in the aux fields. 
+ e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, + {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, + }}, nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh + } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(100)}, + {time.Unix(1, 0).UTC(), float64(200)}, + }, + }}, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +// Ensure query executor can enforce a maximum bucket selection count. +func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) { + e := DefaultQueryExecutor() + e.StatementExecutor.MaxSelectBucketsN = 3 + + // The meta client should return a single shards on the local node. + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{ + Points: []influxql.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, + }, nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh + } + + // Verify all results from the query. 
+ if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Err: errors.New("max-select-buckets limit exceeded: (4/3)"), + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +func TestStatementExecutor_NormalizeDropSeries(t *testing.T) { + q, err := influxql.ParseQuery("DROP SERIES FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DropSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DROP SERIES FROM cpu", q.String(); exp != got { + t.Fatalf("generated query does match parsed: exp %v, got %v", exp, got) + } +} + +func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) { + q, err := influxql.ParseQuery("DELETE FROM cpu") + if err != nil { + t.Fatalf("unexpected error parsing query: %v", err) + } + + stmt := q.Statements[0].(*influxql.DeleteSeriesStatement) + + s := &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabaseFn: func(name string) *meta.DatabaseInfo { + t.Fatal("meta client should not be called") + return nil + }, + }, + } + if err := s.NormalizeStatement(stmt, "foo"); err != nil { + t.Fatalf("unexpected error normalizing statement: %v", err) + } + + m := stmt.Sources[0].(*influxql.Measurement) + if m.Database != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.Database) + } + if m.RetentionPolicy != "" { + t.Fatalf("database rewritten when not supposed to: %v", m.RetentionPolicy) + } + + if exp, got := "DELETE FROM cpu", q.String(); exp != got { + t.Fatalf("generated query does match parsed: exp %v, got %v", exp, got) + } +} + +type mockAuthorizer struct { + AuthorizeDatabaseFn func(influxql.Privilege, string) bool +} + +func (a *mockAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { + return a.AuthorizeDatabaseFn(p, name) +} + +func (m *mockAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { + panic("fail") +} + +func (m *mockAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { + panic("fail") +} + +func (m *mockAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { + panic("fail") +} + +func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) { + qe := influxql.NewQueryExecutor() + qe.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: &internal.MetaClientMock{ + DatabasesFn: func() []meta.DatabaseInfo { + return []meta.DatabaseInfo{ + {Name: "db1"}, {Name: "db2"}, {Name: "db3"}, {Name: "db4"}, + } + }, + }, + } + + opt := influxql.ExecutionOptions{ + Authorizer: &mockAuthorizer{ + AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool { + return name == "db2" || name == "db4" + }, + }, + } + + q, err := influxql.ParseQuery("SHOW 
DATABASES") + if err != nil { + t.Fatal(err) + } + + results := ReadAllResults(qe.ExecuteQuery(q, opt, make(chan struct{}))) + exp := []*influxql.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "databases", + Columns: []string{"name"}, + Values: [][]interface{}{ + {"db2"}, {"db4"}, + }, + }}, + }, + } + if !reflect.DeepEqual(results, exp) { + t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results)) + } +} + +// QueryExecutor is a test wrapper for coordinator.QueryExecutor. +type QueryExecutor struct { + *influxql.QueryExecutor + + MetaClient MetaClient + TSDBStore TSDBStore + StatementExecutor *coordinator.StatementExecutor + LogOutput bytes.Buffer +} + +// NewQueryExecutor returns a new instance of QueryExecutor. +// This query executor always has a node id of 0. +func NewQueryExecutor() *QueryExecutor { + e := &QueryExecutor{ + QueryExecutor: influxql.NewQueryExecutor(), + } + e.StatementExecutor = &coordinator.StatementExecutor{ + MetaClient: &e.MetaClient, + TSDBStore: &e.TSDBStore, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: &e.MetaClient, + TSDBStore: &e.TSDBStore, + }, + } + e.QueryExecutor.StatementExecutor = e.StatementExecutor + + var out io.Writer = &e.LogOutput + if testing.Verbose() { + out = io.MultiWriter(out, os.Stderr) + } + e.QueryExecutor.WithLogger(zap.New( + zap.NewTextEncoder(), + zap.Output(zap.AddSync(out)), + )) + + return e +} + +// DefaultQueryExecutor returns a QueryExecutor with a database (db0) and retention policy (rp0). +func DefaultQueryExecutor() *QueryExecutor { + e := NewQueryExecutor() + e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn + return e +} + +// ExecuteQuery parses query and executes against the database. +func (e *QueryExecutor) ExecuteQuery(query, database string, chunkSize int) <-chan *influxql.Result { + return e.QueryExecutor.ExecuteQuery(MustParseQuery(query), influxql.ExecutionOptions{ + Database: database, + ChunkSize: chunkSize, + }, make(chan struct{})) +} + +// TSDBStore is a mockable implementation of coordinator.TSDBStore. 
+type TSDBStore struct { + CreateShardFn func(database, policy string, shardID uint64, enabled bool) error + WriteToShardFn func(shardID uint64, points []models.Point) error + + RestoreShardFn func(id uint64, r io.Reader) error + BackupShardFn func(id uint64, since time.Time, w io.Writer) error + + DeleteDatabaseFn func(name string) error + DeleteMeasurementFn func(database, name string) error + DeleteRetentionPolicyFn func(database, name string) error + DeleteShardFn func(id uint64) error + DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error + ShardGroupFn func(ids []uint64) tsdb.ShardGroup +} + +func (s *TSDBStore) CreateShard(database, policy string, shardID uint64, enabled bool) error { + if s.CreateShardFn == nil { + return nil + } + return s.CreateShardFn(database, policy, shardID, enabled) +} + +func (s *TSDBStore) WriteToShard(shardID uint64, points []models.Point) error { + return s.WriteToShardFn(shardID, points) +} + +func (s *TSDBStore) RestoreShard(id uint64, r io.Reader) error { + return s.RestoreShardFn(id, r) +} + +func (s *TSDBStore) BackupShard(id uint64, since time.Time, w io.Writer) error { + return s.BackupShardFn(id, since, w) +} + +func (s *TSDBStore) DeleteDatabase(name string) error { + return s.DeleteDatabaseFn(name) +} + +func (s *TSDBStore) DeleteMeasurement(database, name string) error { + return s.DeleteMeasurementFn(database, name) +} + +func (s *TSDBStore) DeleteRetentionPolicy(database, name string) error { + return s.DeleteRetentionPolicyFn(database, name) +} + +func (s *TSDBStore) DeleteShard(id uint64) error { + return s.DeleteShardFn(id) +} + +func (s *TSDBStore) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error { + return s.DeleteSeriesFn(database, sources, condition) +} + +func (s *TSDBStore) ShardGroup(ids []uint64) tsdb.ShardGroup { + return s.ShardGroupFn(ids) +} + +func (s *TSDBStore) Measurements(database string, cond influxql.Expr) ([]string, error) { + return nil, nil +} + +func (s *TSDBStore) MeasurementNames(database string, cond influxql.Expr) ([][]byte, error) { + return nil, nil +} + +func (s *TSDBStore) TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) { + return nil, nil +} + +type MockShard struct { + Measurements []string + FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + CreateIteratorFn func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) +} + +func (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string { + names := make([]string, 0, len(sh.Measurements)) + for _, name := range sh.Measurements { + if re.MatchString(name) { + names = append(names, name) + } + } + return names +} + +func (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return sh.FieldDimensionsFn(measurements) +} + +func (sh *MockShard) MapType(measurement, field string) influxql.DataType { + f, d, err := sh.FieldDimensions([]string{measurement}) + if err != nil { + return influxql.Unknown + } + + if typ, ok := f[field]; ok { + return typ + } else if _, ok := d[field]; ok { + return influxql.Tag + } + return influxql.Unknown +} + +func (sh *MockShard) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return sh.CreateIteratorFn(measurement, opt) +} + 
+func (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return sh.ExpandSourcesFn(sources) +} + +// MustParseQuery parses s into a query. Panic on error. +func MustParseQuery(s string) *influxql.Query { + q, err := influxql.ParseQuery(s) + if err != nil { + panic(err) + } + return q +} + +// ReadAllResults reads all results from c and returns as a slice. +func ReadAllResults(c <-chan *influxql.Result) []*influxql.Result { + var a []*influxql.Result + for result := range c { + a = append(a, result) + } + return a +} + +// FloatIterator is a represents an iterator that reads from a slice. +type FloatIterator struct { + Points []influxql.FloatPoint + stats influxql.IteratorStats +} + +func (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*influxql.FloatPoint, error) { + if len(itr.Points) == 0 { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go new file mode 100644 index 0000000..9bc6b99 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/errors.go @@ -0,0 +1,42 @@ +package influxdb + +import ( + "errors" + "fmt" + "strings" +) + +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. +var ErrFieldTypeConflict = errors.New("field type conflict") + +// ErrDatabaseNotFound indicates that a database operation failed on the +// specified database because the specified database does not exist. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrRetentionPolicyNotFound indicates that the named retention policy could +// not be found in the database. +func ErrRetentionPolicyNotFound(name string) error { + return fmt.Errorf("retention policy not found: %s", name) +} + +// IsAuthorizationError indicates whether an error is due to an authorization failure +func IsAuthorizationError(err error) bool { + e, ok := err.(interface { + AuthorizationFailed() bool + }) + return ok && e.AuthorizationFailed() +} + +// IsClientError indicates whether an error is a known client error. 
+func IsClientError(err error) bool { + if err == nil { + return false + } + + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc new file mode 100644 index 0000000..a9c1a9c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc @@ -0,0 +1 @@ +rvm use ruby-2.1.0@burn-in --create diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile new file mode 100644 index 0000000..b1816e8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +gem "colorize" +gem "influxdb" diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock new file mode 100644 index 0000000..9e721c3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock @@ -0,0 +1,14 @@ +GEM + remote: https://rubygems.org/ + specs: + colorize (0.6.0) + influxdb (0.0.16) + json + json (1.8.1) + +PLATFORMS + ruby + +DEPENDENCIES + colorize + influxdb diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb new file mode 100644 index 0000000..1d44bc2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb @@ -0,0 +1,79 @@ +require "influxdb" +require "colorize" +require "benchmark" + +require_relative "log" +require_relative "random_gaussian" + +BATCH_SIZE = 10_000 + +Log.info "Starting burn-in suite" +master = InfluxDB::Client.new +master.delete_database("burn-in") rescue nil +master.create_database("burn-in") +master.create_database_user("burn-in", "user", "pass") + +master.database = "burn-in" +# master.query "select * from test1 into test2;" +# master.query "select count(value) from test1 group by time(1m) into test2;" + +influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" + +Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" + +Log.log "Creating RandomGaussian(500, 25)" +gaussian = RandomGaussian.new(500, 25) +point_count = 0 + +while true + Log.log "Generating 10,000 points.." + points = [] + BATCH_SIZE.times do |n| + points << {value: gaussian.rand.to_i.abs} + end + point_count += points.length + + Log.info "Sending points to server.." + begin + st = Time.now + foo = influxdb.write_point("test1", points) + et = Time.now + Log.log foo.inspect + Log.log "#{et-st} seconds elapsed" + Log.success "Write successful." 
+ rescue => e + Log.failure "Write failed:" + Log.log e + end + sleep 0.5 + + Log.info "Checking regular points" + st = Time.now + response = influxdb.query("select count(value) from test1;") + et = Time.now + + Log.log "#{et-st} seconds elapsed" + + response_count = response["test1"].first["count"] + if point_count == response_count + Log.success "Point counts match: #{point_count} == #{response_count}" + else + Log.failure "Point counts don't match: #{point_count} != #{response_count}" + end + + # Log.info "Checking continuous query points for test2" + # st = Time.now + # response = influxdb.query("select count(value) from test2;") + # et = Time.now + + # Log.log "#{et-st} seconds elapsed" + + # response_count = response["test2"].first["count"] + # if point_count == response_count + # Log.success "Point counts match: #{point_count} == #{response_count}" + # else + # Log.failure "Point counts don't match: #{point_count} != #{response_count}" + # end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb new file mode 100644 index 0000000..0f70d76 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb @@ -0,0 +1,23 @@ +module Log + def self.info(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:yellow) + end + + def self.success(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:green) + end + + def self.failure(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:red) + end + + def self.log(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s + end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb new file mode 100644 index 0000000..51d6c3c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb @@ -0,0 +1,31 @@ +class RandomGaussian + def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) + @rand_helper = rand_helper + @mean = mean + @stddev = stddev + @valid = false + @next = 0 + end + + def rand + if @valid then + @valid = false + return @next + else + @valid = true + x, y = self.class.gaussian(@mean, @stddev, @rand_helper) + @next = y + return x + end + end + + private + def self.gaussian(mean, stddev, rand) + theta = 2 * Math::PI * rand.call + rho = Math.sqrt(-2 * Math.log(1 - rand.call)) + scale = stddev * rho + x = mean + scale * Math.cos(theta) + y = mean + scale * Math.sin(theta) + return x, y + end +end diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb new file mode 100644 index 0000000..93bc831 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb @@ -0,0 +1,29 @@ +require "influxdb" + +ONE_WEEK_IN_SECONDS = 7*24*60*60 +NUM_POINTS = 10_000 +BATCHES = 100 + +master = InfluxDB::Client.new +master.delete_database("ctx") rescue nil +master.create_database("ctx") + +influxdb = InfluxDB::Client.new "ctx" +influxdb.time_precision = "s" + +names = ["foo", "bar", "baz", "quu", "qux"] + +st = Time.now +BATCHES.times do |m| + points = [] + + puts "Writing #{NUM_POINTS} points, time ##{m}.." 
+ NUM_POINTS.times do |n| + timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS) + points << {value: names.sample, time: timestamp} + end + + influxdb.write_point("ct1", points) +end +puts st +puts Time.now diff --git a/vendor/github.com/influxdata/influxdb/etc/config.sample.toml b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml new file mode 100644 index 0000000..23c4e03 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml @@ -0,0 +1,431 @@ +### Welcome to the InfluxDB configuration file. + +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the configuration +# field and the default value used. Uncommenting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. +# Change this option to true to disable reporting. +# reporting-disabled = false + +# Bind address to use for the RPC service for backup and restore. +# bind-address = "127.0.0.1:8088" + +### +### [meta] +### +### Controls the parameters for the Raft consensus group that stores metadata +### about the InfluxDB cluster. +### + +[meta] + # Where the metadata/raft database is stored + dir = "/var/lib/influxdb/meta" + + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true + +### +### [data] +### +### Controls where the actual shard data for InfluxDB lives and how it is +### flushed from the WAL. "dir" may need to be changed to a suitable place +### for your system, but the WAL settings are an advanced configuration. The +### defaults should work for most systems. +### + +[data] + # The directory where the TSM storage engine stores TSM files. + dir = "/var/lib/influxdb/data" + + # The directory where the TSM storage engine stores WAL files. + wal-dir = "/var/lib/influxdb/wal" + + # The amount of time that a write will wait before fsyncing. A duration + # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower + # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL. + # Values in the range of 0-100ms are recommended for non-SSD disks. + # wal-fsync-delay = "0s" + + + # The type of shard index to use for new shards. The default is an in-memory index that is + # recreated at startup. A value of "tsi1" will use a disk based index that supports higher + # cardinality datasets. + # index-version = "inmem" + + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false + + # Whether queries should be logged before execution. Very useful for troubleshooting, but will + # log any sensitive data contained within a query. + # query-log-enabled = true + + # Settings for the TSM engine + + # CacheMaxMemorySize is the maximum size a shard's cache can + # reach before it starts rejecting writes. 
+ # cache-max-memory-size = 1048576000 + + # CacheSnapshotMemorySize is the size at which the engine will + # snapshot the cache and write it to a TSM file, freeing up memory + # cache-snapshot-memory-size = 26214400 + + # CacheSnapshotWriteColdDuration is the length of time at + # which the engine will snapshot the cache and write it to + # a new TSM file if the shard hasn't received writes or deletes + # cache-snapshot-write-cold-duration = "10m" + + # CompactFullWriteColdDuration is the duration at which the engine + # will compact all TSM files in a shard if it hasn't received a + # write or delete + # compact-full-write-cold-duration = "4h" + + # The maximum number of concurrent full and level compactions that can run at one time. A + # value of 0 results in runtime.GOMAXPROCS(0) used at runtime. This setting does not apply + # to cache snapshotting. + # max-concurrent-compactions = 0 + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 + + # The maximum number of tag values per tag that are allowed before writes are dropped. This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. + # max-values-per-tag = 100000 + +### +### [coordinator] +### +### Controls the clustering service configuration. +### + +[coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" + + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 + + # The maximum time a query will is allowed to execute before being killed by the system. This limit + # can help prevent run away queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" + + # The time threshold when a query will be logged as a slow query. This limit can be set to help + # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging. + # log-queries-after = "0s" + + # The maximum number of points a SELECT can process. A value of 0 will make + # the maximum point count unlimited. This will only be checked every 10 seconds so queries will not + # be aborted immediately when hitting the limit. + # max-select-point = 0 + + # The maximum number of series a SELECT can run. A value of 0 will make the maximum series + # count unlimited. + # max-select-series = 0 + + # The maxium number of group by time bucket a SELECT can create. A value of zero will max the maximum + # number of buckets unlimited. + # max-select-buckets = 0 + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + # Determines whether retention policy enforcement enabled. + # enabled = true + + # The interval of time when retention policy enforcement checks run. + # check-interval = "30m" + +### +### [shard-precreation] +### +### Controls the precreation of shards, so they are available before data arrives. +### Only shards that, after creation, will have both a start- and end-time in the +### future, will ever be created. Shards are never precreated that would be wholly +### or partially in the past. 
+ +[shard-precreation] + # Determines whether shard pre-creation service is enabled. + # enabled = true + + # The interval of time when the check to pre-create new shards runs. + # check-interval = "10m" + + # The default period ahead of the endtime of a shard group that its successor + # group is created. + # advance-period = "30m" + +### +### Controls the system self-monitoring, statistics and diagnostics. +### +### The internal database for monitoring data is created automatically if +### if it does not already exist. The target retention within this database +### is called 'monitor' and is also created with a retention period of 7 days +### and a replication factor of 1, if it does not exist. In all cases the +### this retention policy is configured as the default for the database. + +[monitor] + # Whether to record statistics internally. + # store-enabled = true + + # The destination database for recorded statistics + # store-database = "_internal" + + # The interval at which to record statistics + # store-interval = "10s" + +### +### [http] +### +### Controls how the HTTP endpoints are configured. These are the primary +### mechanism for getting data into and out of InfluxDB. +### + +[http] + # Determines whether HTTP endpoint is enabled. + # enabled = true + + # The bind address used by the HTTP service. + # bind-address = ":8086" + + # Determines whether user authentication is enabled over HTTP/HTTPS. + # auth-enabled = false + + # The default realm sent back when issuing a basic auth challenge. + # realm = "InfluxDB" + + # Determines whether HTTP request logging is enabled. + # log-enabled = true + + # Determines whether detailed write logging is enabled. + # write-tracing = false + + # Determines whether the pprof endpoint is enabled. This endpoint is used for + # troubleshooting and monitoring. + # pprof-enabled = true + + # Determines whether HTTPS is enabled. + # https-enabled = false + + # The SSL certificate to use when HTTPS is enabled. + # https-certificate = "/etc/ssl/influxdb.pem" + + # Use a separate private key location. + # https-private-key = "" + + # The JWT auth shared secret to validate requests using JSON web tokens. + # shared-secret = "" + + # The default chunk size for result sets that should be chunked. + # max-row-limit = 0 + + # The maximum number of HTTP connections that may be open at once. New connections that + # would exceed this limit are dropped. Setting this value to 0 disables the limit. + # max-connection-limit = 0 + + # Enable http service over unix domain socket + # unix-socket-enabled = false + + # The path of the unix domain socket. + # bind-socket = "/var/run/influxdb.sock" + +### +### [subscriber] +### +### Controls the subscriptions, which can be used to fork a copy of all data +### received by the InfluxDB host. +### + +[subscriber] + # Determines whether the subscriber service is enabled. + # enabled = true + + # The default timeout for HTTP writes to subscribers. + # http-timeout = "30s" + + # Allows insecure HTTPS connections to subscribers. This is useful when testing with self- + # signed certificates. + # insecure-skip-verify = false + + # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used + # ca-certs = "" + + # The number of writer goroutines processing the write channel. + # write-concurrency = 40 + + # The number of in-flight writes buffered in the write channel. + # write-buffer-size = 1000 + + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. 
+### + +[[graphite]] + # Determines whether the graphite endpoint is enabled. + # enabled = false + # database = "graphite" + # retention-policy = "" + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] + +### +### [collectd] +### +### Controls one or many listeners for collectd data. +### + +[[collectd]] + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # + # The collectd service supports either scanning a directory for multiple types + # db files, or specifying a single db file. + # typesdb = "/usr/local/share/collectd" + # + # security-level = "none" + # auth-file = "/etc/collectd/auth_file" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + +### +### [opentsdb] +### +### Controls one or many listeners for OpenTSDB data. +### + +[[opentsdb]] + # enabled = false + # bind-address = ":4242" + # database = "opentsdb" + # retention-policy = "" + # consistency-level = "one" + # tls-enabled = false + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. + # log-point-errors = true + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. + + # Flush if this many points get buffered + # batch-size = 1000 + + # Number of batches that may be pending in memory + # batch-pending = 5 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. 
+###
+
+[[udp]]
+  # enabled = false
+  # bind-address = ":8089"
+  # database = "udp"
+  # retention-policy = ""
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered
+  # batch-size = 5000
+
+  # Number of batches that may be pending in memory
+  # batch-pending = 10
+
+  # Will flush at least this often even if we haven't hit buffer limit
+  # batch-timeout = "1s"
+
+  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+  # read-buffer = 0
+
+###
+### [continuous_queries]
+###
+### Controls how continuous queries are run within InfluxDB.
+###
+
+[continuous_queries]
+  # Determines whether the continuous query service is enabled.
+  # enabled = true
+
+  # Controls whether queries are logged when executed by the CQ service.
+  # log-enabled = true
+
+  # The interval at which continuous queries are checked to see if they need to run.
+  # run-interval = "1s"
diff --git a/vendor/github.com/influxdata/influxdb/gobuild.sh b/vendor/github.com/influxdata/influxdb/gobuild.sh
new file mode 100755
index 0000000..9a96e7e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/gobuild.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# This script runs inside the Dockerfile_build_ubuntu64_git container and
+# gets the latest Go source code and compiles it.
+# It then passes control over to the normal build.py script.
+
+set -e
+
+cd /go/src
+git fetch --all
+git checkout $GO_CHECKOUT
+# Merge in recent changes if we are on a branch;
+# if we checked out a tag, just ignore the error.
+git pull || true
+./make.bash
+
+# Run normal build.py
+cd "$PROJECT_DIR"
+exec ./build.py "$@"
diff --git a/vendor/github.com/influxdata/influxdb/importer/README.md b/vendor/github.com/influxdata/influxdb/importer/README.md
new file mode 100644
index 0000000..7b0dd87
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/importer/README.md
@@ -0,0 +1,214 @@
+# Import/Export
+
+## Exporting from 0.8.9
+
+Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.
+
+Note that `0.8.9` can be found here:
+
+```
+http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb
+http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm
+```
+
+### Design
+
+`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below).
+
+The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdata/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data) in `0.10`. Remember that batching is important and we don't recommend batch sizes over 5k without further testing.
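+
+As a rough illustration of that write path, here is a minimal Go sketch (separate from the importer; the host `localhost:8086`, the database `db0` and the points are made-up placeholders) that posts one small batch of line protocol to the `/write` endpoint:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+func main() {
+    // A tiny batch of line protocol points; keep real batches at or below ~5k points.
+    batch := strings.Join([]string{
+        "cpu,host=server1 value=33.3 1464026335000000000",
+        "cpu,host=server1 value=43.3 1464026395000000000",
+    }, "\n")
+
+    // The target database is passed as a query parameter to /write.
+    u := url.URL{Scheme: "http", Host: "localhost:8086", Path: "/write"}
+    u.RawQuery = url.Values{"db": []string{"db0"}}.Encode()
+
+    resp, err := http.Post(u.String(), "text/plain", strings.NewReader(batch))
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println("write status:", resp.Status) // expect 204 No Content on success
+}
+```
+
+On success the endpoint returns `204 No Content`; the importer described below follows the same batching pattern but goes through the Go client rather than raw HTTP.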
+ +Example export file: +``` +# DDL +CREATE DATABASE db0 +CREATE DATABASE db1 +CREATE RETENTION POLICY rp1 ON db1 DURATION 1h REPLICATION 1 + +# DML +# CONTEXT-DATABASE:db0 +# CONTEXT-RETENTION-POLICY:autogen +cpu,host=server1 value=33.3 1464026335000000000 +cpu,host=server1 value=43.3 1464026395000000000 +cpu,host=server1 value=63.3 1464026575000000000 + +# CONTEXT-DATABASE:db1 +# CONTEXT-RETENTION-POLICY:rp1 +cpu,host=server1 value=73.3 1464026335000000000 +cpu,host=server1 value=83.3 1464026395000000000 +cpu,host=server1 value=93.3 1464026575000000000 +``` + +You need to specify a database and shard group when you export. + +To list out your shards, use the following http endpoint: + +`/cluster/shard_spaces` + +example: +```sh +http://username:password@localhost:8086/cluster/shard_spaces +``` + +Then, to export a database with then name "metrics" and a shard space with the name "default", issue the following curl command: + +```sh +curl -o export http://username:password@localhost:8086/export/metrics/default +``` + +Compression is supported, and will result in a significantly smaller file size. + +Use the following command for compression: +```sh +curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default +``` + +You can also export just the `DDL` with this option: + +```sh +curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl +``` + +Or just the `DML` with this option: + +```sh +curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml +``` + +### Assumptions + +- Series name mapping follows these [guidelines](https://docs.influxdata.com/influxdb/v0.8/advanced_topics/schema_design/) +- Database name will map directly from `0.8` to `0.10` +- Shard Spaces map to Retention Policies +- Shard Space Duration is ignored, as in `0.10` we determine shard size automatically +- Regex is used to match the correct series names and only exports that data for the database +- Duration becomes the new Retention Policy duration + +- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.10` + +### Upgrade Recommendations + +It's recommended that you upgrade to `0.9.3` or later first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`. + +It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config: + +```toml +# Configure the http api +[api] +read-timeout = "0s" +``` + +### Exceptions + +If a series can't be exported to tags based on the guidelines mentioned above, +we will insert the entire series name as the measurement name. You can either +allow that to import into the new InfluxDB instance, or you can do your own +data massage on it prior to importing it. 
+
+For example, if you have the following series name:
+
+```
+metric.disk.c.host.server01.single
+```
+
+It will export exactly that as the measurement name, with no tags:
+
+```
+metric.disk.c.host.server01.single
+```
+
+### Export Metrics
+
+When you export, you will now get comments inline in the `DML`:
+
+`# Found 999 Series for export`
+
+As well as count totals for each series exported:
+
+`# Series FOO - Points Exported: 999`
+
+With a total at the bottom:
+
+`# Points Exported: 999`
+
+You can grep the file that was exported at the end to get all the export metrics:
+
+`cat myexport | grep Exported`
+
+## Importing
+
+Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`.
+
+## Caveats
+
+For the export/import to work, all prerequisites have to be met. For export, all series names in `0.8` should be in the following format:
+
+```
+<tag name>.<tag value>.<tag name>.<tag value>.<measurement>
+```
+for example:
+```
+az.us-west-1.host.serverA.cpu
+```
+or with any number of tags:
+```
+building.2.temperature
+```
+
+Additionally, the fields need to have a consistent type (all float64, int64, etc.) for every write in `0.8`. Otherwise they have the potential to fail writes in the import.
+See below for more information.
+
+## Running the import command
+
+ To import via the CLI, run the following command:
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed
+ ```
+
+ If the file is not compressed you can issue it without the `-compressed` flag:
+
+ ```sh
+ influx -import -path=metrics-default
+ ```
+
+ To redirect failed import lines to another file, run this command:
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed > failures
+ ```
+
+ The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server.
+
+### Throttling the import
+
+ If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server.
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed -pps 50000 > failures
+ ```
+
+ This states that you don't want more than 50,000 points per second written to the database. Because of the processing overhead, you will likely never hit exactly 50,000 pps; expect something closer to 35,000 pps.
+
+## Understanding the results of the import
+
+During the import, a status message is written out for every 100,000 points imported, reporting stats on the progress of the import:
+
+```
+2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s. Points per second (PPS): 54634
+```
+
+ When the import is finished, some basic stats are reported:
+
+ ```sh
+ 2015/07/29 23:15:20 Processed 2 commands
+ 2015/07/29 23:15:20 Processed 70207923 inserts
+ 2015/07/29 23:15:20 Failed 29785000 inserts
+ ```
+
+ Most inserts fail due to the following type of error:
+
+ ```sh
+ 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer
+ ```
+
+ This is because in `0.8` a field could be created and saved as an int or a float type on independent writes. In `0.9` and greater the field has to have a consistent type.
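+
+For reference, the `v8` importer package added below exposes the same import as a Go API. A minimal sketch (the server URL, credentials and file name are placeholders, not part of this repository) might look like this:
+
+```go
+package main
+
+import (
+    "log"
+    "net/url"
+
+    v8 "github.com/influxdata/influxdb/importer/v8"
+)
+
+func main() {
+    // Placeholder target server; adjust to your environment.
+    u, err := url.Parse("http://localhost:8086")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    config := v8.NewConfig()
+    config.URL = *u                    // promoted from the embedded client.Config
+    config.Username = "admin"          // placeholder credentials
+    config.Password = "secret"
+    config.Path = "metrics-default.gz" // file produced by the 0.8.9 export
+    config.Compressed = true           // the file above is gzipped
+    config.PPS = 50000                 // throttle to ~50k points per second (0 = no limit)
+
+    importer := v8.NewImporter(config)
+    if err := importer.Import(); err != nil {
+        log.Fatalf("import failed: %v", err)
+    }
+}
+```
+
+Failed batches are echoed to STDOUT by the importer, so redirecting output to a `failures` file works the same way as with the CLI.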
diff --git a/vendor/github.com/influxdata/influxdb/importer/v8/importer.go b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go new file mode 100644 index 0000000..5e1b01e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go @@ -0,0 +1,252 @@ +// Package v8 contains code for importing data from 0.8 instances of InfluxDB. +package v8 // import "github.com/influxdata/influxdb/importer/v8" + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "log" + "os" + "strings" + "time" + + "github.com/influxdata/influxdb/client" +) + +const batchSize = 5000 + +// Config is the config used to initialize a Importer importer +type Config struct { + Path string // Path to import data. + Version string + Compressed bool // Whether import data is gzipped. + PPS int // points per second importer imports with. + + client.Config +} + +// NewConfig returns an initialized *Config +func NewConfig() Config { + return Config{Config: client.NewConfig()} +} + +// Importer is the importer used for importing 0.8 data +type Importer struct { + client *client.Client + database string + retentionPolicy string + config Config + batch []string + totalInserts int + failedInserts int + totalCommands int + throttlePointsWritten int + lastWrite time.Time + throttle *time.Ticker +} + +// NewImporter will return an intialized Importer struct +func NewImporter(config Config) *Importer { + config.UserAgent = fmt.Sprintf("influxDB importer/%s", config.Version) + return &Importer{ + config: config, + batch: make([]string, 0, batchSize), + } +} + +// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize +func (i *Importer) Import() error { + // Create a client and try to connect. + cl, err := client.NewClient(i.config.Config) + if err != nil { + return fmt.Errorf("could not create client %s", err) + } + i.client = cl + if _, _, e := i.client.Ping(); e != nil { + return fmt.Errorf("failed to connect to %s\n", i.client.Addr()) + } + + // Validate args + if i.config.Path == "" { + return fmt.Errorf("file argument required") + } + + defer func() { + if i.totalInserts > 0 { + log.Printf("Processed %d commands\n", i.totalCommands) + log.Printf("Processed %d inserts\n", i.totalInserts) + log.Printf("Failed %d inserts\n", i.failedInserts) + } + }() + + // Open the file + f, err := os.Open(i.config.Path) + if err != nil { + return err + } + defer f.Close() + + var r io.Reader + + // If gzipped, wrap in a gzip reader + if i.config.Compressed { + gr, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gr.Close() + // Set the reader to the gzip reader + r = gr + } else { + // Standard text file so our reader can just be the file + r = f + } + + // Get our reader + scanner := bufio.NewScanner(r) + + // Process the DDL + i.processDDL(scanner) + + // Set up our throttle channel. Since there is effectively no other activity at this point + // the smaller resolution gets us much closer to the requested PPS + i.throttle = time.NewTicker(time.Microsecond) + defer i.throttle.Stop() + + // Prime the last write + i.lastWrite = time.Now() + + // Process the DML + i.processDML(scanner) + + // Check if we had any errors scanning the file + if err := scanner.Err(); err != nil { + return fmt.Errorf("reading standard input: %s", err) + } + + // If there were any failed inserts then return an error so that a non-zero + // exit code can be returned. 
+ if i.failedInserts > 0 { + plural := " was" + if i.failedInserts > 1 { + plural = "s were" + } + + return fmt.Errorf("%d point%s not inserted", i.failedInserts, plural) + } + + return nil +} + +func (i *Importer) processDDL(scanner *bufio.Scanner) { + for scanner.Scan() { + line := scanner.Text() + // If we find the DML token, we are done with DDL + if strings.HasPrefix(line, "# DML") { + return + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.queryExecutor(line) + } +} + +func (i *Importer) processDML(scanner *bufio.Scanner) { + start := time.Now() + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "# CONTEXT-DATABASE:") { + i.database = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") { + i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.batchAccumulator(line, start) + } + // Call batchWrite one last time to flush anything out in the batch + i.batchWrite() +} + +func (i *Importer) execute(command string) { + response, err := i.client.Query(client.Query{Command: command, Database: i.database}) + if err != nil { + log.Printf("error: %s\n", err) + return + } + if err := response.Error(); err != nil { + log.Printf("error: %s\n", response.Error()) + } +} + +func (i *Importer) queryExecutor(command string) { + i.totalCommands++ + i.execute(command) +} + +func (i *Importer) batchAccumulator(line string, start time.Time) { + i.batch = append(i.batch, line) + if len(i.batch) == batchSize { + i.batchWrite() + i.batch = i.batch[:0] + // Give some status feedback every 100000 lines processed + processed := i.totalInserts + i.failedInserts + if processed%100000 == 0 { + since := time.Since(start) + pps := float64(processed) / since.Seconds() + log.Printf("Processed %d lines. Time elapsed: %s. 
Points per second (PPS): %d", processed, since.String(), int64(pps)) + } + } +} + +func (i *Importer) batchWrite() { + // Accumulate the batch size to see how many points we have written this second + i.throttlePointsWritten += len(i.batch) + + // Find out when we last wrote data + since := time.Since(i.lastWrite) + + // Check to see if we've exceeded our points per second for the current timeframe + var currentPPS int + if since.Seconds() > 0 { + currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds()) + } else { + currentPPS = i.throttlePointsWritten + } + + // If our currentPPS is greater than the PPS specified, then we wait and retry + if int(currentPPS) > i.config.PPS && i.config.PPS != 0 { + // Wait for the next tick + <-i.throttle.C + + // Decrement the batch size back out as it is going to get called again + i.throttlePointsWritten -= len(i.batch) + i.batchWrite() + return + } + + _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency) + if e != nil { + log.Println("error writing batch: ", e) + // Output failed lines to STDOUT so users can capture lines that failed to import + fmt.Println(strings.Join(i.batch, "\n")) + i.failedInserts += len(i.batch) + } else { + i.totalInserts += len(i.batch) + } + i.throttlePointsWritten = 0 + i.lastWrite = time.Now() + return +} diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go new file mode 100644 index 0000000..a594175 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxdb.go @@ -0,0 +1,6 @@ +// Package influxdb is the root package of InfluxDB, +// the scalable datastore for metrics, events, and real-time analytics. +// +// If you're looking for the Go HTTP client for InfluxDB, +// see package github.com/influxdata/influxdb/client/v2. +package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/influxql/README.md b/vendor/github.com/influxdata/influxdb/influxql/README.md new file mode 100644 index 0000000..1441900 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/README.md @@ -0,0 +1,1161 @@ +# The Influx Query Language Specification + +## Introduction + +This is a reference for the Influx Query Language ("InfluxQL"). + +InfluxQL is a SQL-like query language for interacting with InfluxDB. It has +been lovingly crafted to feel familiar to those coming from other SQL or +SQL-like environments while providing features specific to storing and analyzing +time series data. + + +## Notation + +The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the +same notation used in the [Go](http://golang.org) programming language +specification, which can be found [here](https://golang.org/ref/spec). Not so +coincidentally, InfluxDB is written in Go. + +``` +Production = production_name "=" [ Expression ] "." . +Expression = Alternative { "|" Alternative } . +Alternative = Term { Term } . +Term = production_name | token [ "…" token ] | Group | Option | Repetition . +Group = "(" Expression ")" . +Option = "[" Expression "]" . +Repetition = "{" Expression "}" . +``` + +Notation operators in order of increasing precedence: + +``` +| alternation +() grouping +[] option (0 or 1 times) +{} repetition (0 to n times) +``` + +## Comments + +Both single and multiline comments are supported. A comment is treated +the same as whitespace by the parser. 
+ +``` +-- single line comment +/* + multiline comment +*/ +``` + +Single line comments will skip all text until the scanner hits a +newline. Multiline comments will skip all text until the end comment +marker is hit. Nested multiline comments are not supported so the +following does not work: + +``` +/* /* this does not work */ */ +``` + +## Query representation + +### Characters + +InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). + +``` +newline = /* the Unicode code point U+000A */ . +unicode_char = /* an arbitrary Unicode code point except newline */ . +``` + +## Letters and digits + +Letters are the set of ASCII characters plus the underscore character _ (U+005F) +is considered a letter. + +Only decimal digits are supported. + +``` +letter = ascii_letter | "_" . +ascii_letter = "A" … "Z" | "a" … "z" . +digit = "0" … "9" . +``` + +## Identifiers + +Identifiers are tokens which refer to database names, retention policy names, +user names, measurement names, tag keys, and field keys. + +The rules: + +- double quoted identifiers can contain any unicode character other than a new line +- double quoted identifiers can contain escaped `"` characters (i.e., `\"`) +- double quoted identifiers can contain InfluxQL keywords +- unquoted identifiers must start with an upper or lowercase ASCII character or "_" +- unquoted identifiers may contain only ASCII letters, decimal digits, and "_" + +``` +identifier = unquoted_identifier | quoted_identifier . +unquoted_identifier = ( letter ) { letter | digit } . +quoted_identifier = `"` unicode_char { unicode_char } `"` . +``` + +#### Examples: + +``` +cpu +_cpu_stats +"1h" +"anything really" +"1_Crazy-1337.identifier>NAME👍" +``` + +## Keywords + +``` +ALL ALTER ANY AS ASC BEGIN +BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT +DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP +DURATION END EVERY EXPLAIN FIELD FOR +FROM GRANT GRANTS GROUP GROUPS IN +INF INSERT INTO KEY KEYS KILL +LIMIT SHOW MEASUREMENT MEASUREMENTS NAME OFFSET +ON ORDER PASSWORD POLICY POLICIES PRIVILEGES +QUERIES QUERY READ REPLICATION RESAMPLE RETENTION +REVOKE SELECT SERIES SET SHARD SHARDS +SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG +TO USER USERS VALUES WHERE WITH +WRITE +``` + +## Literals + +### Integers + +InfluxQL supports decimal integer literals. Hexadecimal and octal literals are +not currently supported. + +``` +int_lit = [ "+" | "-" ] ( "1" … "9" ) { digit } . +``` + +### Floats + +InfluxQL supports floating-point literals. Exponents are not currently supported. + +``` +float_lit = [ "+" | "-" ] ( "." digit { digit } | digit { digit } "." { digit } ) . +``` + +### Strings + +String literals must be surrounded by single quotes. Strings may contain `'` +characters as long as they are escaped (i.e., `\'`). + +``` +string_lit = `'` { unicode_char } `'` . +``` + +### Durations + +Duration literals specify a length of time. An integer literal followed +immediately (with no spaces) by a duration unit listed below is interpreted as +a duration literal. + +### Duration units +| Units | Meaning | +|--------|-----------------------------------------| +| u or µ | microseconds (1 millionth of a second) | +| ms | milliseconds (1 thousandth of a second) | +| s | second | +| m | minute | +| h | hour | +| d | day | +| w | week | + +``` +duration_lit = int_lit duration_unit . +duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" . 
+``` + +### Dates & Times + +The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: + +InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM + +``` +time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . +``` + +### Booleans + +``` +bool_lit = TRUE | FALSE . +``` + +### Regular Expressions + +``` +regex_lit = "/" { unicode_char } "/" . +``` + +**Comparators:** +`=~` matches against +`!~` doesn't match against + +> **Note:** Use regular expressions to match measurements and tags. +You cannot use regular expressions to match databases, retention policies, or fields. + +## Queries + +A query is composed of one or more statements separated by a semicolon. + +``` +query = statement { ";" statement } . + +statement = alter_retention_policy_stmt | + create_continuous_query_stmt | + create_database_stmt | + create_retention_policy_stmt | + create_subscription_stmt | + create_user_stmt | + delete_stmt | + drop_continuous_query_stmt | + drop_database_stmt | + drop_measurement_stmt | + drop_retention_policy_stmt | + drop_series_stmt | + drop_shard_stmt | + drop_subscription_stmt | + drop_user_stmt | + grant_stmt | + kill_query_statement | + show_continuous_queries_stmt | + show_databases_stmt | + show_field_keys_stmt | + show_grants_stmt | + show_measurements_stmt | + show_queries_stmt | + show_retention_policies | + show_series_stmt | + show_shard_groups_stmt | + show_shards_stmt | + show_subscriptions_stmt| + show_tag_keys_stmt | + show_tag_values_stmt | + show_users_stmt | + revoke_stmt | + select_stmt . +``` + +## Statements + +### ALTER RETENTION POLICY + +``` +alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause + retention_policy_option + [ retention_policy_option ] + [ retention_policy_option ] + [ retention_policy_option ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples: + +```sql +-- Set default retention policy for mydb to 1h.cpu. +ALTER RETENTION POLICY "1h.cpu" ON "mydb" DEFAULT + +-- Change duration and replication factor. +ALTER RETENTION POLICY "policy1" ON "somedb" DURATION 1h REPLICATION 4 +``` + +### CREATE CONTINUOUS QUERY + +``` +create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause + [ "RESAMPLE" resample_opts ] + "BEGIN" select_stmt "END" . + +query_name = identifier . + +resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . 
+every_stmt = "EVERY" duration_lit +for_stmt = "FOR" duration_lit +``` + +#### Examples: + +```sql +-- selects from DEFAULT retention policy and writes into 6_months retention policy +CREATE CONTINUOUS QUERY "10m_event_count" +ON "db_name" +BEGIN + SELECT count("value") + INTO "6_months"."events" + FROM "events" + GROUP BY time(10m) +END; + +-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy +CREATE CONTINUOUS QUERY "1h_event_count" +ON "db_name" +BEGIN + SELECT sum("count") as "count" + INTO "2_years"."events" + FROM "6_months"."events" + GROUP BY time(1h) +END; + +-- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time +-- when resample is used, at least one of "EVERY" or "FOR" must be used +CREATE CONTINUOUS QUERY "cpu_mean" +ON "db_name" +RESAMPLE EVERY 10s FOR 2m +BEGIN + SELECT mean("value") + INTO "cpu_mean" + FROM "cpu" + GROUP BY time(1m) +END; +``` + +### CREATE DATABASE + +``` +create_database_stmt = "CREATE DATABASE" db_name + [ WITH + [ retention_policy_duration ] + [ retention_policy_replication ] + [ retention_policy_shard_group_duration ] + [ retention_policy_name ] + ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples: + +```sql +-- Create a database called foo +CREATE DATABASE "foo" + +-- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy +CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp" + +-- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy +CREATE DATABASE "mydb" WITH NAME "myrp" +``` + +### CREATE RETENTION POLICY + +``` +create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause + retention_policy_duration + retention_policy_replication + [ retention_policy_shard_group_duration ] + [ "DEFAULT" ] . +``` + +> Replication factors do not serve a purpose with single node instances. + +#### Examples + +```sql +-- Create a retention policy. +CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 + +-- Create a retention policy and set it as the DEFAULT. +CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFAULT + +-- Create a retention policy and specify the shard group duration. +CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m +``` + +### CREATE SUBSCRIPTION + +Subscriptions tell InfluxDB to send all the data it receives to Kapacitor or other third parties. + +``` +create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . +``` + +#### Examples: + +```sql +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that send data to 'example.com:9090' via UDP. +CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ALL 'udp://example.com:9090' + +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. 
+CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090' +``` + +### CREATE USER + +``` +create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password + [ "WITH ALL PRIVILEGES" ] . +``` + +#### Examples: + +```sql +-- Create a normal database user. +CREATE USER "jdoe" WITH PASSWORD '1337password' + +-- Create an admin user. +-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. +CREATE USER "jdoe" WITH PASSWORD '1337password' WITH ALL PRIVILEGES +``` + +> **Note:** The password string must be wrapped in single quotes. + +### DELETE + +``` +delete_stmt = "DELETE" ( from_clause | where_clause | from_clause where_clause ) . +``` + +#### Examples: + +```sql +DELETE FROM "cpu" +DELETE FROM "cpu" WHERE time < '2000-01-01T00:00:00Z' +DELETE WHERE time < '2000-01-01T00:00:00Z' +``` + +### DROP CONTINUOUS QUERY + +``` +drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . +``` + +#### Example: + +```sql +DROP CONTINUOUS QUERY "myquery" ON "mydb" +``` + +### DROP DATABASE + +``` +drop_database_stmt = "DROP DATABASE" db_name . +``` + +#### Example: + +```sql +DROP DATABASE "mydb" +``` + +### DROP MEASUREMENT + +``` +drop_measurement_stmt = "DROP MEASUREMENT" measurement . +``` + +#### Examples: + +```sql +-- drop the cpu measurement +DROP MEASUREMENT "cpu" +``` + +### DROP RETENTION POLICY + +``` +drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . +``` + +#### Example: + +```sql +-- drop the retention policy named 1h.cpu from mydb +DROP RETENTION POLICY "1h.cpu" ON "mydb" +``` + +### DROP SERIES + +``` +drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . +``` + +#### Example: + +```sql +DROP SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' + +``` + +### DROP SHARD + +``` +drop_shard_stmt = "DROP SHARD" ( shard_id ) . +``` + +#### Example: + +``` +DROP SHARD 1 +``` + +### DROP SUBSCRIPTION + +``` +drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . +``` + +#### Example: + +```sql +DROP SUBSCRIPTION "sub0" ON "mydb"."autogen" +``` + +### DROP USER + +``` +drop_user_stmt = "DROP USER" user_name . +``` + +#### Example: + +```sql +DROP USER "jdoe" +``` + +### GRANT + +> **NOTE:** Users can be granted privileges on databases that do not exist. + +``` +grant_stmt = "GRANT" privilege [ on_clause ] to_clause . +``` + +#### Examples: + +```sql +-- grant admin privileges +GRANT ALL TO "jdoe" + +-- grant read access to a database +GRANT READ ON "mydb" TO "jdoe" +``` + +### KILL QUERY + +``` +kill_query_statement = "KILL QUERY" query_id . +``` + +#### Examples: + +``` +--- kill a query with the query_id 36 +KILL QUERY 36 +``` + +> **NOTE:** Identify the `query_id` from the `SHOW QUERIES` output. + +### SHOW CONTINUOUS QUERIES + +``` +show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . +``` + +#### Example: + +```sql +-- show all continuous queries +SHOW CONTINUOUS QUERIES +``` + +### SHOW DATABASES + +``` +show_databases_stmt = "SHOW DATABASES" . +``` + +#### Example: + +```sql +-- show all databases +SHOW DATABASES +``` + +### SHOW FIELD KEYS + +``` +show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . 
+``` + +#### Examples: + +```sql +-- show field keys and field value data types from all measurements +SHOW FIELD KEYS + +-- show field keys and field value data types from specified measurement +SHOW FIELD KEYS FROM "cpu" +``` + +### SHOW GRANTS + +``` +show_grants_stmt = "SHOW GRANTS FOR" user_name . +``` + +#### Example: + +```sql +-- show grants for jdoe +SHOW GRANTS FOR "jdoe" +``` + +### SHOW MEASUREMENTS + +``` +show_measurements_stmt = "SHOW MEASUREMENTS" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all measurements +SHOW MEASUREMENTS + +-- show measurements where region tag = 'uswest' AND host tag = 'serverA' +SHOW MEASUREMENTS WHERE "region" = 'uswest' AND "host" = 'serverA' + +-- show measurements that start with 'h2o' +SHOW MEASUREMENTS WITH MEASUREMENT =~ /h2o.*/ +``` + +### SHOW QUERIES + +``` +show_queries_stmt = "SHOW QUERIES" . +``` + +#### Example: + +```sql +-- show all currently-running queries +SHOW QUERIES +``` + +### SHOW RETENTION POLICIES + +``` +show_retention_policies = "SHOW RETENTION POLICIES" on_clause . +``` + +#### Example: + +```sql +-- show all retention policies on a database +SHOW RETENTION POLICIES ON "mydb" +``` + +### SHOW SERIES + +``` +show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Example: + +```sql +SHOW SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' +``` + +### SHOW SHARD GROUPS + +``` +show_shard_groups_stmt = "SHOW SHARD GROUPS" . +``` + +#### Example: + +```sql +SHOW SHARD GROUPS +``` + +### SHOW SHARDS + +``` +show_shards_stmt = "SHOW SHARDS" . +``` + +#### Example: + +```sql +SHOW SHARDS +``` + +### SHOW SUBSCRIPTIONS + +``` +show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . +``` + +#### Example: + +```sql +SHOW SUBSCRIPTIONS +``` + +### SHOW TAG KEYS + +``` +show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] + [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag keys +SHOW TAG KEYS + +-- show all tag keys from the cpu measurement +SHOW TAG KEYS FROM "cpu" + +-- show all tag keys from the cpu measurement where the region key = 'uswest' +SHOW TAG KEYS FROM "cpu" WHERE "region" = 'uswest' + +-- show all tag keys where the host key = 'serverA' +SHOW TAG KEYS WHERE "host" = 'serverA' +``` + +### SHOW TAG VALUES + +``` +show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] + [ group_by_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag values across all measurements for the region tag +SHOW TAG VALUES WITH KEY = "region" + +-- show tag values from the cpu measurement for the region tag +SHOW TAG VALUES FROM "cpu" WITH KEY = "region" + +-- show tag values across all measurements for all tag keys that do not include the letter c +SHOW TAG VALUES WITH KEY !~ /.*c.*/ + +-- show tag values from the cpu measurement for region & host tag keys where service = 'redis' +SHOW TAG VALUES FROM "cpu" WITH KEY IN ("region", "host") WHERE "service" = 'redis' +``` + +### SHOW USERS + +``` +show_users_stmt = "SHOW USERS" . +``` + +#### Example: + +```sql +-- show all users +SHOW USERS +``` + +### REVOKE + +``` +revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . 
+``` + +#### Examples: + +```sql +-- revoke admin privileges from jdoe +REVOKE ALL PRIVILEGES FROM "jdoe" + +-- revoke read privileges from jdoe on mydb +REVOKE READ ON "mydb" FROM "jdoe" +``` + +### SELECT + +``` +select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] + [ group_by_clause ] [ order_by_clause ] [ limit_clause ] + [ offset_clause ] [ slimit_clause ] [ soffset_clause ] + [ timezone_clause ] . +``` + +#### Examples: + +```sql +-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals +SELECT mean("value") FROM "cpu" WHERE "region" = 'uswest' GROUP BY time(10m) fill(0) + +-- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy +SELECT mean("value") INTO "cpu_1h".:MEASUREMENT FROM /cpu.*/ + +-- select from measurements grouped by the day with a timezone +SELECT mean("value") FROM "cpu" GROUP BY region, time(1d) fill(0) tz("America/Chicago") +``` + +## Clauses + +``` +from_clause = "FROM" measurements . + +group_by_clause = "GROUP BY" dimensions fill(fill_option). + +into_clause = "INTO" ( measurement | back_ref ). + +limit_clause = "LIMIT" int_lit . + +offset_clause = "OFFSET" int_lit . + +slimit_clause = "SLIMIT" int_lit . + +soffset_clause = "SOFFSET" int_lit . + +timezone_clause = tz(string_lit) . + +on_clause = "ON" db_name . + +order_by_clause = "ORDER BY" sort_fields . + +to_clause = "TO" user_name . + +where_clause = "WHERE" expr . + +with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . + +with_tag_clause = "WITH KEY" ( "=" tag_key | "!=" tag_key | "=~" regex_lit | "IN (" tag_keys ")" ) . +``` + +## Expressions + +``` +binary_op = "+" | "-" | "*" | "/" | "%" | "&" | "|" | "^" | "AND" | + "OR" | "=" | "!=" | "<>" | "<" | "<=" | ">" | ">=" . + +expr = unary_expr { binary_op unary_expr } . + +unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | + float_lit | bool_lit | duration_lit | regex_lit . +``` + +## Other + +``` +alias = "AS" identifier . + +back_ref = ( policy_name ".:MEASUREMENT" ) | + ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . + +db_name = identifier . + +dimension = expr . + +dimensions = dimension { "," dimension } . + +field_key = identifier . + +field = expr [ alias ] . + +fields = field { "," field } . + +fill_option = "null" | "none" | "previous" | "linear" | int_lit | float_lit . + +host = string_lit . + +measurement = measurement_name | + ( policy_name "." measurement_name ) | + ( db_name "." [ policy_name ] "." measurement_name ) . + +measurements = measurement { "," measurement } . + +measurement_name = identifier | regex_lit . + +password = string_lit . + +policy_name = identifier . + +privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . + +query_id = int_lit . + +query_name = identifier . + +retention_policy = identifier . + +retention_policy_option = retention_policy_duration | + retention_policy_replication | + retention_policy_shard_group_duration | + "DEFAULT" . + +retention_policy_duration = "DURATION" duration_lit . + +retention_policy_replication = "REPLICATION" int_lit . + +retention_policy_shard_group_duration = "SHARD DURATION" duration_lit . + +retention_policy_name = "NAME" identifier . + +series_id = int_lit . + +shard_id = int_lit . + +sort_field = field_key [ ASC | DESC ] . + +sort_fields = sort_field { "," sort_field } . + +subscription_name = identifier . + +tag_key = identifier . + +tag_keys = tag_key { "," tag_key } . + +user_name = identifier . 
+ +var_ref = measurement . +``` + +## Query Engine Internals + +Once you understand the language itself, it's important to know how these +language constructs are implemented in the query engine. This gives you an +intuitive sense for how results will be processed and how to create efficient +queries. + +The life cycle of a query looks like this: + +1. InfluxQL query string is tokenized and then parsed into an abstract syntax + tree (AST). This is the code representation of the query itself. + +2. The AST is passed to the `QueryExecutor` which directs queries to the + appropriate handlers. For example, queries related to meta data are executed + by the meta service and `SELECT` statements are executed by the shards + themselves. + +3. The query engine then determines the shards that match the `SELECT` + statement's time range. From these shards, iterators are created for each + field in the statement. + +4. Iterators are passed to the emitter which drains them and joins the resulting + points. The emitter's job is to convert simple time/value points into the + more complex result objects that are returned to the client. + + +### Understanding Iterators + +Iterators are at the heart of the query engine. They provide a simple interface +for looping over a set of points. For example, this is an iterator over Float +points: + +``` +type FloatIterator interface { + Next() (*FloatPoint, error) +} +``` + +These iterators are created through the `IteratorCreator` interface: + +``` +type IteratorCreator interface { + CreateIterator(m *Measurement, opt IteratorOptions) (Iterator, error) +} +``` + +The `IteratorOptions` provide arguments about field selection, time ranges, +and dimensions that the iterator creator can use when planning an iterator. +The `IteratorCreator` interface is used at many levels such as the `Shards`, +`Shard`, and `Engine`. This allows optimizations to be performed when applicable +such as returning a precomputed `COUNT()`. + +Iterators aren't just for reading raw data from storage though. Iterators can be +composed so that they provided additional functionality around an input +iterator. For example, a `DistinctIterator` can compute the distinct values for +each time window for an input iterator. Or a `FillIterator` can generate +additional points that are missing from an input iterator. + +This composition also lends itself well to aggregation. For example, a statement +such as this: + +``` +SELECT MEAN(value) FROM cpu GROUP BY time(10m) +``` + +In this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the +underlying shards. However, if we can add an additional iterator to determine +the derivative of the mean: + +``` +SELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m) +``` + + +### Understanding Auxiliary Fields + +Because InfluxQL allows users to use selector functions such as `FIRST()`, +`LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related +data at the same time with the selected point. + +For example, in this query: + +``` +SELECT FIRST(value), host FROM cpu GROUP BY time(1h) +``` + +We are selecting the first `value` that occurs every hour but we also want to +retrieve the `host` associated with that point. Since the `Point` types only +specify a single typed `Value` for efficiency, we push the `host` into the +auxiliary fields of the point. These auxiliary fields are attached to the point +until it is passed to the emitter where the fields get split off to their own +iterator. 
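+
+Before the catalogue of built-in iterators below, here is a toy sketch of the composition idea: one iterator wrapping another. The types are deliberately simplified stand-ins (only the `FloatIterator` interface matches the one shown earlier; the engine's `FloatPoint` also carries a name, tags and auxiliary fields):
+
+```go
+package main
+
+import "fmt"
+
+// Simplified stand-in for the engine's FloatPoint.
+type FloatPoint struct {
+    Time  int64
+    Value float64
+}
+
+type FloatIterator interface {
+    Next() (*FloatPoint, error)
+}
+
+// sliceIterator plays back a fixed set of points, standing in for a shard iterator.
+type sliceIterator struct {
+    points []FloatPoint
+    i      int
+}
+
+func (itr *sliceIterator) Next() (*FloatPoint, error) {
+    if itr.i >= len(itr.points) {
+        return nil, nil // exhausted
+    }
+    p := itr.points[itr.i]
+    itr.i++
+    return &p, nil
+}
+
+// limitIterator wraps an input iterator and stops after n points,
+// roughly how a LIMIT is layered on top of other iterators.
+type limitIterator struct {
+    input FloatIterator
+    n     int
+}
+
+func (itr *limitIterator) Next() (*FloatPoint, error) {
+    if itr.n <= 0 {
+        return nil, nil
+    }
+    itr.n--
+    return itr.input.Next()
+}
+
+func main() {
+    var itr FloatIterator = &limitIterator{
+        input: &sliceIterator{points: []FloatPoint{{Time: 0, Value: 1.5}, {Time: 10, Value: 2.5}, {Time: 20, Value: 3.5}}},
+        n:     2,
+    }
+    for {
+        p, err := itr.Next()
+        if err != nil || p == nil {
+            break
+        }
+        fmt.Printf("t=%d v=%v\n", p.Time, p.Value)
+    }
+}
+```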
+ + +### Built-in Iterators + +There are many helper iterators that let us build queries: + +* Merge Iterator - This iterator combines one or more iterators into a single + new iterator of the same type. This iterator guarantees that all points + within a window will be output before starting the next window but does not + provide ordering guarantees within the window. This allows for fast access + for aggregate queries which do not need stronger sorting guarantees. + +* Sorted Merge Iterator - This iterator also combines one or more iterators + into a new iterator of the same type. However, this iterator guarantees + time ordering of every point. This makes it slower than the `MergeIterator` + but this ordering guarantee is required for non-aggregate queries which + return the raw data points. + +* Limit Iterator - This iterator limits the number of points per name/tag + group. This is the implementation of the `LIMIT` & `OFFSET` syntax. + +* Fill Iterator - This iterator injects extra points if they are missing from + the input iterator. It can provide `null` points, points with the previous + value, or points with a specific value. + +* Buffered Iterator - This iterator provides the ability to "unread" a point + back onto a buffer so it can be read again next time. This is used extensively + to provide lookahead for windowing. + +* Reduce Iterator - This iterator calls a reduction function for each point in + a window. When the window is complete then all points for that window are + output. This is used for simple aggregate functions such as `COUNT()`. + +* Reduce Slice Iterator - This iterator collects all points for a window first + and then passes them all to a reduction function at once. The results are + returned from the iterator. This is used for aggregate functions such as + `DERIVATIVE()`. + +* Transform Iterator - This iterator calls a transform function for each point + from an input iterator. This is used for executing binary expressions. + +* Dedupe Iterator - This iterator only outputs unique points. It is resource + intensive so it is only used for small queries such as meta query statements. + + +### Call Iterators + +Function calls in InfluxQL are implemented at two levels. Some calls can be +wrapped at multiple layers to improve efficiency. For example, a `COUNT()` can +be performed at the shard level and then multiple `CountIterator`s can be +wrapped with another `CountIterator` to compute the count of all shards. These +iterators can be created using `NewCallIterator()`. + +Some iterators are more complex or need to be implemented at a higher level. +For example, the `DERIVATIVE()` needs to retrieve all points for a window first +before performing the calculation. This iterator is created by the engine itself +and is never requested to be created by the lower levels. + +### Subqueries + +Subqueries are built on top of iterators. Most of the work involved in +supporting subqueries is in organizing how data is streamed to the +iterators that will process the data. + +The final ordering of the stream has to output all points from one +series before moving to the next series and it also needs to ensure +those points are printed in order. So there are two separate concepts we +need to consider when creating an iterator: ordering and grouping. 
+ +When an inner query has a different grouping than the outermost query, +we still need to group together related points into buckets, but we do +not have to ensure that all points from one buckets are output before +the points in another bucket. In fact, if we do that, we will be unable +to perform the grouping for the outer query correctly. Instead, we group +all points by the outermost query for an interval and then, within that +interval, we group the points for the inner query. For example, here are +series keys and times in seconds (fields are omitted since they don't +matter in this example): + + cpu,host=server01 0 + cpu,host=server01 10 + cpu,host=server01 20 + cpu,host=server01 30 + cpu,host=server02 0 + cpu,host=server02 10 + cpu,host=server02 20 + cpu,host=server02 30 + +With the following query: + + SELECT mean(max) FROM (SELECT max(value) FROM cpu GROUP BY host, time(20s)) GROUP BY time(20s) + +The final grouping keeps all of the points together which means we need +to group `server01` with `server02`. That means we output the points +from the underlying engine like this: + + cpu,host=server01 0 + cpu,host=server01 10 + cpu,host=server02 0 + cpu,host=server02 10 + cpu,host=server01 20 + cpu,host=server01 30 + cpu,host=server02 20 + cpu,host=server02 30 + +Within each one of those time buckets, we calculate the `max()` value +for each unique host so the output stream gets transformed to look like +this: + + cpu,host=server01 0 + cpu,host=server02 0 + cpu,host=server01 20 + cpu,host=server02 20 + +Then we can process the `mean()` on this stream of data instead and it +will be output in the correct order. This is true of any order of +grouping since grouping can only go from more specific to less specific. + +When it comes to ordering, unordered data is faster to process, but we +always need to produce ordered data. When processing a raw query with no +aggregates, we need to ensure data coming from the engine is ordered so +the output is ordered. When we have an aggregate, we know one point is +being emitted for each interval and will always produce ordered output. +So for aggregates, we can take unordered data as the input and get +ordered output. Any ordered data as input will always result in ordered +data so we just need to look at how an iterator processes unordered +data. + +| | raw query | selector (without group by time) | selector (with group by time) | aggregator | +|-----------------|------------------|----------------------------------|-------------------------------|----------------| +| ordered input | ordered output | ordered output | ordered output | ordered output | +| unordered input | unordered output | unordered output | ordered output | ordered output | + +Since we always need ordered output, we just need to work backwards and +determine which pattern of input gives us ordered output. If both +ordered and unordered input produce ordered output, we prefer unordered +input since it is faster. + +There are also certain aggregates that require ordered input like +`median()` and `percentile()`. These functions will explicitly request +ordered input. It is also important to realize that selectors that are +grouped by time are the equivalent of an aggregator. It is only +selectors without a group by time that are different. 
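+
+To make the two-level grouping concrete, here is a toy sketch of the `mean(max)` example above computed eagerly over the eight sample points (the values are invented; the real engine does this with streaming iterators rather than maps):
+
+```go
+package main
+
+import "fmt"
+
+// point mirrors the example above: a series key (host) and a timestamp in seconds.
+type point struct {
+    host  string
+    time  int64
+    value float64
+}
+
+func main() {
+    // The eight sample points from the example, with invented values.
+    points := []point{
+        {"server01", 0, 10}, {"server01", 10, 20}, {"server01", 20, 30}, {"server01", 30, 40},
+        {"server02", 0, 50}, {"server02", 10, 60}, {"server02", 20, 70}, {"server02", 30, 80},
+    }
+
+    // Inner query: max(value) grouped by host and time(20s).
+    type key struct {
+        window int64
+        host   string
+    }
+    maxes := map[key]float64{}
+    for _, p := range points {
+        k := key{p.time / 20 * 20, p.host}
+        if v, ok := maxes[k]; !ok || p.value > v {
+            maxes[k] = p.value
+        }
+    }
+
+    // Outer query: mean(max) grouped by time(20s) across all hosts.
+    sums := map[int64]float64{}
+    counts := map[int64]int{}
+    for k, v := range maxes {
+        sums[k.window] += v
+        counts[k.window]++
+    }
+    for _, w := range []int64{0, 20} {
+        fmt.Printf("time=%ds mean(max)=%.1f\n", w, sums[w]/float64(counts[w]))
+    }
+}
+```
+
+The ordering requirements in the table above fall out of the same picture: once an aggregate emits one point per interval, the output is ordered even when the underlying input is not.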
diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast.go b/vendor/github.com/influxdata/influxdb/influxql/ast.go new file mode 100644 index 0000000..f337644 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/ast.go @@ -0,0 +1,5371 @@ +package influxql + +import ( + "bytes" + "errors" + "fmt" + "math" + "regexp" + "regexp/syntax" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// DataType represents the primitive data types available in InfluxQL. +type DataType int + +const ( + // Unknown primitive data type. + Unknown DataType = 0 + // Float means the data type is a float. + Float = 1 + // Integer means the data type is an integer. + Integer = 2 + // String means the data type is a string of text. + String = 3 + // Boolean means the data type is a boolean. + Boolean = 4 + // Time means the data type is a time. + Time = 5 + // Duration means the data type is a duration of time. + Duration = 6 + // Tag means the data type is a tag. + Tag = 7 + // AnyField means the data type is any field. + AnyField = 8 +) + +var ( + // ErrInvalidTime is returned when the timestamp string used to + // compare against time field is invalid. + ErrInvalidTime = errors.New("invalid timestamp string") +) + +// InspectDataType returns the data type of a given value. +func InspectDataType(v interface{}) DataType { + switch v.(type) { + case float64: + return Float + case int64, int32, int: + return Integer + case string: + return String + case bool: + return Boolean + case time.Time: + return Time + case time.Duration: + return Duration + default: + return Unknown + } +} + +// InspectDataTypes returns all of the data types for an interface slice. +func InspectDataTypes(a []interface{}) []DataType { + dta := make([]DataType, len(a)) + for i, v := range a { + dta[i] = InspectDataType(v) + } + return dta +} + +// LessThan returns true if the other DataType has greater precedence than the +// current data type. Unknown has the lowest precedence. +// +// NOTE: This is not the same as using the `<` or `>` operator because the +// integers used decrease with higher precedence, but Unknown is the lowest +// precedence at the zero value. +func (d DataType) LessThan(other DataType) bool { + return d == Unknown || (other != Unknown && other < d) +} + +// String returns the human-readable string representation of the DataType. +func (d DataType) String() string { + switch d { + case Float: + return "float" + case Integer: + return "integer" + case String: + return "string" + case Boolean: + return "boolean" + case Time: + return "time" + case Duration: + return "duration" + case Tag: + return "tag" + case AnyField: + return "field" + } + return "unknown" +} + +// Node represents a node in the InfluxDB abstract syntax tree. +type Node interface { + // node is unexported to ensure implementations of Node + // can only originate in this package. 
+ node() + String() string +} + +func (*Query) node() {} +func (Statements) node() {} + +func (*AlterRetentionPolicyStatement) node() {} +func (*CreateContinuousQueryStatement) node() {} +func (*CreateDatabaseStatement) node() {} +func (*CreateRetentionPolicyStatement) node() {} +func (*CreateSubscriptionStatement) node() {} +func (*CreateUserStatement) node() {} +func (*Distinct) node() {} +func (*DeleteSeriesStatement) node() {} +func (*DeleteStatement) node() {} +func (*DropContinuousQueryStatement) node() {} +func (*DropDatabaseStatement) node() {} +func (*DropMeasurementStatement) node() {} +func (*DropRetentionPolicyStatement) node() {} +func (*DropSeriesStatement) node() {} +func (*DropShardStatement) node() {} +func (*DropSubscriptionStatement) node() {} +func (*DropUserStatement) node() {} +func (*GrantStatement) node() {} +func (*GrantAdminStatement) node() {} +func (*KillQueryStatement) node() {} +func (*RevokeStatement) node() {} +func (*RevokeAdminStatement) node() {} +func (*SelectStatement) node() {} +func (*SetPasswordUserStatement) node() {} +func (*ShowContinuousQueriesStatement) node() {} +func (*ShowGrantsForUserStatement) node() {} +func (*ShowDatabasesStatement) node() {} +func (*ShowFieldKeysStatement) node() {} +func (*ShowRetentionPoliciesStatement) node() {} +func (*ShowMeasurementsStatement) node() {} +func (*ShowQueriesStatement) node() {} +func (*ShowSeriesStatement) node() {} +func (*ShowShardGroupsStatement) node() {} +func (*ShowShardsStatement) node() {} +func (*ShowStatsStatement) node() {} +func (*ShowSubscriptionsStatement) node() {} +func (*ShowDiagnosticsStatement) node() {} +func (*ShowTagKeysStatement) node() {} +func (*ShowTagValuesStatement) node() {} +func (*ShowUsersStatement) node() {} + +func (*BinaryExpr) node() {} +func (*BooleanLiteral) node() {} +func (*Call) node() {} +func (*Dimension) node() {} +func (Dimensions) node() {} +func (*DurationLiteral) node() {} +func (*IntegerLiteral) node() {} +func (*Field) node() {} +func (Fields) node() {} +func (*Measurement) node() {} +func (Measurements) node() {} +func (*nilLiteral) node() {} +func (*NumberLiteral) node() {} +func (*ParenExpr) node() {} +func (*RegexLiteral) node() {} +func (*ListLiteral) node() {} +func (*SortField) node() {} +func (SortFields) node() {} +func (Sources) node() {} +func (*StringLiteral) node() {} +func (*SubQuery) node() {} +func (*Target) node() {} +func (*TimeLiteral) node() {} +func (*VarRef) node() {} +func (*Wildcard) node() {} + +// Query represents a collection of ordered statements. +type Query struct { + Statements Statements +} + +// String returns a string representation of the query. +func (q *Query) String() string { return q.Statements.String() } + +// Statements represents a list of statements. +type Statements []Statement + +// String returns a string representation of the statements. +func (a Statements) String() string { + var str []string + for _, stmt := range a { + str = append(str, stmt.String()) + } + return strings.Join(str, ";\n") +} + +// Statement represents a single command in InfluxQL. +type Statement interface { + Node + // stmt is unexported to ensure implementations of Statement + // can only originate in this package. + stmt() + RequiredPrivileges() (ExecutionPrivileges, error) +} + +// HasDefaultDatabase provides an interface to get the default database from a Statement. +type HasDefaultDatabase interface { + Node + // stmt is unexported to ensure implementations of HasDefaultDatabase + // can only originate in this package. 
+ stmt() + DefaultDatabase() string +} + +// ExecutionPrivilege is a privilege required for a user to execute +// a statement on a database or resource. +type ExecutionPrivilege struct { + // Admin privilege required. + Admin bool + + // Name of the database. + Name string + + // Database privilege required. + Privilege Privilege +} + +// ExecutionPrivileges is a list of privileges required to execute a statement. +type ExecutionPrivileges []ExecutionPrivilege + +func (*AlterRetentionPolicyStatement) stmt() {} +func (*CreateContinuousQueryStatement) stmt() {} +func (*CreateDatabaseStatement) stmt() {} +func (*CreateRetentionPolicyStatement) stmt() {} +func (*CreateSubscriptionStatement) stmt() {} +func (*CreateUserStatement) stmt() {} +func (*DeleteSeriesStatement) stmt() {} +func (*DeleteStatement) stmt() {} +func (*DropContinuousQueryStatement) stmt() {} +func (*DropDatabaseStatement) stmt() {} +func (*DropMeasurementStatement) stmt() {} +func (*DropRetentionPolicyStatement) stmt() {} +func (*DropSeriesStatement) stmt() {} +func (*DropSubscriptionStatement) stmt() {} +func (*DropUserStatement) stmt() {} +func (*GrantStatement) stmt() {} +func (*GrantAdminStatement) stmt() {} +func (*KillQueryStatement) stmt() {} +func (*ShowContinuousQueriesStatement) stmt() {} +func (*ShowGrantsForUserStatement) stmt() {} +func (*ShowDatabasesStatement) stmt() {} +func (*ShowFieldKeysStatement) stmt() {} +func (*ShowMeasurementsStatement) stmt() {} +func (*ShowQueriesStatement) stmt() {} +func (*ShowRetentionPoliciesStatement) stmt() {} +func (*ShowSeriesStatement) stmt() {} +func (*ShowShardGroupsStatement) stmt() {} +func (*ShowShardsStatement) stmt() {} +func (*ShowStatsStatement) stmt() {} +func (*DropShardStatement) stmt() {} +func (*ShowSubscriptionsStatement) stmt() {} +func (*ShowDiagnosticsStatement) stmt() {} +func (*ShowTagKeysStatement) stmt() {} +func (*ShowTagValuesStatement) stmt() {} +func (*ShowUsersStatement) stmt() {} +func (*RevokeStatement) stmt() {} +func (*RevokeAdminStatement) stmt() {} +func (*SelectStatement) stmt() {} +func (*SetPasswordUserStatement) stmt() {} + +// Expr represents an expression that can be evaluated to a value. +type Expr interface { + Node + // expr is unexported to ensure implementations of Expr + // can only originate in this package. + expr() +} + +func (*BinaryExpr) expr() {} +func (*BooleanLiteral) expr() {} +func (*Call) expr() {} +func (*Distinct) expr() {} +func (*DurationLiteral) expr() {} +func (*IntegerLiteral) expr() {} +func (*nilLiteral) expr() {} +func (*NumberLiteral) expr() {} +func (*ParenExpr) expr() {} +func (*RegexLiteral) expr() {} +func (*ListLiteral) expr() {} +func (*StringLiteral) expr() {} +func (*TimeLiteral) expr() {} +func (*VarRef) expr() {} +func (*Wildcard) expr() {} + +// Literal represents a static literal. +type Literal interface { + Expr + // literal is unexported to ensure implementations of Literal + // can only originate in this package. + literal() +} + +func (*BooleanLiteral) literal() {} +func (*DurationLiteral) literal() {} +func (*IntegerLiteral) literal() {} +func (*nilLiteral) literal() {} +func (*NumberLiteral) literal() {} +func (*RegexLiteral) literal() {} +func (*ListLiteral) literal() {} +func (*StringLiteral) literal() {} +func (*TimeLiteral) literal() {} + +// Source represents a source of data for a statement. +type Source interface { + Node + // source is unexported to ensure implementations of Source + // can only originate in this package. 
+ source() +} + +func (*Measurement) source() {} +func (*SubQuery) source() {} + +// Sources represents a list of sources. +type Sources []Source + +// Names returns a list of source names. +func (a Sources) Names() []string { + names := make([]string, 0, len(a)) + for _, s := range a { + switch s := s.(type) { + case *Measurement: + names = append(names, s.Name) + } + } + return names +} + +// Filter returns a list of source names filtered by the database/retention policy. +func (a Sources) Filter(database, retentionPolicy string) []Source { + sources := make([]Source, 0, len(a)) + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if s.Database == database && s.RetentionPolicy == retentionPolicy { + sources = append(sources, s) + } + case *SubQuery: + filteredSources := s.Statement.Sources.Filter(database, retentionPolicy) + sources = append(sources, filteredSources...) + } + } + return sources +} + +// HasSystemSource returns true if any of the sources are internal, system sources. +func (a Sources) HasSystemSource() bool { + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if IsSystemName(s.Name) { + return true + } + } + } + return false +} + +// HasRegex returns true if any of the sources are regex measurements. +func (a Sources) HasRegex() bool { + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if s.Regex != nil { + return true + } + } + } + return false +} + +// String returns a string representation of a Sources array. +func (a Sources) String() string { + var buf bytes.Buffer + + ubound := len(a) - 1 + for i, src := range a { + _, _ = buf.WriteString(src.String()) + if i < ubound { + _, _ = buf.WriteString(", ") + } + } + + return buf.String() +} + +// Measurements returns all measurements including ones embedded in subqueries. +func (a Sources) Measurements() []*Measurement { + mms := make([]*Measurement, 0, len(a)) + for _, src := range a { + switch src := src.(type) { + case *Measurement: + mms = append(mms, src) + case *SubQuery: + mms = append(mms, src.Statement.Sources.Measurements()...) + } + } + return mms +} + +// MarshalBinary encodes a list of sources to a binary format. +func (a Sources) MarshalBinary() ([]byte, error) { + var pb internal.Measurements + pb.Items = make([]*internal.Measurement, len(a)) + for i, source := range a { + pb.Items[i] = encodeMeasurement(source.(*Measurement)) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes binary data into a list of sources. +func (a *Sources) UnmarshalBinary(buf []byte) error { + var pb internal.Measurements + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + *a = make(Sources, len(pb.GetItems())) + for i := range pb.GetItems() { + mm, err := decodeMeasurement(pb.GetItems()[i]) + if err != nil { + return err + } + (*a)[i] = mm + } + return nil +} + +// IsSystemName returns true if name is an internal system name. +func IsSystemName(name string) bool { + switch name { + case "_fieldKeys", + "_measurements", + "_series", + "_tagKeys", + "_tags": + return true + default: + return false + } +} + +// SortField represents a field to sort results by. +type SortField struct { + // Name of the field. + Name string + + // Sort order. + Ascending bool +} + +// String returns a string representation of a sort field. 
+func (field *SortField) String() string { + var buf bytes.Buffer + if field.Name != "" { + _, _ = buf.WriteString(field.Name) + _, _ = buf.WriteString(" ") + } + if field.Ascending { + _, _ = buf.WriteString("ASC") + } else { + _, _ = buf.WriteString("DESC") + } + return buf.String() +} + +// SortFields represents an ordered list of ORDER BY fields. +type SortFields []*SortField + +// String returns a string representation of sort fields. +func (a SortFields) String() string { + fields := make([]string, 0, len(a)) + for _, field := range a { + fields = append(fields, field.String()) + } + return strings.Join(fields, ", ") +} + +// CreateDatabaseStatement represents a command for creating a new database. +type CreateDatabaseStatement struct { + // Name of the database to be created. + Name string + + // RetentionPolicyCreate indicates whether the user explicitly wants to create a retention policy. + RetentionPolicyCreate bool + + // RetentionPolicyDuration indicates retention duration for the new database. + RetentionPolicyDuration *time.Duration + + // RetentionPolicyReplication indicates retention replication for the new database. + RetentionPolicyReplication *int + + // RetentionPolicyName indicates retention name for the new database. + RetentionPolicyName string + + // RetentionPolicyShardGroupDuration indicates shard group duration for the new database. + RetentionPolicyShardGroupDuration time.Duration +} + +// String returns a string representation of the create database statement. +func (s *CreateDatabaseStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE DATABASE ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + if s.RetentionPolicyCreate { + _, _ = buf.WriteString(" WITH") + if s.RetentionPolicyDuration != nil { + _, _ = buf.WriteString(" DURATION ") + _, _ = buf.WriteString(s.RetentionPolicyDuration.String()) + } + if s.RetentionPolicyReplication != nil { + _, _ = buf.WriteString(" REPLICATION ") + _, _ = buf.WriteString(strconv.Itoa(*s.RetentionPolicyReplication)) + } + if s.RetentionPolicyShardGroupDuration > 0 { + _, _ = buf.WriteString(" SHARD DURATION ") + _, _ = buf.WriteString(s.RetentionPolicyShardGroupDuration.String()) + } + if s.RetentionPolicyName != "" { + _, _ = buf.WriteString(" NAME ") + _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName)) + } + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement. +func (s *CreateDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DropDatabaseStatement represents a command to drop a database. +type DropDatabaseStatement struct { + // Name of the database to be dropped. + Name string +} + +// String returns a string representation of the drop database statement. +func (s *DropDatabaseStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP DATABASE ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement. +func (s *DropDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DropRetentionPolicyStatement represents a command to drop a retention policy from a database. +type DropRetentionPolicyStatement struct { + // Name of the policy to drop. 
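The statement types above all rebuild their InfluxQL text from exported fields via String(). A minimal sketch for CreateDatabaseStatement, assuming the usual github.com/influxdata/influxdb/influxql import path for this vendored package:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/influxql" // assumed import path
)

func main() {
	// Illustrative only: the optional retention settings are pointers, so
	// unset values are simply omitted from the rendered statement.
	duration := 48 * time.Hour
	replication := 2
	stmt := &influxql.CreateDatabaseStatement{
		Name:                       "telegraf",
		RetentionPolicyCreate:      true,
		RetentionPolicyDuration:    &duration,
		RetentionPolicyReplication: &replication,
		RetentionPolicyName:        "two_days",
	}
	// Prints: CREATE DATABASE telegraf WITH DURATION 48h0m0s REPLICATION 2 NAME two_days
	fmt.Println(stmt.String())
}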
+ Name string + + // Name of the database to drop the policy from. + Database string +} + +// String returns a string representation of the drop retention policy statement. +func (s *DropRetentionPolicyStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP RETENTION POLICY ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement. +func (s *DropRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *DropRetentionPolicyStatement) DefaultDatabase() string { + return s.Database +} + +// CreateUserStatement represents a command for creating a new user. +type CreateUserStatement struct { + // Name of the user to be created. + Name string + + // User's password. + Password string + + // User's admin privilege. + Admin bool +} + +// String returns a string representation of the create user statement. +func (s *CreateUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE USER ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" WITH PASSWORD ") + _, _ = buf.WriteString("[REDACTED]") + if s.Admin { + _, _ = buf.WriteString(" WITH ALL PRIVILEGES") + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement. +func (s *CreateUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DropUserStatement represents a command for dropping a user. +type DropUserStatement struct { + // Name of the user to drop. + Name string +} + +// String returns a string representation of the drop user statement. +func (s *DropUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP USER ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement. +func (s *DropUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// Privilege is a type of action a user can be granted the right to use. +type Privilege int + +const ( + // NoPrivileges means no privileges required / granted / revoked. + NoPrivileges Privilege = iota + // ReadPrivilege means read privilege required / granted / revoked. + ReadPrivilege + // WritePrivilege means write privilege required / granted / revoked. + WritePrivilege + // AllPrivileges means all privileges required / granted / revoked. + AllPrivileges +) + +// NewPrivilege returns an initialized *Privilege. +func NewPrivilege(p Privilege) *Privilege { return &p } + +// String returns a string representation of a Privilege. +func (p Privilege) String() string { + switch p { + case NoPrivileges: + return "NO PRIVILEGES" + case ReadPrivilege: + return "READ" + case WritePrivilege: + return "WRITE" + case AllPrivileges: + return "ALL PRIVILEGES" + } + return "" +} + +// GrantStatement represents a command for granting a privilege. +type GrantStatement struct { + // The privilege to be granted. 
+ Privilege Privilege + + // Database to grant the privilege to. + On string + + // Who to grant the privilege to. + User string +} + +// String returns a string representation of the grant statement. +func (s *GrantStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("GRANT ") + _, _ = buf.WriteString(s.Privilege.String()) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.On)) + _, _ = buf.WriteString(" TO ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a GrantStatement. +func (s *GrantStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *GrantStatement) DefaultDatabase() string { + return s.On +} + +// GrantAdminStatement represents a command for granting admin privilege. +type GrantAdminStatement struct { + // Who to grant the privilege to. + User string +} + +// String returns a string representation of the grant admin statement. +func (s *GrantAdminStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("GRANT ALL PRIVILEGES TO ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a GrantAdminStatement. +func (s *GrantAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// KillQueryStatement represents a command for killing a query. +type KillQueryStatement struct { + // The query to kill. + QueryID uint64 + + // The host to delegate the kill to. + Host string +} + +// String returns a string representation of the kill query statement. +func (s *KillQueryStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("KILL QUERY ") + _, _ = buf.WriteString(strconv.FormatUint(s.QueryID, 10)) + if s.Host != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Host)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a KillQueryStatement. +func (s *KillQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// SetPasswordUserStatement represents a command for changing user password. +type SetPasswordUserStatement struct { + // Plain-text password. + Password string + + // Who to grant the privilege to. + Name string +} + +// String returns a string representation of the set password statement. +func (s *SetPasswordUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SET PASSWORD FOR ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" = ") + _, _ = buf.WriteString("[REDACTED]") + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement. +func (s *SetPasswordUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// RevokeStatement represents a command to revoke a privilege from a user. +type RevokeStatement struct { + // The privilege to be revoked. + Privilege Privilege + + // Database to revoke the privilege from. + On string + + // Who to revoke privilege from. 
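A short sketch of the user and privilege statements above. Worth noting: CreateUserStatement and SetPasswordUserStatement deliberately render the password as [REDACTED], so String() output is safe to log. Same assumed import path as above.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql" // assumed import path
)

func main() {
	// Illustrative only: privileges map to the READ/WRITE/ALL PRIVILEGES keywords.
	grant := &influxql.GrantStatement{
		Privilege: influxql.ReadPrivilege,
		On:        "telegraf",
		User:      "grafana",
	}
	fmt.Println(grant.String()) // GRANT READ ON telegraf TO grafana

	set := &influxql.SetPasswordUserStatement{Name: "grafana", Password: "hunter2"}
	fmt.Println(set.String()) // SET PASSWORD FOR grafana = [REDACTED]
}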
+ User string +} + +// String returns a string representation of the revoke statement. +func (s *RevokeStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("REVOKE ") + _, _ = buf.WriteString(s.Privilege.String()) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.On)) + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a RevokeStatement. +func (s *RevokeStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *RevokeStatement) DefaultDatabase() string { + return s.On +} + +// RevokeAdminStatement represents a command to revoke admin privilege from a user. +type RevokeAdminStatement struct { + // Who to revoke admin privilege from. + User string +} + +// String returns a string representation of the revoke admin statement. +func (s *RevokeAdminStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("REVOKE ALL PRIVILEGES FROM ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement. +func (s *RevokeAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// CreateRetentionPolicyStatement represents a command to create a retention policy. +type CreateRetentionPolicyStatement struct { + // Name of policy to create. + Name string + + // Name of database this policy belongs to. + Database string + + // Duration data written to this policy will be retained. + Duration time.Duration + + // Replication factor for data written to this policy. + Replication int + + // Should this policy be set as default for the database? + Default bool + + // Shard Duration. + ShardGroupDuration time.Duration +} + +// String returns a string representation of the create retention policy. +func (s *CreateRetentionPolicyStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE RETENTION POLICY ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + _, _ = buf.WriteString(" DURATION ") + _, _ = buf.WriteString(FormatDuration(s.Duration)) + _, _ = buf.WriteString(" REPLICATION ") + _, _ = buf.WriteString(strconv.Itoa(s.Replication)) + if s.ShardGroupDuration > 0 { + _, _ = buf.WriteString(" SHARD DURATION ") + _, _ = buf.WriteString(FormatDuration(s.ShardGroupDuration)) + } + if s.Default { + _, _ = buf.WriteString(" DEFAULT") + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement. +func (s *CreateRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *CreateRetentionPolicyStatement) DefaultDatabase() string { + return s.Database +} + +// AlterRetentionPolicyStatement represents a command to alter an existing retention policy. +type AlterRetentionPolicyStatement struct { + // Name of policy to alter. + Name string + + // Name of the database this policy belongs to. 
+ Database string
+
+ // Duration data written to this policy will be retained.
+ Duration *time.Duration
+
+ // Replication factor for data written to this policy.
+ Replication *int
+
+ // Should this policy be set as default for the database?
+ Default bool
+
+ // Duration of the Shard.
+ ShardGroupDuration *time.Duration
+}
+
+// String returns a string representation of the alter retention policy statement.
+func (s *AlterRetentionPolicyStatement) String() string {
+ var buf bytes.Buffer
+ _, _ = buf.WriteString("ALTER RETENTION POLICY ")
+ _, _ = buf.WriteString(QuoteIdent(s.Name))
+ _, _ = buf.WriteString(" ON ")
+ _, _ = buf.WriteString(QuoteIdent(s.Database))
+
+ if s.Duration != nil {
+ _, _ = buf.WriteString(" DURATION ")
+ _, _ = buf.WriteString(FormatDuration(*s.Duration))
+ }
+
+ if s.Replication != nil {
+ _, _ = buf.WriteString(" REPLICATION ")
+ _, _ = buf.WriteString(strconv.Itoa(*s.Replication))
+ }
+
+ if s.ShardGroupDuration != nil {
+ _, _ = buf.WriteString(" SHARD DURATION ")
+ _, _ = buf.WriteString(FormatDuration(*s.ShardGroupDuration))
+ }
+
+ if s.Default {
+ _, _ = buf.WriteString(" DEFAULT")
+ }
+
+ return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement.
+func (s *AlterRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+ return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
+}
+
+// DefaultDatabase returns the default database from the statement.
+func (s *AlterRetentionPolicyStatement) DefaultDatabase() string {
+ return s.Database
+}
+
+// FillOption represents different options for filling aggregate windows.
+type FillOption int
+
+const (
+ // NullFill means that empty aggregate windows will just have null values.
+ NullFill FillOption = iota
+ // NoFill means that empty aggregate windows will be purged from the result.
+ NoFill
+ // NumberFill means that empty aggregate windows will be filled with a provided number.
+ NumberFill
+ // PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had.
+ PreviousFill
+ // LinearFill means that empty aggregate windows will be filled with a value linearly interpolated between non-null windows.
+ LinearFill
+)
+
+// SelectStatement represents a command for extracting data from the database.
+type SelectStatement struct {
+ // Expressions returned from the selection.
+ Fields Fields
+
+ // Target (destination) for the result of a SELECT INTO query.
+ Target *Target
+
+ // Expressions used for grouping the selection.
+ Dimensions Dimensions
+
+ // Data sources (measurements) that fields are extracted from.
+ Sources Sources
+
+ // An expression evaluated on data point.
+ Condition Expr
+
+ // Fields to sort results by.
+ SortFields SortFields
+
+ // Maximum number of rows to be returned. Unlimited if zero.
+ Limit int
+
+ // Returns rows starting at an offset from the first row.
+ Offset int
+
+ // Maximum number of series to be returned. Unlimited if zero.
+ SLimit int
+
+ // Returns series starting at an offset from the first one.
+ SOffset int
+
+ // Memoized group by interval from GroupBy().
+ groupByInterval time.Duration
+
+ // Whether it's a query for raw data values (i.e. not an aggregate).
+ IsRawQuery bool
+
+ // What fill option the select statement uses, if any.
+ Fill FillOption
+
+ // The value to fill empty aggregate buckets with, if any.
+ FillValue interface{}
+
+ // The timezone for the query, if any.
+ Location *time.Location + + // Renames the implicit time field name. + TimeAlias string + + // Removes the "time" column from the output. + OmitTime bool + + // Removes duplicate rows from raw queries. + Dedupe bool +} + +// HasDerivative returns true if any function call in the statement is a +// derivative aggregate. +func (s *SelectStatement) HasDerivative() bool { + for _, f := range s.FunctionCalls() { + if f.Name == "derivative" || f.Name == "non_negative_derivative" { + return true + } + } + return false +} + +// IsSimpleDerivative return true if any function call is a derivative function with a +// variable ref as the first arg. +func (s *SelectStatement) IsSimpleDerivative() bool { + for _, f := range s.FunctionCalls() { + if f.Name == "derivative" || f.Name == "non_negative_derivative" { + // it's nested if the first argument is an aggregate function + if _, ok := f.Args[0].(*VarRef); ok { + return true + } + } + } + return false +} + +// HasSelector returns true if there is exactly one selector. +func (s *SelectStatement) HasSelector() bool { + var selector *Call + for _, f := range s.Fields { + if call, ok := f.Expr.(*Call); ok { + if selector != nil || !IsSelector(call) { + // This is an aggregate call or there is already a selector. + return false + } + selector = call + } + } + return selector != nil +} + +// TimeAscending returns true if the time field is sorted in chronological order. +func (s *SelectStatement) TimeAscending() bool { + return len(s.SortFields) == 0 || s.SortFields[0].Ascending +} + +// TimeFieldName returns the name of the time field. +func (s *SelectStatement) TimeFieldName() string { + if s.TimeAlias != "" { + return s.TimeAlias + } + return "time" +} + +// Clone returns a deep copy of the statement. +func (s *SelectStatement) Clone() *SelectStatement { + clone := *s + clone.Fields = make(Fields, 0, len(s.Fields)) + clone.Dimensions = make(Dimensions, 0, len(s.Dimensions)) + clone.Sources = cloneSources(s.Sources) + clone.SortFields = make(SortFields, 0, len(s.SortFields)) + clone.Condition = CloneExpr(s.Condition) + + if s.Target != nil { + clone.Target = &Target{ + Measurement: &Measurement{ + Database: s.Target.Measurement.Database, + RetentionPolicy: s.Target.Measurement.RetentionPolicy, + Name: s.Target.Measurement.Name, + Regex: CloneRegexLiteral(s.Target.Measurement.Regex), + }, + } + } + for _, f := range s.Fields { + clone.Fields = append(clone.Fields, &Field{Expr: CloneExpr(f.Expr), Alias: f.Alias}) + } + for _, d := range s.Dimensions { + clone.Dimensions = append(clone.Dimensions, &Dimension{Expr: CloneExpr(d.Expr)}) + } + for _, f := range s.SortFields { + clone.SortFields = append(clone.SortFields, &SortField{Name: f.Name, Ascending: f.Ascending}) + } + return &clone +} + +func cloneSources(sources Sources) Sources { + clone := make(Sources, 0, len(sources)) + for _, s := range sources { + clone = append(clone, cloneSource(s)) + } + return clone +} + +func cloneSource(s Source) Source { + if s == nil { + return nil + } + + switch s := s.(type) { + case *Measurement: + m := &Measurement{Database: s.Database, RetentionPolicy: s.RetentionPolicy, Name: s.Name} + if s.Regex != nil { + m.Regex = &RegexLiteral{Val: regexp.MustCompile(s.Regex.Val.String())} + } + return m + case *SubQuery: + return &SubQuery{Statement: s.Statement.Clone()} + default: + panic("unreachable") + } +} + +// RewriteFields returns the re-written form of the select statement. 
Any wildcard query +// fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced +// with the supplied dimensions. Any fields with no type specifier are rewritten with the +// appropriate type. +func (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error) { + // Clone the statement so we aren't rewriting the original. + other := s.Clone() + + // Iterate through the sources and rewrite any subqueries first. + for _, src := range other.Sources { + switch src := src.(type) { + case *SubQuery: + stmt, err := src.Statement.RewriteFields(m) + if err != nil { + return nil, err + } + src.Statement = stmt + } + } + + // Rewrite all variable references in the fields with their types if one + // hasn't been specified. + rewrite := func(n Node) { + ref, ok := n.(*VarRef) + if !ok || (ref.Type != Unknown && ref.Type != AnyField) { + return + } + + typ := EvalType(ref, other.Sources, m) + if typ == Tag && ref.Type == AnyField { + return + } + ref.Type = typ + } + WalkFunc(other.Fields, rewrite) + WalkFunc(other.Condition, rewrite) + + // Ignore if there are no wildcards. + hasFieldWildcard := other.HasFieldWildcard() + hasDimensionWildcard := other.HasDimensionWildcard() + if !hasFieldWildcard && !hasDimensionWildcard { + return other, nil + } + + fieldSet, dimensionSet, err := FieldDimensions(other.Sources, m) + if err != nil { + return nil, err + } + + // If there are no dimension wildcards then merge dimensions to fields. + if !hasDimensionWildcard { + // Remove the dimensions present in the group by so they don't get added as fields. + for _, d := range other.Dimensions { + switch expr := d.Expr.(type) { + case *VarRef: + if _, ok := dimensionSet[expr.Val]; ok { + delete(dimensionSet, expr.Val) + } + } + } + } + + // Sort the field and dimension names for wildcard expansion. + var fields []VarRef + if len(fieldSet) > 0 { + fields = make([]VarRef, 0, len(fieldSet)) + for name, typ := range fieldSet { + fields = append(fields, VarRef{Val: name, Type: typ}) + } + if !hasDimensionWildcard { + for name := range dimensionSet { + fields = append(fields, VarRef{Val: name, Type: Tag}) + } + dimensionSet = nil + } + sort.Sort(VarRefs(fields)) + } + dimensions := stringSetSlice(dimensionSet) + + // Rewrite all wildcard query fields + if hasFieldWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwFields := make(Fields, 0, len(other.Fields)+len(fields)-1) + for _, f := range other.Fields { + switch expr := f.Expr.(type) { + case *Wildcard: + for _, ref := range fields { + if expr.Type == FIELD && ref.Type == Tag { + continue + } else if expr.Type == TAG && ref.Type != Tag { + continue + } + rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}}) + } + case *RegexLiteral: + for _, ref := range fields { + if expr.Val.MatchString(ref.Val) { + rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}}) + } + } + case *Call: + // Clone a template that we can modify and use for new fields. + template := CloneExpr(expr).(*Call) + + // Search for the call with a wildcard by continuously descending until + // we no longer have a call. + call := template + for len(call.Args) > 0 { + arg, ok := call.Args[0].(*Call) + if !ok { + break + } + call = arg + } + + // Check if this field value is a wildcard. + if len(call.Args) == 0 { + rwFields = append(rwFields, f) + continue + } + + // Retrieve if this is a wildcard or a regular expression. 
+ var re *regexp.Regexp + switch expr := call.Args[0].(type) { + case *Wildcard: + if expr.Type == TAG { + return nil, fmt.Errorf("unable to use tag wildcard in %s()", call.Name) + } + case *RegexLiteral: + re = expr.Val + default: + rwFields = append(rwFields, f) + continue + } + + // All types that can expand wildcards support float and integer. + supportedTypes := map[DataType]struct{}{ + Float: struct{}{}, + Integer: struct{}{}, + } + + // Add additional types for certain functions. + switch call.Name { + case "count", "first", "last", "distinct", "elapsed", "mode", "sample": + supportedTypes[String] = struct{}{} + fallthrough + case "min", "max": + supportedTypes[Boolean] = struct{}{} + } + + for _, ref := range fields { + // Do not expand tags within a function call. It likely won't do anything + // anyway and will be the wrong thing in 99% of cases. + if ref.Type == Tag { + continue + } else if _, ok := supportedTypes[ref.Type]; !ok { + continue + } else if re != nil && !re.MatchString(ref.Val) { + continue + } + + // Make a new expression and replace the wildcard within this cloned expression. + call.Args[0] = &VarRef{Val: ref.Val, Type: ref.Type} + rwFields = append(rwFields, &Field{ + Expr: CloneExpr(template), + Alias: fmt.Sprintf("%s_%s", f.Name(), ref.Val), + }) + } + case *BinaryExpr: + // Search for regexes or wildcards within the binary + // expression. If we find any, throw an error indicating that + // it's illegal. + var regex, wildcard bool + WalkFunc(expr, func(n Node) { + switch n.(type) { + case *RegexLiteral: + regex = true + case *Wildcard: + wildcard = true + } + }) + + if wildcard { + return nil, fmt.Errorf("unsupported expression with wildcard: %s", f.Expr) + } else if regex { + return nil, fmt.Errorf("unsupported expression with regex field: %s", f.Expr) + } + rwFields = append(rwFields, f) + default: + rwFields = append(rwFields, f) + } + } + other.Fields = rwFields + } + + // Rewrite all wildcard GROUP BY fields + if hasDimensionWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwDimensions := make(Dimensions, 0, len(other.Dimensions)+len(dimensions)-1) + for _, d := range other.Dimensions { + switch expr := d.Expr.(type) { + case *Wildcard: + for _, name := range dimensions { + rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}}) + } + case *RegexLiteral: + for _, name := range dimensions { + if expr.Val.MatchString(name) { + rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}}) + } + } + default: + rwDimensions = append(rwDimensions, d) + } + } + other.Dimensions = rwDimensions + } + + return other, nil +} + +// RewriteRegexConditions rewrites regex conditions to make better use of the +// database index. +// +// Conditions that can currently be simplified are: +// +// - host =~ /^foo$/ becomes host = 'foo' +// - host !~ /^foo$/ becomes host != 'foo' +// +// Note: if the regex contains groups, character classes, repetition or +// similar, it's likely it won't be rewritten. In order to support rewriting +// regexes with these characters would be a lot more work. +func (s *SelectStatement) RewriteRegexConditions() { + s.Condition = RewriteExpr(s.Condition, func(e Expr) Expr { + be, ok := e.(*BinaryExpr) + if !ok || (be.Op != EQREGEX && be.Op != NEQREGEX) { + // This expression is not a binary condition or doesn't have a + // regex based operator. + return e + } + + // Handle regex-based condition. + rhs := be.RHS.(*RegexLiteral) // This must be a regex. 
+ + val, ok := matchExactRegex(rhs.Val.String()) + if !ok { + // Regex didn't match. + return e + } + + // Remove leading and trailing ^ and $. + be.RHS = &StringLiteral{Val: val} + + // Update the condition operator. + if be.Op == EQREGEX { + be.Op = EQ + } else { + be.Op = NEQ + } + return be + }) +} + +// matchExactRegex matches regexes that have the following form: /^foo$/. It +// considers /^$/ to be a matching regex. +func matchExactRegex(v string) (string, bool) { + re, err := syntax.Parse(v, syntax.Perl) + if err != nil { + // Nothing we can do or log. + return "", false + } + + if re.Op != syntax.OpConcat { + return "", false + } + + if len(re.Sub) < 2 || len(re.Sub) > 3 { + // Regex has too few or too many subexpressions. + return "", false + } + + start := re.Sub[0] + if !(start.Op == syntax.OpBeginLine || start.Op == syntax.OpBeginText) { + // Regex does not begin with ^ + return "", false + } + + end := re.Sub[len(re.Sub)-1] + if !(end.Op == syntax.OpEndLine || end.Op == syntax.OpEndText) { + // Regex does not end with $ + return "", false + } + + if len(re.Sub) == 3 { + middle := re.Sub[1] + if middle.Op != syntax.OpLiteral || middle.Flags^syntax.Perl != 0 { + // Regex does not contain a literal op. + return "", false + } + + // We can rewrite this regex. + return string(middle.Rune), true + } + + // The regex /^$/ + return "", true +} + +// RewriteDistinct rewrites the expression to be a call for map/reduce to work correctly. +// This method assumes all validation has passed. +func (s *SelectStatement) RewriteDistinct() { + WalkFunc(s.Fields, func(n Node) { + switch n := n.(type) { + case *Field: + if expr, ok := n.Expr.(*Distinct); ok { + n.Expr = expr.NewCall() + s.IsRawQuery = false + } + case *Call: + for i, arg := range n.Args { + if arg, ok := arg.(*Distinct); ok { + n.Args[i] = arg.NewCall() + } + } + } + }) +} + +// RewriteTimeFields removes any "time" field references. +func (s *SelectStatement) RewriteTimeFields() { + for i := 0; i < len(s.Fields); i++ { + switch expr := s.Fields[i].Expr.(type) { + case *VarRef: + if expr.Val == "time" { + s.TimeAlias = s.Fields[i].Alias + s.Fields = append(s.Fields[:i], s.Fields[i+1:]...) + } + } + } +} + +// RewriteTimeCondition adds time constraints to aggregate queries. +func (s *SelectStatement) RewriteTimeCondition(now time.Time) error { + interval, err := s.GroupByInterval() + if err != nil { + return err + } else if interval > 0 && s.Condition != nil { + _, tmax, err := TimeRange(s.Condition, s.Location) + if err != nil { + return err + } + + if tmax.IsZero() { + s.Condition = &BinaryExpr{ + Op: AND, + LHS: s.Condition, + RHS: &BinaryExpr{ + Op: LTE, + LHS: &VarRef{Val: "time"}, + RHS: &TimeLiteral{Val: now}, + }, + } + } + } + + for _, source := range s.Sources { + switch source := source.(type) { + case *SubQuery: + if err := source.Statement.RewriteTimeCondition(now); err != nil { + return err + } + } + } + return nil +} + +// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement +// while maintaining order of the field names. +func (s *SelectStatement) ColumnNames() []string { + // First walk each field to determine the number of columns. 
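RewriteRegexConditions above only rewrites fully anchored, literal-only regexes (of the form /^foo$/) into plain equality, which lets the engine use the index instead of regex matching. A hedged sketch, assuming the package's ParseStatement helper (defined elsewhere in the package, not in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql" // assumed import path
)

func main() {
	// Illustrative only: an anchored, literal-only regex condition.
	stmt, err := influxql.ParseStatement("SELECT usage_idle FROM cpu WHERE host =~ /^server01$/")
	if err != nil {
		log.Fatal(err)
	}
	sel := stmt.(*influxql.SelectStatement)
	sel.RewriteRegexConditions()
	// The anchored regex is replaced by an equality comparison:
	// host = 'server01'
	fmt.Println(sel.Condition.String())
}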
+ columnFields := Fields{} + for _, field := range s.Fields { + columnFields = append(columnFields, field) + + switch f := field.Expr.(type) { + case *Call: + if s.Target == nil && (f.Name == "top" || f.Name == "bottom") { + for _, arg := range f.Args[1:] { + ref, ok := arg.(*VarRef) + if ok { + columnFields = append(columnFields, &Field{Expr: ref}) + } + } + } + } + } + + // Determine if we should add an extra column for an implicit time. + offset := 0 + if !s.OmitTime { + offset++ + } + + columnNames := make([]string, len(columnFields)+offset) + if !s.OmitTime { + // Add the implicit time if requested. + columnNames[0] = s.TimeFieldName() + } + + // Keep track of the encountered column names. + names := make(map[string]int) + + // Resolve aliases first. + for i, col := range columnFields { + if col.Alias != "" { + columnNames[i+offset] = col.Alias + names[col.Alias] = 1 + } + } + + // Resolve any generated names and resolve conflicts. + for i, col := range columnFields { + if columnNames[i+offset] != "" { + continue + } + + name := col.Name() + count, conflict := names[name] + if conflict { + for { + resolvedName := fmt.Sprintf("%s_%d", name, count) + _, conflict = names[resolvedName] + if !conflict { + names[name] = count + 1 + name = resolvedName + break + } + count++ + } + } + names[name]++ + columnNames[i+offset] = name + } + return columnNames +} + +// FieldExprByName returns the expression that matches the field name and the +// index where this was found. If the name matches one of the arguments to +// "top" or "bottom", the variable reference inside of the function is returned +// and the index is of the function call rather than the variable reference. +// If no expression is found, -1 is returned for the index and the expression +// will be nil. +func (s *SelectStatement) FieldExprByName(name string) (int, Expr) { + for i, f := range s.Fields { + if f.Name() == name { + return i, f.Expr + } else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") && len(call.Args) > 2 { + for _, arg := range call.Args[1 : len(call.Args)-1] { + if arg, ok := arg.(*VarRef); ok && arg.Val == name { + return i, arg + } + } + } + } + return -1, nil +} + +// Reduce calls the Reduce function on the different components of the +// SelectStatement to reduce the statement. +func (s *SelectStatement) Reduce(valuer Valuer) *SelectStatement { + stmt := s.Clone() + stmt.Condition = Reduce(stmt.Condition, valuer) + for _, d := range stmt.Dimensions { + d.Expr = Reduce(d.Expr, valuer) + } + + for _, source := range stmt.Sources { + switch source := source.(type) { + case *SubQuery: + source.Statement = source.Statement.Reduce(valuer) + } + } + return stmt +} + +// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time. +// This is needed to determine re-write behaviors for functions like TOP and BOTTOM. +func (s *SelectStatement) HasTimeFieldSpecified() bool { + for _, f := range s.Fields { + if f.Name() == "time" { + return true + } + } + return false +} + +// String returns a string representation of the select statement. 
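ColumnNames above prepends the implicit time column (unless OmitTime is set), resolves aliases first, and then disambiguates repeated generated names with a _N suffix. A sketch under the same import and parse assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql" // assumed import path
)

func main() {
	// Illustrative only: one aliased field plus two calls that generate the same name.
	stmt, err := influxql.ParseStatement("SELECT mean(usage_idle) AS avg_idle, max(usage_idle), max(usage_user) FROM cpu")
	if err != nil {
		log.Fatal(err)
	}
	sel := stmt.(*influxql.SelectStatement)
	// Prints: [time avg_idle max max_1]
	fmt.Println(sel.ColumnNames())
}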
+func (s *SelectStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SELECT ") + _, _ = buf.WriteString(s.Fields.String()) + + if s.Target != nil { + _, _ = buf.WriteString(" ") + _, _ = buf.WriteString(s.Target.String()) + } + if len(s.Sources) > 0 { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.Dimensions) > 0 { + _, _ = buf.WriteString(" GROUP BY ") + _, _ = buf.WriteString(s.Dimensions.String()) + } + switch s.Fill { + case NoFill: + _, _ = buf.WriteString(" fill(none)") + case NumberFill: + _, _ = buf.WriteString(fmt.Sprintf(" fill(%v)", s.FillValue)) + case LinearFill: + _, _ = buf.WriteString(" fill(linear)") + case PreviousFill: + _, _ = buf.WriteString(" fill(previous)") + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + if s.SLimit > 0 { + _, _ = fmt.Fprintf(&buf, " SLIMIT %d", s.SLimit) + } + if s.SOffset > 0 { + _, _ = fmt.Fprintf(&buf, " SOFFSET %d", s.SOffset) + } + if s.Location != nil { + _, _ = fmt.Fprintf(&buf, ` TZ('%s')`, s.Location) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute the SelectStatement. +// NOTE: Statement should be normalized first (database name(s) in Sources and +// Target should be populated). If the statement has not been normalized, an +// empty string will be returned for the database name and it is up to the caller +// to interpret that as the default database. +func (s *SelectStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + ep := ExecutionPrivileges{} + for _, source := range s.Sources { + switch source := source.(type) { + case *Measurement: + ep = append(ep, ExecutionPrivilege{ + Name: source.Database, + Privilege: ReadPrivilege, + }) + case *SubQuery: + privs, err := source.Statement.RequiredPrivileges() + if err != nil { + return nil, err + } + ep = append(ep, privs...) + default: + return nil, fmt.Errorf("invalid source: %s", source) + } + } + + if s.Target != nil { + p := ExecutionPrivilege{Admin: false, Name: s.Target.Measurement.Database, Privilege: WritePrivilege} + ep = append(ep, p) + } + return ep, nil +} + +// HasWildcard returns whether or not the select statement has at least 1 wildcard. +func (s *SelectStatement) HasWildcard() bool { + return s.HasFieldWildcard() || s.HasDimensionWildcard() +} + +// HasFieldWildcard returns whether or not the select statement has at least 1 wildcard in the fields. +func (s *SelectStatement) HasFieldWildcard() (hasWildcard bool) { + WalkFunc(s.Fields, func(n Node) { + if hasWildcard { + return + } + switch n.(type) { + case *Wildcard, *RegexLiteral: + hasWildcard = true + } + }) + return hasWildcard +} + +// HasDimensionWildcard returns whether or not the select statement has +// at least 1 wildcard in the dimensions aka `GROUP BY`. 
+func (s *SelectStatement) HasDimensionWildcard() bool { + for _, d := range s.Dimensions { + switch d.Expr.(type) { + case *Wildcard, *RegexLiteral: + return true + } + } + + return false +} + +func (s *SelectStatement) validate(tr targetRequirement) error { + if err := s.validateFields(); err != nil { + return err + } + + if err := s.validateDimensions(); err != nil { + return err + } + + if err := s.validateDistinct(); err != nil { + return err + } + + if err := s.validateTopBottom(); err != nil { + return err + } + + if err := s.validateAggregates(tr); err != nil { + return err + } + + if err := s.validateFill(); err != nil { + return err + } + + return nil +} + +func (s *SelectStatement) validateFields() error { + ns := s.NamesInSelect() + if len(ns) == 1 && ns[0] == "time" { + return fmt.Errorf("at least 1 non-time field must be queried") + } + + for _, f := range s.Fields { + switch expr := f.Expr.(type) { + case *BinaryExpr: + if err := expr.validate(); err != nil { + return err + } + } + } + return nil +} + +func (s *SelectStatement) validateDimensions() error { + var dur time.Duration + for _, dim := range s.Dimensions { + switch expr := dim.Expr.(type) { + case *Call: + // Ensure the call is time() and it has one or two duration arguments. + // If we already have a duration + if expr.Name != "time" { + return errors.New("only time() calls allowed in dimensions") + } else if got := len(expr.Args); got < 1 || got > 2 { + return errors.New("time dimension expected 1 or 2 arguments") + } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { + return errors.New("time dimension must have duration argument") + } else if dur != 0 { + return errors.New("multiple time dimensions not allowed") + } else { + dur = lit.Val + if len(expr.Args) == 2 { + switch lit := expr.Args[1].(type) { + case *DurationLiteral: + // noop + case *Call: + if lit.Name != "now" { + return errors.New("time dimension offset function must be now()") + } else if len(lit.Args) != 0 { + return errors.New("time dimension offset now() function requires no arguments") + } + default: + return errors.New("time dimension offset must be duration or now()") + } + } + } + case *VarRef: + if strings.ToLower(expr.Val) == "time" { + return errors.New("time() is a function and expects at least one argument") + } + case *Wildcard: + case *RegexLiteral: + default: + return errors.New("only time and tag dimensions allowed") + } + } + return nil +} + +// validSelectWithAggregate determines if a SELECT statement has the correct +// combination of aggregate functions combined with selected fields and tags +// Currently we don't have support for all aggregates, but aggregates that +// can be combined with fields/tags are: +// TOP, BOTTOM, MAX, MIN, FIRST, LAST +func (s *SelectStatement) validSelectWithAggregate() error { + calls := map[string]struct{}{} + numAggregates := 0 + for _, f := range s.Fields { + fieldCalls := walkFunctionCalls(f.Expr) + for _, c := range fieldCalls { + calls[c.Name] = struct{}{} + } + if len(fieldCalls) != 0 { + numAggregates++ + } + } + // For TOP, BOTTOM, MAX, MIN, FIRST, LAST, PERCENTILE (selector functions) it is ok to ask for fields and tags + // but only if one function is specified. 
+// Combining multiple functions and fields and tags is not currently supported.
+ onlySelectors := true
+ for k := range calls {
+ switch k {
+ case "top", "bottom", "max", "min", "first", "last", "percentile", "sample":
+ default:
+ onlySelectors = false
+ break
+ }
+ }
+ if onlySelectors {
+ // If they only have one selector, they can have as many fields or tags as they want
+ if numAggregates == 1 {
+ return nil
+ }
+ // If they have multiple selectors, they are not allowed to have any other fields or tags specified
+ if numAggregates > 1 && len(s.Fields) != numAggregates {
+ return fmt.Errorf("mixing multiple selector functions with tags or fields is not supported")
+ }
+ }
+
+ if numAggregates != 0 && numAggregates != len(s.Fields) {
+ return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported")
+ }
+ return nil
+}
+
+// validTopBottomAggr determines if TOP or BOTTOM aggregates have valid arguments.
+func (s *SelectStatement) validTopBottomAggr(expr *Call) error {
+ if exp, got := 2, len(expr.Args); got < exp {
+ return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got)
+ }
+ if len(expr.Args) > 1 {
+ callLimit, ok := expr.Args[len(expr.Args)-1].(*IntegerLiteral)
+ if !ok {
+ return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1])
+ }
+ // Check if they asked for a limit smaller than what they passed into the call
+ if int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 {
+ return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", int64(callLimit.Val), expr.Name, int64(s.Limit))
+ }
+
+ for _, v := range expr.Args[:len(expr.Args)-1] {
+ if _, ok := v.(*VarRef); !ok {
+ return fmt.Errorf("only fields or tags are allowed in %s(), found %s", expr.Name, v)
+ }
+ }
+ }
+ return nil
+}
+
+// validPercentileAggr determines if the call to PERCENTILE has valid arguments.
+func (s *SelectStatement) validPercentileAggr(expr *Call) error {
+ if err := s.validSelectWithAggregate(); err != nil {
+ return err
+ }
+ if exp, got := 2, len(expr.Args); got != exp {
+ return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
+ }
+
+ switch expr.Args[0].(type) {
+ case *VarRef, *RegexLiteral, *Wildcard:
+ // do nothing
+ default:
+ return fmt.Errorf("expected field argument in percentile()")
+ }
+
+ switch expr.Args[1].(type) {
+ case *IntegerLiteral, *NumberLiteral:
+ return nil
+ default:
+ return fmt.Errorf("expected float argument in percentile()")
+ }
+}
+
+// validSampleAggr determines if the call to SAMPLE has valid arguments.
+func (s *SelectStatement) validSampleAggr(expr *Call) error { + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 2, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + + switch expr.Args[0].(type) { + case *VarRef, *RegexLiteral, *Wildcard: + // do nothing + default: + return fmt.Errorf("expected field argument in sample()") + } + + switch expr.Args[1].(type) { + case *IntegerLiteral: + return nil + default: + return fmt.Errorf("expected integer argument in sample()") + } +} + +func (s *SelectStatement) validateAggregates(tr targetRequirement) error { + for _, f := range s.Fields { + for _, expr := range walkFunctionCalls(f.Expr) { + switch expr.Name { + case "derivative", "non_negative_derivative", "difference", "non_negative_difference", "moving_average", "cumulative_sum", "elapsed": + if err := s.validSelectWithAggregate(); err != nil { + return err + } + switch expr.Name { + case "derivative", "non_negative_derivative", "elapsed": + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // If a duration arg is passed, make sure it's a duration + if len(expr.Args) == 2 { + // Second must be a duration .e.g (1h) + if _, ok := expr.Args[1].(*DurationLiteral); !ok { + return fmt.Errorf("second argument to %s must be a duration, got %T", expr.Name, expr.Args[1]) + } + } + case "difference", "non_negative_difference", "cumulative_sum": + if got := len(expr.Args); got != 1 { + return fmt.Errorf("invalid number of arguments for %s, expected 1, got %d", expr.Name, got) + } + case "moving_average": + if got := len(expr.Args); got != 2 { + return fmt.Errorf("invalid number of arguments for moving_average, expected 2, got %d", got) + } + + if lit, ok := expr.Args[1].(*IntegerLiteral); !ok { + return fmt.Errorf("second argument for moving_average must be an integer, got %T", expr.Args[1]) + } else if lit.Val <= 1 { + return fmt.Errorf("moving_average window must be greater than 1, got %d", lit.Val) + } else if int64(int(lit.Val)) != lit.Val { + return fmt.Errorf("moving_average window too large, got %d", lit.Val) + } + } + // Validate that if they have grouping by time, they need a sub-call like min/max, etc. 
+ groupByInterval, err := s.GroupByInterval() + if err != nil { + return fmt.Errorf("invalid group interval: %v", err) + } + + if c, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 && tr != targetSubquery { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", expr.Name) + } else if !ok && groupByInterval > 0 { + return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) + } else if ok { + switch c.Name { + case "top", "bottom": + if err := s.validTopBottomAggr(c); err != nil { + return err + } + case "percentile": + if err := s.validPercentileAggr(c); err != nil { + return err + } + default: + if exp, got := 1, len(c.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + } + + switch fc := c.Args[0].(type) { + case *VarRef, *Wildcard, *RegexLiteral: + // do nothing + case *Call: + if fc.Name != "distinct" || expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", c.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct %s) can only have %d argument(s), got %d", fc.Name, exp, got) + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", c.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", c.Name) + } + } + } + case "top", "bottom": + if err := s.validTopBottomAggr(expr); err != nil { + return err + } + case "percentile": + if err := s.validPercentileAggr(expr); err != nil { + return err + } + case "sample": + if err := s.validSampleAggr(expr); err != nil { + return err + } + case "integral": + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // If a duration arg is passed, make sure it's a duration + if len(expr.Args) == 2 { + // Second must be a duration .e.g (1h) + if _, ok := expr.Args[1].(*DurationLiteral); !ok { + return errors.New("second argument must be a duration") + } + } + case "holt_winters", "holt_winters_with_fit": + if exp, got := 3, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + // Validate that if they have grouping by time, they need a sub-call like min/max, etc. + groupByInterval, err := s.GroupByInterval() + if err != nil { + return fmt.Errorf("invalid group interval: %v", err) + } + + if _, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 && tr != targetSubquery { + return fmt.Errorf("%s aggregate requires a GROUP BY interval", expr.Name) + } else if !ok { + return fmt.Errorf("must use aggregate function with %s", expr.Name) + } + if arg, ok := expr.Args[1].(*IntegerLiteral); !ok { + return fmt.Errorf("expected integer argument as second arg in %s", expr.Name) + } else if arg.Val <= 0 { + return fmt.Errorf("second arg to %s must be greater than 0, got %d", expr.Name, arg.Val) + } + if _, ok := expr.Args[2].(*IntegerLiteral); !ok { + return fmt.Errorf("expected integer argument as third arg in %s", expr.Name) + } + default: + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 1, len(expr.Args); got != exp { + // Special error message if distinct was used as the argument. 
+ if expr.Name == "count" && got >= 1 { + if _, ok := expr.Args[0].(*Distinct); ok { + return fmt.Errorf("count(distinct ) can only have one argument") + } + } + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + switch fc := expr.Args[0].(type) { + case *VarRef, *Wildcard, *RegexLiteral: + // do nothing + case *Call: + if fc.Name != "distinct" || expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct ) can only have one argument") + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + } + } + } + + // Check that we have valid duration and where clauses for aggregates + + // fetch the group by duration + groupByDuration, _ := s.GroupByInterval() + + // If we have a group by interval, but no aggregate function, it's an invalid statement + if s.IsRawQuery && groupByDuration > 0 { + return fmt.Errorf("GROUP BY requires at least one aggregate function") + } + + // If we have an aggregate function with a group by time without a where clause, it's an invalid statement + if tr == targetNotRequired { // ignore create continuous query statements + if err := s.validateTimeExpression(); err != nil { + return err + } + } + if tr != targetSubquery { + if err := s.validateGroupByInterval(); err != nil { + return err + } + } + return nil +} + +// validateFill ensures that the fill option matches the query type. +func (s *SelectStatement) validateFill() error { + info := newSelectInfo(s) + if len(info.calls) == 0 { + switch s.Fill { + case NoFill: + return errors.New("fill(none) must be used with a function") + case LinearFill: + return errors.New("fill(linear) must be used with a function") + } + } + return nil +} + +// validateTimeExpression ensures that any select statements that have a group +// by interval either have a time expression limiting the time range or have a +// parent query that does that. +func (s *SelectStatement) validateTimeExpression() error { + // If we have a time expression, we and all subqueries are fine. + if HasTimeExpr(s.Condition) { + return nil + } + + // Check if this is not a raw query and if the group by duration exists. + // If these are true, then we have an error. + interval, err := s.GroupByInterval() + if err != nil { + return err + } else if !s.IsRawQuery && interval > 0 { + return fmt.Errorf("aggregate functions with GROUP BY time require a WHERE time clause") + } + + // Validate the subqueries. If we have a time expression in this select + // statement, we don't need to do this because parent time ranges propagate + // to children. So we only execute this when there is no time condition in + // the parent. + for _, source := range s.Sources { + switch source := source.(type) { + case *SubQuery: + if err := source.Statement.validateTimeExpression(); err != nil { + return err + } + } + } + return nil +} + +// validateGroupByInterval ensures that a select statement is grouped by an +// interval if it contains certain functions. 
+func (s *SelectStatement) validateGroupByInterval() error {
+ interval, err := s.GroupByInterval()
+ if err != nil {
+ return err
+ } else if interval > 0 {
+ // If we have an interval here, that means the interval will propagate
+ // into any subqueries and we can just stop looking.
+ return nil
+ }
+
+ // Check inside of the fields for any of the specific functions that need a group by interval.
+ for _, f := range s.Fields {
+ switch expr := f.Expr.(type) {
+ case *Call:
+ switch expr.Name {
+ case "derivative", "non_negative_derivative", "difference", "non_negative_difference", "moving_average", "cumulative_sum", "elapsed", "holt_winters", "holt_winters_with_fit":
+ // If the first argument is a call, we need a group by interval and we don't have one.
+ if _, ok := expr.Args[0].(*Call); ok {
+ return fmt.Errorf("%s aggregate requires a GROUP BY interval", expr.Name)
+ }
+ }
+ }
+ }
+
+ // Validate the subqueries.
+ for _, source := range s.Sources {
+ switch source := source.(type) {
+ case *SubQuery:
+ if err := source.Statement.validateGroupByInterval(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// HasDistinct checks if a select statement contains a call to DISTINCT.
+func (s *SelectStatement) HasDistinct() bool {
+ for _, f := range s.Fields {
+ switch c := f.Expr.(type) {
+ case *Call:
+ if c.Name == "distinct" {
+ return true
+ }
+ case *Distinct:
+ return true
+ }
+ }
+ return false
+}
+
+func (s *SelectStatement) validateDistinct() error {
+ if !s.HasDistinct() {
+ return nil
+ }
+
+ if len(s.Fields) > 1 {
+ return fmt.Errorf("aggregate function distinct() cannot be combined with other functions or fields")
+ }
+
+ switch c := s.Fields[0].Expr.(type) {
+ case *Call:
+ if len(c.Args) == 0 {
+ return fmt.Errorf("distinct function requires at least one argument")
+ }
+
+ if len(c.Args) != 1 {
+ return fmt.Errorf("distinct function can only have one argument")
+ }
+ }
+ return nil
+}
+
+func (s *SelectStatement) validateTopBottom() error {
+ // Ensure there are not multiple calls if top/bottom is present.
+ info := newSelectInfo(s)
+ if len(info.calls) > 1 {
+ for call := range info.calls {
+ if call.Name == "top" || call.Name == "bottom" {
+ return fmt.Errorf("selector function %s() cannot be combined with other functions", call.Name)
+ }
+ }
+ }
+ return nil
+}
+
+// GroupByInterval extracts the time interval, if specified.
+func (s *SelectStatement) GroupByInterval() (time.Duration, error) {
+ // return if we've already pulled it out
+ if s.groupByInterval != 0 {
+ return s.groupByInterval, nil
+ }
+
+ // Ignore if there are no dimensions.
+ if len(s.Dimensions) == 0 {
+ return 0, nil
+ }
+
+ for _, d := range s.Dimensions {
+ if call, ok := d.Expr.(*Call); ok && call.Name == "time" {
+ // Make sure there are one or two arguments.
+ if got := len(call.Args); got < 1 || got > 2 {
+ return 0, errors.New("time dimension expected 1 or 2 arguments")
+ }
+
+ // Ensure the argument is a duration.
+ lit, ok := call.Args[0].(*DurationLiteral)
+ if !ok {
+ return 0, errors.New("time dimension must have duration argument")
+ }
+ s.groupByInterval = lit.Val
+ return lit.Val, nil
+ }
+ }
+ return 0, nil
+}
+
+// GroupByOffset extracts the time interval offset, if specified.
+func (s *SelectStatement) GroupByOffset() (time.Duration, error) {
+ interval, err := s.GroupByInterval()
+ if err != nil {
+ return 0, err
+ }
+
+ // Ignore if there are no dimensions.
+ if len(s.Dimensions) == 0 { + return 0, nil + } + + for _, d := range s.Dimensions { + if call, ok := d.Expr.(*Call); ok && call.Name == "time" { + if len(call.Args) == 2 { + switch expr := call.Args[1].(type) { + case *DurationLiteral: + return expr.Val % interval, nil + case *TimeLiteral: + return expr.Val.Sub(expr.Val.Truncate(interval)), nil + default: + return 0, fmt.Errorf("invalid time dimension offset: %s", expr) + } + } + return 0, nil + } + } + return 0, nil +} + +// SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive. +// This is used commonly for continuous queries so the start and end are in buckets. +func (s *SelectStatement) SetTimeRange(start, end time.Time) error { + cond := fmt.Sprintf("time >= '%s' AND time < '%s'", start.UTC().Format(time.RFC3339Nano), end.UTC().Format(time.RFC3339Nano)) + if s.Condition != nil { + cond = fmt.Sprintf("%s AND %s", s.rewriteWithoutTimeDimensions(), cond) + } + + expr, err := NewParser(strings.NewReader(cond)).ParseExpr() + if err != nil { + return err + } + + // Fold out any previously replaced time dimensions and set the condition. + s.Condition = Reduce(expr, nil) + + return nil +} + +// rewriteWithoutTimeDimensions will remove any WHERE time... clauses from the select statement. +// This is necessary when setting an explicit time range to override any that previously existed. +func (s *SelectStatement) rewriteWithoutTimeDimensions() string { + n := RewriteFunc(s.Condition, func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + return n + case *Call: + return &BooleanLiteral{Val: true} + default: + return n + } + }) + + return n.String() +} + +// NamesInWhere returns the field and tag names (idents) referenced in the where clause. +func (s *SelectStatement) NamesInWhere() []string { + var a []string + if s.Condition != nil { + a = walkNames(s.Condition) + } + return a +} + +// NamesInSelect returns the field and tag names (idents) in the select clause. +func (s *SelectStatement) NamesInSelect() []string { + var a []string + + for _, f := range s.Fields { + a = append(a, walkNames(f.Expr)...) + } + + return a +} + +// NamesInDimension returns the field and tag names (idents) in the group by clause. +func (s *SelectStatement) NamesInDimension() []string { + var a []string + + for _, d := range s.Dimensions { + a = append(a, walkNames(d.Expr)...) + } + + return a +} + +// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. +func LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet { + // Ignore if no limit or offset is specified. + if slimit == 0 && soffset == 0 { + return a + } + + // If offset is beyond the number of tag sets then return nil. + if soffset > len(a) { + return nil + } + + // Clamp limit to the max number of tag sets. + if soffset+slimit > len(a) { + slimit = len(a) - soffset + } + return a[soffset : soffset+slimit] +} + +// walkNames will walk the Expr and return the identifier names used. +func walkNames(exp Expr) []string { + switch expr := exp.(type) { + case *VarRef: + return []string{expr.Val} + case *Call: + var a []string + for _, expr := range expr.Args { + if ref, ok := expr.(*VarRef); ok { + a = append(a, ref.Val) + } + } + return a + case *BinaryExpr: + var ret []string + ret = append(ret, walkNames(expr.LHS)...) + ret = append(ret, walkNames(expr.RHS)...) 
+ return ret + case *ParenExpr: + return walkNames(expr.Expr) + } + + return nil +} + +// walkRefs will walk the Expr and return the var refs used. +func walkRefs(exp Expr) []VarRef { + refs := make(map[VarRef]struct{}) + var walk func(exp Expr) + walk = func(exp Expr) { + switch expr := exp.(type) { + case *VarRef: + refs[*expr] = struct{}{} + case *Call: + for _, expr := range expr.Args { + if ref, ok := expr.(*VarRef); ok { + refs[*ref] = struct{}{} + } + } + case *BinaryExpr: + walk(expr.LHS) + walk(expr.RHS) + case *ParenExpr: + walk(expr.Expr) + } + } + walk(exp) + + // Turn the map into a slice. + a := make([]VarRef, 0, len(refs)) + for ref := range refs { + a = append(a, ref) + } + return a +} + +// ExprNames returns a list of non-"time" field names from an expression. +func ExprNames(expr Expr) []VarRef { + m := make(map[VarRef]struct{}) + for _, ref := range walkRefs(expr) { + if ref.Val == "time" { + continue + } + m[ref] = struct{}{} + } + + a := make([]VarRef, 0, len(m)) + for k := range m { + a = append(a, k) + } + sort.Sort(VarRefs(a)) + + return a +} + +// FunctionCalls returns the Call objects from the query. +func (s *SelectStatement) FunctionCalls() []*Call { + var a []*Call + for _, f := range s.Fields { + a = append(a, walkFunctionCalls(f.Expr)...) + } + return a +} + +// FunctionCallsByPosition returns the Call objects from the query in the order they appear in the select statement. +func (s *SelectStatement) FunctionCallsByPosition() [][]*Call { + var a [][]*Call + for _, f := range s.Fields { + a = append(a, walkFunctionCalls(f.Expr)) + } + return a +} + +// walkFunctionCalls walks the Expr and returns any function calls made. +func walkFunctionCalls(exp Expr) []*Call { + switch expr := exp.(type) { + case *VarRef: + return nil + case *Call: + return []*Call{expr} + case *BinaryExpr: + var ret []*Call + ret = append(ret, walkFunctionCalls(expr.LHS)...) + ret = append(ret, walkFunctionCalls(expr.RHS)...) + return ret + case *ParenExpr: + return walkFunctionCalls(expr.Expr) + } + + return nil +} + +// MatchSource returns the source name that matches a field name. +// It returns a blank string if no sources match. +func MatchSource(sources Sources, name string) string { + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + if strings.HasPrefix(name, src.Name) { + return src.Name + } + } + } + return "" +} + +// Target represents a target (destination) policy, measurement, and DB. +type Target struct { + // Measurement to write into. + Measurement *Measurement +} + +// String returns a string representation of the Target. +func (t *Target) String() string { + if t == nil { + return "" + } + + var buf bytes.Buffer + _, _ = buf.WriteString("INTO ") + _, _ = buf.WriteString(t.Measurement.String()) + if t.Measurement.Name == "" { + _, _ = buf.WriteString(":MEASUREMENT") + } + + return buf.String() +} + +// DeleteStatement represents a command for deleting data from the database. +type DeleteStatement struct { + // Data source that values are removed from. + Source Source + + // An expression evaluated on data point. + Condition Expr +} + +// String returns a string representation of the delete statement. 
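+// For example (illustrative values), a statement whose Source is cpu and whose
+// Condition is host = 'server01' renders as:
+//   DELETE FROM cpu WHERE host = 'server01'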
+func (s *DeleteStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DELETE FROM ") + _, _ = buf.WriteString(s.Source.String()) + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DeleteStatement. +func (s *DeleteStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *DeleteStatement) DefaultDatabase() string { + if m, ok := s.Source.(*Measurement); ok { + return m.Database + } + return "" +} + +// ShowSeriesStatement represents a command for listing series in the database. +type ShowSeriesStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + + // Measurement(s) the series are listed for. + Sources Sources + + // An expression evaluated on a series name or tag. + Condition Expr + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the list series statement. +func (s *ShowSeriesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW SERIES") + + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a ShowSeriesStatement. +func (s *ShowSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *ShowSeriesStatement) DefaultDatabase() string { + return s.Database +} + +// DropSeriesStatement represents a command for removing a series from the database. +type DropSeriesStatement struct { + // Data source that fields are extracted from (optional) + Sources Sources + + // An expression evaluated on data point (optional) + Condition Expr +} + +// String returns a string representation of the drop series statement. +func (s *DropSeriesStatement) String() string { + var buf bytes.Buffer + buf.WriteString("DROP SERIES") + + if s.Sources != nil { + buf.WriteString(" FROM ") + buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + buf.WriteString(" WHERE ") + buf.WriteString(s.Condition.String()) + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropSeriesStatement. 
+func (s DropSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil
+}
+
+// DeleteSeriesStatement represents a command for deleting all or part of a series from a database.
+type DeleteSeriesStatement struct {
+	// Data source that fields are extracted from (optional)
+	Sources Sources
+
+	// An expression evaluated on data point (optional)
+	Condition Expr
+}
+
+// String returns a string representation of the delete series statement.
+func (s *DeleteSeriesStatement) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("DELETE")
+
+	if s.Sources != nil {
+		buf.WriteString(" FROM ")
+		buf.WriteString(s.Sources.String())
+	}
+	if s.Condition != nil {
+		buf.WriteString(" WHERE ")
+		buf.WriteString(s.Condition.String())
+	}
+
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute a DeleteSeriesStatement.
+func (s DeleteSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil
+}
+
+// DropShardStatement represents a command for removing a shard from
+// the node.
+type DropShardStatement struct {
+	// ID of the shard to be dropped.
+	ID uint64
+}
+
+// String returns a string representation of the drop shard statement.
+func (s *DropShardStatement) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("DROP SHARD ")
+	buf.WriteString(strconv.FormatUint(s.ID, 10))
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute a
+// DropShardStatement.
+func (s *DropShardStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
+}
+
+// ShowContinuousQueriesStatement represents a command for listing continuous queries.
+type ShowContinuousQueriesStatement struct{}
+
+// String returns a string representation of the show continuous queries statement.
+func (s *ShowContinuousQueriesStatement) String() string { return "SHOW CONTINUOUS QUERIES" }
+
+// RequiredPrivileges returns the privilege required to execute a ShowContinuousQueriesStatement.
+func (s *ShowContinuousQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
+}
+
+// ShowGrantsForUserStatement represents a command for listing user privileges.
+type ShowGrantsForUserStatement struct {
+	// Name of the user to display privileges.
+	Name string
+}
+
+// String returns a string representation of the show grants for user statement.
+func (s *ShowGrantsForUserStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("SHOW GRANTS FOR ")
+	_, _ = buf.WriteString(QuoteIdent(s.Name))
+
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement.
+func (s *ShowGrantsForUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
+}
+
+// ShowDatabasesStatement represents a command for listing all databases in the cluster.
+type ShowDatabasesStatement struct{}
+
+// String returns a string representation of the show databases command.
+func (s *ShowDatabasesStatement) String() string { return "SHOW DATABASES" }
+
+// RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement.
+func (s *ShowDatabasesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + // SHOW DATABASES is one of few statements that have no required privileges. + // Anyone is allowed to execute it, but the returned results depend on the user's + // individual database permissions. + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: NoPrivileges}}, nil +} + +// CreateContinuousQueryStatement represents a command for creating a continuous query. +type CreateContinuousQueryStatement struct { + // Name of the continuous query to be created. + Name string + + // Name of the database to create the continuous query on. + Database string + + // Source of data (SELECT statement). + Source *SelectStatement + + // Interval to resample previous queries. + ResampleEvery time.Duration + + // Maximum duration to resample previous queries. + ResampleFor time.Duration +} + +// String returns a string representation of the statement. +func (s *CreateContinuousQueryStatement) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "CREATE CONTINUOUS QUERY %s ON %s ", QuoteIdent(s.Name), QuoteIdent(s.Database)) + + if s.ResampleEvery > 0 || s.ResampleFor > 0 { + buf.WriteString("RESAMPLE ") + if s.ResampleEvery > 0 { + fmt.Fprintf(&buf, "EVERY %s ", FormatDuration(s.ResampleEvery)) + } + if s.ResampleFor > 0 { + fmt.Fprintf(&buf, "FOR %s ", FormatDuration(s.ResampleFor)) + } + } + fmt.Fprintf(&buf, "BEGIN %s END", s.Source.String()) + return buf.String() +} + +// DefaultDatabase returns the default database from the statement. +func (s *CreateContinuousQueryStatement) DefaultDatabase() string { + return s.Database +} + +// RequiredPrivileges returns the privilege required to execute a CreateContinuousQueryStatement. +func (s *CreateContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + ep := ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}} + + // Selecting into a database that's different from the source? + if s.Source.Target.Measurement.Database != "" { + // Change source database privilege requirement to read. + ep[0].Privilege = ReadPrivilege + + // Add destination database privilege requirement and set it to write. + p := ExecutionPrivilege{ + Admin: false, + Name: s.Source.Target.Measurement.Database, + Privilege: WritePrivilege, + } + ep = append(ep, p) + } + + return ep, nil +} + +func (s *CreateContinuousQueryStatement) validate() error { + interval, err := s.Source.GroupByInterval() + if err != nil { + return err + } + + if s.ResampleFor != 0 { + if s.ResampleEvery != 0 && s.ResampleEvery > interval { + interval = s.ResampleEvery + } + if interval > s.ResampleFor { + return fmt.Errorf("FOR duration must be >= GROUP BY time duration: must be a minimum of %s, got %s", FormatDuration(interval), FormatDuration(s.ResampleFor)) + } + } + return nil +} + +// DropContinuousQueryStatement represents a command for removing a continuous query. +type DropContinuousQueryStatement struct { + Name string + Database string +} + +// String returns a string representation of the statement. 
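+// For example (illustrative names): DROP CONTINUOUS QUERY cq_30m ON telegraf,
+// with both identifiers passed through QuoteIdent.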
+func (s *DropContinuousQueryStatement) String() string { + return fmt.Sprintf("DROP CONTINUOUS QUERY %s ON %s", QuoteIdent(s.Name), QuoteIdent(s.Database)) +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropContinuousQueryStatement +func (s *DropContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *DropContinuousQueryStatement) DefaultDatabase() string { + return s.Database +} + +// ShowMeasurementsStatement represents a command for listing measurements. +type ShowMeasurementsStatement struct { + // Database to query. If blank, use the default database. + Database string + + // Measurement name or regex. + Source Source + + // An expression evaluated on data point. + Condition Expr + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the statement. +func (s *ShowMeasurementsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW MEASUREMENTS") + + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(s.Database) + } + if s.Source != nil { + _, _ = buf.WriteString(" WITH MEASUREMENT ") + if m, ok := s.Source.(*Measurement); ok && m.Regex != nil { + _, _ = buf.WriteString("=~ ") + } else { + _, _ = buf.WriteString("= ") + } + _, _ = buf.WriteString(s.Source.String()) + } + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowMeasurementsStatement. +func (s *ShowMeasurementsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *ShowMeasurementsStatement) DefaultDatabase() string { + return s.Database +} + +// DropMeasurementStatement represents a command to drop a measurement. +type DropMeasurementStatement struct { + // Name of the measurement to be dropped. + Name string +} + +// String returns a string representation of the drop measurement statement. +func (s *DropMeasurementStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP MEASUREMENT ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropMeasurementStatement +func (s *DropMeasurementStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowQueriesStatement represents a command for listing all running queries. +type ShowQueriesStatement struct{} + +// String returns a string representation of the show queries statement. 
+func (s *ShowQueriesStatement) String() string { + return "SHOW QUERIES" +} + +// RequiredPrivileges returns the privilege required to execute a ShowQueriesStatement. +func (s *ShowQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// ShowRetentionPoliciesStatement represents a command for listing retention policies. +type ShowRetentionPoliciesStatement struct { + // Name of the database to list policies for. + Database string +} + +// String returns a string representation of a ShowRetentionPoliciesStatement. +func (s *ShowRetentionPoliciesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW RETENTION POLICIES") + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement +func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *ShowRetentionPoliciesStatement) DefaultDatabase() string { + return s.Database +} + +// ShowStatsStatement displays statistics for a given module. +type ShowStatsStatement struct { + Module string +} + +// String returns a string representation of a ShowStatsStatement. +func (s *ShowStatsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW STATS") + if s.Module != "" { + _, _ = buf.WriteString(" FOR ") + _, _ = buf.WriteString(QuoteString(s.Module)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement +func (s *ShowStatsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowShardGroupsStatement represents a command for displaying shard groups in the cluster. +type ShowShardGroupsStatement struct{} + +// String returns a string representation of the SHOW SHARD GROUPS command. +func (s *ShowShardGroupsStatement) String() string { return "SHOW SHARD GROUPS" } + +// RequiredPrivileges returns the privileges required to execute the statement. +func (s *ShowShardGroupsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowShardsStatement represents a command for displaying shards in the cluster. +type ShowShardsStatement struct{} + +// String returns a string representation. +func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" } + +// RequiredPrivileges returns the privileges required to execute the statement. +func (s *ShowShardsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowDiagnosticsStatement represents a command for show node diagnostics. +type ShowDiagnosticsStatement struct { + // Module + Module string +} + +// String returns a string representation of the ShowDiagnosticsStatement. 
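+// For example (illustrative module name): SHOW DIAGNOSTICS FOR 'build' when
+// Module is set, or plain SHOW DIAGNOSTICS otherwise.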
+func (s *ShowDiagnosticsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW DIAGNOSTICS") + if s.Module != "" { + _, _ = buf.WriteString(" FOR ") + _, _ = buf.WriteString(QuoteString(s.Module)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement +func (s *ShowDiagnosticsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream. +type CreateSubscriptionStatement struct { + Name string + Database string + RetentionPolicy string + Destinations []string + Mode string +} + +// String returns a string representation of the CreateSubscriptionStatement. +func (s *CreateSubscriptionStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE SUBSCRIPTION ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + _, _ = buf.WriteString(".") + _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicy)) + _, _ = buf.WriteString(" DESTINATIONS ") + _, _ = buf.WriteString(s.Mode) + _, _ = buf.WriteString(" ") + for i, dest := range s.Destinations { + if i != 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(QuoteString(dest)) + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement. +func (s *CreateSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *CreateSubscriptionStatement) DefaultDatabase() string { + return s.Database +} + +// DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream. +type DropSubscriptionStatement struct { + Name string + Database string + RetentionPolicy string +} + +// String returns a string representation of the DropSubscriptionStatement. +func (s *DropSubscriptionStatement) String() string { + return fmt.Sprintf(`DROP SUBSCRIPTION %s ON %s.%s`, QuoteIdent(s.Name), QuoteIdent(s.Database), QuoteIdent(s.RetentionPolicy)) +} + +// RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement +func (s *DropSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *DropSubscriptionStatement) DefaultDatabase() string { + return s.Database +} + +// ShowSubscriptionsStatement represents a command to show a list of subscriptions. +type ShowSubscriptionsStatement struct { +} + +// String returns a string representation of the ShowSubscriptionsStatement. +func (s *ShowSubscriptionsStatement) String() string { + return "SHOW SUBSCRIPTIONS" +} + +// RequiredPrivileges returns the privilege required to execute a ShowSubscriptionsStatement. +func (s *ShowSubscriptionsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowTagKeysStatement represents a command for listing tag keys. +type ShowTagKeysStatement struct { + // Database to query. If blank, use the default database. 
+	// The database can also be specified per source in the Sources.
+	Database string
+
+	// Data sources that fields are extracted from.
+	Sources Sources
+
+	// An expression evaluated on data point.
+	Condition Expr
+
+	// Fields to sort results by.
+	SortFields SortFields
+
+	// Maximum number of tag keys per measurement. Unlimited if zero.
+	Limit int
+
+	// Returns tag keys starting at an offset from the first row.
+	Offset int
+
+	// Maximum number of series to be returned. Unlimited if zero.
+	SLimit int
+
+	// Returns series starting at an offset from the first one.
+	SOffset int
+}
+
+// String returns a string representation of the statement.
+func (s *ShowTagKeysStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("SHOW TAG KEYS")
+
+	if s.Database != "" {
+		_, _ = buf.WriteString(" ON ")
+		_, _ = buf.WriteString(QuoteIdent(s.Database))
+	}
+	if s.Sources != nil {
+		_, _ = buf.WriteString(" FROM ")
+		_, _ = buf.WriteString(s.Sources.String())
+	}
+	if s.Condition != nil {
+		_, _ = buf.WriteString(" WHERE ")
+		_, _ = buf.WriteString(s.Condition.String())
+	}
+	if len(s.SortFields) > 0 {
+		_, _ = buf.WriteString(" ORDER BY ")
+		_, _ = buf.WriteString(s.SortFields.String())
+	}
+	if s.Limit > 0 {
+		_, _ = buf.WriteString(" LIMIT ")
+		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
+	}
+	if s.Offset > 0 {
+		_, _ = buf.WriteString(" OFFSET ")
+		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
+	}
+	if s.SLimit > 0 {
+		_, _ = buf.WriteString(" SLIMIT ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SLimit))
+	}
+	if s.SOffset > 0 {
+		_, _ = buf.WriteString(" SOFFSET ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SOffset))
+	}
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege(s) required to execute a ShowTagKeysStatement.
+func (s *ShowTagKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
+}
+
+// DefaultDatabase returns the default database from the statement.
+func (s *ShowTagKeysStatement) DefaultDatabase() string {
+	return s.Database
+}
+
+// ShowTagValuesStatement represents a command for listing tag values.
+type ShowTagValuesStatement struct {
+	// Database to query. If blank, use the default database.
+	// The database can also be specified per source in the Sources.
+	Database string
+
+	// Data source that fields are extracted from.
+	Sources Sources
+
+	// Operation to use when selecting tag key(s).
+	Op Token
+
+	// Literal to compare the tag key(s) with.
+	TagKeyExpr Literal
+
+	// An expression evaluated on data point.
+	Condition Expr
+
+	// Fields to sort results by.
+	SortFields SortFields
+
+	// Maximum number of rows to be returned.
+	// Unlimited if zero.
+	Limit int
+
+	// Returns rows starting at an offset from the first row.
+	Offset int
+}
+
+// String returns a string representation of the statement.
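+// For example (illustrative values): SHOW TAG VALUES FROM cpu WITH KEY = host,
+// with ON, WHERE, ORDER BY, LIMIT and OFFSET clauses appended when set.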
+func (s *ShowTagValuesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW TAG VALUES") + + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + _, _ = buf.WriteString(" WITH KEY ") + _, _ = buf.WriteString(s.Op.String()) + _, _ = buf.WriteString(" ") + if lit, ok := s.TagKeyExpr.(*StringLiteral); ok { + _, _ = buf.WriteString(QuoteIdent(lit.Val)) + } else { + _, _ = buf.WriteString(s.TagKeyExpr.String()) + } + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowTagValuesStatement. +func (s *ShowTagValuesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *ShowTagValuesStatement) DefaultDatabase() string { + return s.Database +} + +// ShowUsersStatement represents a command for listing users. +type ShowUsersStatement struct{} + +// String returns a string representation of the ShowUsersStatement. +func (s *ShowUsersStatement) String() string { + return "SHOW USERS" +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement +func (s *ShowUsersStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil +} + +// ShowFieldKeysStatement represents a command for listing field keys. +type ShowFieldKeysStatement struct { + // Database to query. If blank, use the default database. + // The database can also be specified per source in the Sources. + Database string + + // Data sources that fields are extracted from. + Sources Sources + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the statement. +func (s *ShowFieldKeysStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW FIELD KEYS") + + if s.Database != "" { + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + } + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowFieldKeysStatement. 
+func (s *ShowFieldKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil +} + +// DefaultDatabase returns the default database from the statement. +func (s *ShowFieldKeysStatement) DefaultDatabase() string { + return s.Database +} + +// Fields represents a list of fields. +type Fields []*Field + +// AliasNames returns a list of calculated field names in +// order of alias, function name, then field. +func (a Fields) AliasNames() []string { + names := []string{} + for _, f := range a { + names = append(names, f.Name()) + } + return names +} + +// Names returns a list of field names. +func (a Fields) Names() []string { + names := []string{} + for _, f := range a { + switch expr := f.Expr.(type) { + case *Call: + names = append(names, expr.Name) + case *VarRef: + names = append(names, expr.Val) + case *BinaryExpr: + names = append(names, walkNames(expr)...) + case *ParenExpr: + names = append(names, walkNames(expr)...) + } + } + return names +} + +// String returns a string representation of the fields. +func (a Fields) String() string { + var str []string + for _, f := range a { + str = append(str, f.String()) + } + return strings.Join(str, ", ") +} + +// Field represents an expression retrieved from a select statement. +type Field struct { + Expr Expr + Alias string +} + +// Name returns the name of the field. Returns alias, if set. +// Otherwise uses the function name or variable name. +func (f *Field) Name() string { + // Return alias, if set. + if f.Alias != "" { + return f.Alias + } + + // Return the function name or variable name, if available. + switch expr := f.Expr.(type) { + case *Call: + return expr.Name + case *BinaryExpr: + return BinaryExprName(expr) + case *ParenExpr: + f := Field{Expr: expr.Expr} + return f.Name() + case *VarRef: + return expr.Val + } + + // Otherwise return a blank name. + return "" +} + +// String returns a string representation of the field. +func (f *Field) String() string { + str := f.Expr.String() + + if f.Alias == "" { + return str + } + return fmt.Sprintf("%s AS %s", str, QuoteIdent(f.Alias)) +} + +// Len implements sort.Interface. +func (a Fields) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a Fields) Less(i, j int) bool { return a[i].Name() < a[j].Name() } + +// Swap implements sort.Interface. +func (a Fields) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Dimensions represents a list of dimensions. +type Dimensions []*Dimension + +// String returns a string representation of the dimensions. +func (a Dimensions) String() string { + var str []string + for _, d := range a { + str = append(str, d.String()) + } + return strings.Join(str, ", ") +} + +// Normalize returns the interval and tag dimensions separately. +// Returns 0 if no time interval is specified. +func (a Dimensions) Normalize() (time.Duration, []string) { + var dur time.Duration + var tags []string + + for _, dim := range a { + switch expr := dim.Expr.(type) { + case *Call: + lit, _ := expr.Args[0].(*DurationLiteral) + dur = lit.Val + case *VarRef: + tags = append(tags, expr.Val) + } + } + + return dur, tags +} + +// Dimension represents an expression that a select statement is grouped by. +type Dimension struct { + Expr Expr +} + +// String returns a string representation of the dimension. +func (d *Dimension) String() string { return d.Expr.String() } + +// Measurements represents a list of measurements. 
+type Measurements []*Measurement + +// String returns a string representation of the measurements. +func (a Measurements) String() string { + var str []string + for _, m := range a { + str = append(str, m.String()) + } + return strings.Join(str, ", ") +} + +// Measurement represents a single measurement used as a datasource. +type Measurement struct { + Database string + RetentionPolicy string + Name string + Regex *RegexLiteral + IsTarget bool +} + +// String returns a string representation of the measurement. +func (m *Measurement) String() string { + var buf bytes.Buffer + if m.Database != "" { + _, _ = buf.WriteString(QuoteIdent(m.Database)) + _, _ = buf.WriteString(".") + } + + if m.RetentionPolicy != "" { + _, _ = buf.WriteString(QuoteIdent(m.RetentionPolicy)) + } + + if m.Database != "" || m.RetentionPolicy != "" { + _, _ = buf.WriteString(`.`) + } + + if m.Name != "" { + _, _ = buf.WriteString(QuoteIdent(m.Name)) + } else if m.Regex != nil { + _, _ = buf.WriteString(m.Regex.String()) + } + + return buf.String() +} + +func encodeMeasurement(mm *Measurement) *internal.Measurement { + pb := &internal.Measurement{ + Database: proto.String(mm.Database), + RetentionPolicy: proto.String(mm.RetentionPolicy), + Name: proto.String(mm.Name), + IsTarget: proto.Bool(mm.IsTarget), + } + if mm.Regex != nil { + pb.Regex = proto.String(mm.Regex.Val.String()) + } + return pb +} + +func decodeMeasurement(pb *internal.Measurement) (*Measurement, error) { + mm := &Measurement{ + Database: pb.GetDatabase(), + RetentionPolicy: pb.GetRetentionPolicy(), + Name: pb.GetName(), + IsTarget: pb.GetIsTarget(), + } + + if pb.Regex != nil { + regex, err := regexp.Compile(pb.GetRegex()) + if err != nil { + return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err) + } + mm.Regex = &RegexLiteral{Val: regex} + } + + return mm, nil +} + +// SubQuery is a source with a SelectStatement as the backing store. +type SubQuery struct { + Statement *SelectStatement +} + +// String returns a string representation of the subquery. +func (s *SubQuery) String() string { + return fmt.Sprintf("(%s)", s.Statement.String()) +} + +// VarRef represents a reference to a variable. +type VarRef struct { + Val string + Type DataType +} + +// String returns a string representation of the variable reference. +func (r *VarRef) String() string { + buf := bytes.NewBufferString(QuoteIdent(r.Val)) + if r.Type != Unknown { + buf.WriteString("::") + buf.WriteString(r.Type.String()) + } + return buf.String() +} + +// VarRefs represents a slice of VarRef types. +type VarRefs []VarRef + +// Len implements sort.Interface. +func (a VarRefs) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a VarRefs) Less(i, j int) bool { + if a[i].Val != a[j].Val { + return a[i].Val < a[j].Val + } + return a[i].Type < a[j].Type +} + +// Swap implements sort.Interface. +func (a VarRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Strings returns a slice of the variable names. +func (a VarRefs) Strings() []string { + s := make([]string, len(a)) + for i, ref := range a { + s[i] = ref.Val + } + return s +} + +// Call represents a function call. +type Call struct { + Name string + Args []Expr +} + +// String returns a string representation of the call. +func (c *Call) String() string { + // Join arguments. + var str []string + for _, arg := range c.Args { + str = append(str, arg.String()) + } + + // Write function name and args. 
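+	// (Illustrative) &Call{Name: "mean", Args: []Expr{&VarRef{Val: "value"}}}
+	// prints as mean(value).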
+	return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", "))
+}
+
+// Distinct represents a DISTINCT expression.
+type Distinct struct {
+	// Identifier following DISTINCT
+	Val string
+}
+
+// String returns a string representation of the expression.
+func (d *Distinct) String() string {
+	return fmt.Sprintf("DISTINCT %s", d.Val)
+}
+
+// NewCall returns a new call expression from this expression.
+func (d *Distinct) NewCall() *Call {
+	return &Call{
+		Name: "distinct",
+		Args: []Expr{
+			&VarRef{Val: d.Val},
+		},
+	}
+}
+
+// NumberLiteral represents a numeric literal.
+type NumberLiteral struct {
+	Val float64
+}
+
+// String returns a string representation of the literal.
+func (l *NumberLiteral) String() string { return strconv.FormatFloat(l.Val, 'f', 3, 64) }
+
+// IntegerLiteral represents an integer literal.
+type IntegerLiteral struct {
+	Val int64
+}
+
+// String returns a string representation of the literal.
+func (l *IntegerLiteral) String() string { return fmt.Sprintf("%d", l.Val) }
+
+// BooleanLiteral represents a boolean literal.
+type BooleanLiteral struct {
+	Val bool
+}
+
+// String returns a string representation of the literal.
+func (l *BooleanLiteral) String() string {
+	if l.Val {
+		return "true"
+	}
+	return "false"
+}
+
+// isTrueLiteral returns true if the expression is a literal "true" value.
+func isTrueLiteral(expr Expr) bool {
+	if expr, ok := expr.(*BooleanLiteral); ok {
+		return expr.Val == true
+	}
+	return false
+}
+
+// isFalseLiteral returns true if the expression is a literal "false" value.
+func isFalseLiteral(expr Expr) bool {
+	if expr, ok := expr.(*BooleanLiteral); ok {
+		return expr.Val == false
+	}
+	return false
+}
+
+// ListLiteral represents a list of tag key literals.
+type ListLiteral struct {
+	Vals []string
+}
+
+// String returns a string representation of the literal.
+func (s *ListLiteral) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("(")
+	for idx, tagKey := range s.Vals {
+		if idx != 0 {
+			_, _ = buf.WriteString(", ")
+		}
+		_, _ = buf.WriteString(QuoteIdent(tagKey))
+	}
+	_, _ = buf.WriteString(")")
+	return buf.String()
+}
+
+// StringLiteral represents a string literal.
+type StringLiteral struct {
+	Val string
+}
+
+// String returns a string representation of the literal.
+func (l *StringLiteral) String() string { return QuoteString(l.Val) }
+
+// IsTimeLiteral returns true if this string can be interpreted as a time literal.
+func (l *StringLiteral) IsTimeLiteral() bool {
+	return isDateTimeString(l.Val) || isDateString(l.Val)
+}
+
+// ToTimeLiteral returns a time literal if this string can be converted to a time literal.
+func (l *StringLiteral) ToTimeLiteral(loc *time.Location) (*TimeLiteral, error) {
+	if loc == nil {
+		loc = time.UTC
+	}
+
+	if isDateTimeString(l.Val) {
+		t, err := time.ParseInLocation(DateTimeFormat, l.Val, loc)
+		if err != nil {
+			// try to parse it as an RFC3339Nano time
+			t, err = time.ParseInLocation(time.RFC3339Nano, l.Val, loc)
+			if err != nil {
+				return nil, ErrInvalidTime
+			}
+		}
+		return &TimeLiteral{Val: t}, nil
+	} else if isDateString(l.Val) {
+		t, err := time.ParseInLocation(DateFormat, l.Val, loc)
+		if err != nil {
+			return nil, ErrInvalidTime
+		}
+		return &TimeLiteral{Val: t}, nil
+	}
+	return nil, ErrInvalidTime
+}
+
+// TimeLiteral represents a point-in-time literal.
+type TimeLiteral struct {
+	Val time.Time
+}
+
+// String returns a string representation of the literal.
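+// For example (illustrative value): midnight UTC on 2000-01-01 prints as
+// '2000-01-01T00:00:00Z'.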
+func (l *TimeLiteral) String() string { + return `'` + l.Val.UTC().Format(time.RFC3339Nano) + `'` +} + +// DurationLiteral represents a duration literal. +type DurationLiteral struct { + Val time.Duration +} + +// String returns a string representation of the literal. +func (l *DurationLiteral) String() string { return FormatDuration(l.Val) } + +// nilLiteral represents a nil literal. +// This is not available to the query language itself. It's only used internally. +type nilLiteral struct{} + +// String returns a string representation of the literal. +func (l *nilLiteral) String() string { return `nil` } + +// BinaryExpr represents an operation between two expressions. +type BinaryExpr struct { + Op Token + LHS Expr + RHS Expr +} + +// String returns a string representation of the binary expression. +func (e *BinaryExpr) String() string { + return fmt.Sprintf("%s %s %s", e.LHS.String(), e.Op.String(), e.RHS.String()) +} + +func (e *BinaryExpr) validate() error { + v := binaryExprValidator{} + Walk(&v, e) + if v.err != nil { + return v.err + } else if v.calls && v.refs { + return errors.New("binary expressions cannot mix aggregates and raw fields") + } + return nil +} + +type binaryExprValidator struct { + calls bool + refs bool + err error +} + +func (v *binaryExprValidator) Visit(n Node) Visitor { + if v.err != nil { + return nil + } + + switch n := n.(type) { + case *Call: + v.calls = true + + if n.Name == "top" || n.Name == "bottom" { + v.err = fmt.Errorf("cannot use %s() inside of a binary expression", n.Name) + return nil + } + + for _, expr := range n.Args { + switch e := expr.(type) { + case *BinaryExpr: + v.err = e.validate() + return nil + } + } + return nil + case *VarRef: + v.refs = true + return nil + } + return v +} + +// BinaryExprName returns the name of a binary expression by concatenating +// the variables in the binary expression with underscores. +func BinaryExprName(expr *BinaryExpr) string { + v := binaryExprNameVisitor{} + Walk(&v, expr) + return strings.Join(v.names, "_") +} + +type binaryExprNameVisitor struct { + names []string +} + +func (v *binaryExprNameVisitor) Visit(n Node) Visitor { + switch n := n.(type) { + case *VarRef: + v.names = append(v.names, n.Val) + case *Call: + v.names = append(v.names, n.Name) + return nil + } + return v +} + +// ParenExpr represents a parenthesized expression. +type ParenExpr struct { + Expr Expr +} + +// String returns a string representation of the parenthesized expression. +func (e *ParenExpr) String() string { return fmt.Sprintf("(%s)", e.Expr.String()) } + +// RegexLiteral represents a regular expression. +type RegexLiteral struct { + Val *regexp.Regexp +} + +// String returns a string representation of the literal. +func (r *RegexLiteral) String() string { + if r.Val != nil { + return fmt.Sprintf("/%s/", strings.Replace(r.Val.String(), `/`, `\/`, -1)) + } + return "" +} + +// CloneRegexLiteral returns a clone of the RegexLiteral. +func CloneRegexLiteral(r *RegexLiteral) *RegexLiteral { + if r == nil { + return nil + } + + clone := &RegexLiteral{} + if r.Val != nil { + clone.Val = regexp.MustCompile(r.Val.String()) + } + + return clone +} + +// Wildcard represents a wild card expression. +type Wildcard struct { + Type Token +} + +// String returns a string representation of the wildcard. +func (e *Wildcard) String() string { + switch e.Type { + case FIELD: + return "*::field" + case TAG: + return "*::tag" + default: + return "*" + } +} + +// CloneExpr returns a deep copy of the expression. 
+func CloneExpr(expr Expr) Expr {
+	if expr == nil {
+		return nil
+	}
+	switch expr := expr.(type) {
+	case *BinaryExpr:
+		return &BinaryExpr{Op: expr.Op, LHS: CloneExpr(expr.LHS), RHS: CloneExpr(expr.RHS)}
+	case *BooleanLiteral:
+		return &BooleanLiteral{Val: expr.Val}
+	case *Call:
+		args := make([]Expr, len(expr.Args))
+		for i, arg := range expr.Args {
+			args[i] = CloneExpr(arg)
+		}
+		return &Call{Name: expr.Name, Args: args}
+	case *Distinct:
+		return &Distinct{Val: expr.Val}
+	case *DurationLiteral:
+		return &DurationLiteral{Val: expr.Val}
+	case *IntegerLiteral:
+		return &IntegerLiteral{Val: expr.Val}
+	case *NumberLiteral:
+		return &NumberLiteral{Val: expr.Val}
+	case *ParenExpr:
+		return &ParenExpr{Expr: CloneExpr(expr.Expr)}
+	case *RegexLiteral:
+		return &RegexLiteral{Val: expr.Val}
+	case *StringLiteral:
+		return &StringLiteral{Val: expr.Val}
+	case *TimeLiteral:
+		return &TimeLiteral{Val: expr.Val}
+	case *VarRef:
+		return &VarRef{Val: expr.Val, Type: expr.Type}
+	case *Wildcard:
+		return &Wildcard{Type: expr.Type}
+	}
+	panic("unreachable")
+}
+
+// HasTimeExpr returns true if the expression has a time term.
+func HasTimeExpr(expr Expr) bool {
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return HasTimeExpr(n.LHS) || HasTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return HasTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
+// OnlyTimeExpr returns true if the expression only has time constraints.
+func OnlyTimeExpr(expr Expr) bool {
+	if expr == nil {
+		return false
+	}
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return OnlyTimeExpr(n.LHS) && OnlyTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return OnlyTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
+// TimeRange returns the minimum and maximum times specified by an expression.
+// It returns zero times if there is no bound.
+func TimeRange(expr Expr, loc *time.Location) (min, max time.Time, err error) {
+	WalkFunc(expr, func(n Node) {
+		if err != nil {
+			return
+		}
+
+		if n, ok := n.(*BinaryExpr); ok {
+			// Extract literal expression & operator on LHS.
+			// Check for "time" on the left-hand side first.
+			// Otherwise check for the right-hand side and flip the operator.
+			op := n.Op
+			var value time.Time
+			value, err = timeExprValue(n.LHS, n.RHS, loc)
+			if err != nil {
+				return
+			} else if value.IsZero() {
+				if value, err = timeExprValue(n.RHS, n.LHS, loc); value.IsZero() || err != nil {
+					return
+				} else if op == LT {
+					op = GT
+				} else if op == LTE {
+					op = GTE
+				} else if op == GT {
+					op = LT
+				} else if op == GTE {
+					op = LTE
+				}
+			}
+
+			// Update the min/max depending on the operator.
+			// The GT & LT cases shift the value by +/- 1ns so the bound excludes the exact time ("not equal").
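+			// (Illustrative) time > '2000-01-01T00:00:00Z' sets min to that
+			// instant plus 1ns, while time >= '2000-01-01T00:00:00Z' keeps the
+			// instant itself.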
+ switch op { + case GT: + if min.IsZero() || value.After(min) { + min = value.Add(time.Nanosecond) + } + case GTE: + if min.IsZero() || value.After(min) { + min = value + } + case LT: + if max.IsZero() || value.Before(max) { + max = value.Add(-time.Nanosecond) + } + case LTE: + if max.IsZero() || value.Before(max) { + max = value + } + case EQ: + if min.IsZero() || value.After(min) { + min = value + } + if max.IsZero() || value.Before(max) { + max = value + } + } + } + }) + return +} + +// TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by +// an expression. If there is no lower bound, the minimum time is returned +// for minimum. If there is no higher bound, the maximum time is returned. +func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) { + tmin, tmax, err := TimeRange(expr, nil) + if err != nil { + return 0, 0, err + } + + if tmin.IsZero() { + min = time.Unix(0, MinTime).UnixNano() + } else { + min = tmin.UnixNano() + } + if tmax.IsZero() { + max = time.Unix(0, MaxTime).UnixNano() + } else { + max = tmax.UnixNano() + } + return +} + +// timeExprValue returns the time literal value of a "time == " expression. +// Returns zero time if the expression is not a time expression. +func timeExprValue(ref Expr, lit Expr, loc *time.Location) (t time.Time, err error) { + if ref, ok := ref.(*VarRef); ok && strings.ToLower(ref.Val) == "time" { + // If literal looks like a date time then parse it as a time literal. + if strlit, ok := lit.(*StringLiteral); ok { + if strlit.IsTimeLiteral() { + t, err := strlit.ToTimeLiteral(loc) + if err != nil { + return time.Time{}, err + } + lit = t + } + } + + switch lit := lit.(type) { + case *TimeLiteral: + if lit.Val.After(time.Unix(0, MaxTime)) { + return time.Time{}, fmt.Errorf("time %s overflows time literal", lit.Val.Format(time.RFC3339)) + } else if lit.Val.Before(time.Unix(0, MinTime+1)) { + // The minimum allowable time literal is one greater than the minimum time because the minimum time + // is a sentinel value only used internally. + return time.Time{}, fmt.Errorf("time %s underflows time literal", lit.Val.Format(time.RFC3339)) + } + return lit.Val, nil + case *DurationLiteral: + return time.Unix(0, int64(lit.Val)).UTC(), nil + case *NumberLiteral: + return time.Unix(0, int64(lit.Val)).UTC(), nil + case *IntegerLiteral: + return time.Unix(0, lit.Val).UTC(), nil + default: + return time.Time{}, fmt.Errorf("invalid operation: time and %T are not compatible", lit) + } + } + return time.Time{}, nil +} + +// Visitor can be called by Walk to traverse an AST hierarchy. +// The Visit() function is called once per node. +type Visitor interface { + Visit(Node) Visitor +} + +// Walk traverses a node hierarchy in depth-first order. 
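+// A minimal usage sketch (illustrative, assuming a parsed statement stmt) that
+// prints every function call name via WalkFunc, defined below:
+//
+//	WalkFunc(stmt, func(n Node) {
+//		if call, ok := n.(*Call); ok {
+//			fmt.Println(call.Name)
+//		}
+//	})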
+func Walk(v Visitor, node Node) { + if node == nil { + return + } + + if v = v.Visit(node); v == nil { + return + } + + switch n := node.(type) { + case *BinaryExpr: + Walk(v, n.LHS) + Walk(v, n.RHS) + + case *Call: + for _, expr := range n.Args { + Walk(v, expr) + } + + case *CreateContinuousQueryStatement: + Walk(v, n.Source) + + case *Dimension: + Walk(v, n.Expr) + + case Dimensions: + for _, c := range n { + Walk(v, c) + } + + case *DeleteSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + + case *DropSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + + case *Field: + Walk(v, n.Expr) + + case Fields: + for _, c := range n { + Walk(v, c) + } + + case *ParenExpr: + Walk(v, n.Expr) + + case *Query: + Walk(v, n.Statements) + + case *SelectStatement: + Walk(v, n.Fields) + Walk(v, n.Target) + Walk(v, n.Dimensions) + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + + case *ShowTagKeysStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowTagValuesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowFieldKeysStatement: + Walk(v, n.Sources) + Walk(v, n.SortFields) + + case SortFields: + for _, sf := range n { + Walk(v, sf) + } + + case Sources: + for _, s := range n { + Walk(v, s) + } + + case *SubQuery: + Walk(v, n.Statement) + + case Statements: + for _, s := range n { + Walk(v, s) + } + + case *Target: + if n != nil { + Walk(v, n.Measurement) + } + } +} + +// WalkFunc traverses a node hierarchy in depth-first order. +func WalkFunc(node Node, fn func(Node)) { + Walk(walkFuncVisitor(fn), node) +} + +type walkFuncVisitor func(Node) + +func (fn walkFuncVisitor) Visit(n Node) Visitor { fn(n); return fn } + +// Rewriter can be called by Rewrite to replace nodes in the AST hierarchy. +// The Rewrite() function is called once per node. +type Rewriter interface { + Rewrite(Node) Node +} + +// Rewrite recursively invokes the rewriter to replace each node. +// Nodes are traversed depth-first and rewritten from leaf to root. +func Rewrite(r Rewriter, node Node) Node { + switch n := node.(type) { + case *Query: + n.Statements = Rewrite(r, n.Statements).(Statements) + + case Statements: + for i, s := range n { + n[i] = Rewrite(r, s).(Statement) + } + + case *SelectStatement: + n.Fields = Rewrite(r, n.Fields).(Fields) + n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions) + n.Sources = Rewrite(r, n.Sources).(Sources) + + // Rewrite may return nil. Nil does not satisfy the Expr + // interface. We only assert the rewritten result to be an + // Expr if it is not nil: + if cond := Rewrite(r, n.Condition); cond != nil { + n.Condition = cond.(Expr) + } else { + n.Condition = nil + } + + case *SubQuery: + n.Statement = Rewrite(r, n.Statement).(*SelectStatement) + + case Fields: + for i, f := range n { + n[i] = Rewrite(r, f).(*Field) + } + + case *Field: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case Dimensions: + for i, d := range n { + n[i] = Rewrite(r, d).(*Dimension) + } + + case *Dimension: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case *BinaryExpr: + n.LHS = Rewrite(r, n.LHS).(Expr) + n.RHS = Rewrite(r, n.RHS).(Expr) + + case *ParenExpr: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case *Call: + for i, expr := range n.Args { + n.Args[i] = Rewrite(r, expr).(Expr) + } + } + + return r.Rewrite(node) +} + +// RewriteFunc rewrites a node hierarchy. 
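+// For instance, rewriteWithoutTimeDimensions above uses this helper to replace
+// time comparisons with a literal true before SetTimeRange re-parses and
+// reduces the condition.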
+func RewriteFunc(node Node, fn func(Node) Node) Node { + return Rewrite(rewriterFunc(fn), node) +} + +type rewriterFunc func(Node) Node + +func (fn rewriterFunc) Rewrite(n Node) Node { return fn(n) } + +// RewriteExpr recursively invokes the function to replace each expr. +// Nodes are traversed depth-first and rewritten from leaf to root. +func RewriteExpr(expr Expr, fn func(Expr) Expr) Expr { + switch e := expr.(type) { + case *BinaryExpr: + e.LHS = RewriteExpr(e.LHS, fn) + e.RHS = RewriteExpr(e.RHS, fn) + if e.LHS != nil && e.RHS == nil { + expr = e.LHS + } else if e.RHS != nil && e.LHS == nil { + expr = e.RHS + } else if e.LHS == nil && e.RHS == nil { + return nil + } + + case *ParenExpr: + e.Expr = RewriteExpr(e.Expr, fn) + if e.Expr == nil { + return nil + } + + case *Call: + for i, expr := range e.Args { + e.Args[i] = RewriteExpr(expr, fn) + } + } + + return fn(expr) +} + +// Eval evaluates expr against a map. +func Eval(expr Expr, m map[string]interface{}) interface{} { + if expr == nil { + return nil + } + + switch expr := expr.(type) { + case *BinaryExpr: + return evalBinaryExpr(expr, m) + case *BooleanLiteral: + return expr.Val + case *IntegerLiteral: + return expr.Val + case *NumberLiteral: + return expr.Val + case *ParenExpr: + return Eval(expr.Expr, m) + case *RegexLiteral: + return expr.Val + case *StringLiteral: + return expr.Val + case *VarRef: + return m[expr.Val] + default: + return nil + } +} + +func evalBinaryExpr(expr *BinaryExpr, m map[string]interface{}) interface{} { + lhs := Eval(expr.LHS, m) + rhs := Eval(expr.RHS, m) + if lhs == nil && rhs != nil { + // When the LHS is nil and the RHS is a boolean, implicitly cast the + // nil to false. + if _, ok := rhs.(bool); ok { + lhs = false + } + } else if lhs != nil && rhs == nil { + // Implicit cast of the RHS nil to false when the LHS is a boolean. + if _, ok := lhs.(bool); ok { + rhs = false + } + } + + // Evaluate if both sides are simple types. + switch lhs := lhs.(type) { + case bool: + rhs, ok := rhs.(bool) + switch expr.Op { + case AND: + return ok && (lhs && rhs) + case OR: + return ok && (lhs || rhs) + case BITWISE_AND: + return ok && (lhs && rhs) + case BITWISE_OR: + return ok && (lhs || rhs) + case BITWISE_XOR: + return ok && (lhs != rhs) + case EQ: + return ok && (lhs == rhs) + case NEQ: + return ok && (lhs != rhs) + } + case float64: + // Try the rhs as a float64 or int64 + rhsf, ok := rhs.(float64) + if !ok { + var rhsi int64 + if rhsi, ok = rhs.(int64); ok { + rhsf = float64(rhsi) + } + } + + rhs := rhsf + switch expr.Op { + case EQ: + return ok && (lhs == rhs) + case NEQ: + return ok && (lhs != rhs) + case LT: + return ok && (lhs < rhs) + case LTE: + return ok && (lhs <= rhs) + case GT: + return ok && (lhs > rhs) + case GTE: + return ok && (lhs >= rhs) + case ADD: + if !ok { + return nil + } + return lhs + rhs + case SUB: + if !ok { + return nil + } + return lhs - rhs + case MUL: + if !ok { + return nil + } + return lhs * rhs + case DIV: + if !ok { + return nil + } else if rhs == 0 { + return float64(0) + } + return lhs / rhs + case MOD: + if !ok { + return nil + } + return math.Mod(lhs, rhs) + } + case int64: + // Try as a float64 to see if a float cast is required. 
+ rhsf, ok := rhs.(float64) + if ok { + lhs := float64(lhs) + rhs := rhsf + switch expr.Op { + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + case LT: + return lhs < rhs + case LTE: + return lhs <= rhs + case GT: + return lhs > rhs + case GTE: + return lhs >= rhs + case ADD: + return lhs + rhs + case SUB: + return lhs - rhs + case MUL: + return lhs * rhs + case DIV: + if rhs == 0 { + return float64(0) + } + return lhs / rhs + case MOD: + return math.Mod(lhs, rhs) + } + } else { + rhs, ok := rhs.(int64) + switch expr.Op { + case EQ: + return ok && (lhs == rhs) + case NEQ: + return ok && (lhs != rhs) + case LT: + return ok && (lhs < rhs) + case LTE: + return ok && (lhs <= rhs) + case GT: + return ok && (lhs > rhs) + case GTE: + return ok && (lhs >= rhs) + case ADD: + if !ok { + return nil + } + return lhs + rhs + case SUB: + if !ok { + return nil + } + return lhs - rhs + case MUL: + if !ok { + return nil + } + return lhs * rhs + case DIV: + if !ok { + return nil + } else if rhs == 0 { + return float64(0) + } + return lhs / rhs + case MOD: + if !ok { + return nil + } else if rhs == 0 { + return int64(0) + } + return lhs % rhs + case BITWISE_AND: + if !ok { + return nil + } + return lhs & rhs + case BITWISE_OR: + if !ok { + return nil + } + return lhs | rhs + case BITWISE_XOR: + if !ok { + return nil + } + return lhs ^ rhs + } + } + case string: + switch expr.Op { + case EQ: + rhs, ok := rhs.(string) + if !ok { + return nil + } + return lhs == rhs + case NEQ: + rhs, ok := rhs.(string) + if !ok { + return nil + } + return lhs != rhs + case EQREGEX: + rhs, ok := rhs.(*regexp.Regexp) + if !ok { + return nil + } + return rhs.MatchString(lhs) + case NEQREGEX: + rhs, ok := rhs.(*regexp.Regexp) + if !ok { + return nil + } + return !rhs.MatchString(lhs) + } + } + return nil +} + +// EvalBool evaluates expr and returns true if result is a boolean true. +// Otherwise returns false. +func EvalBool(expr Expr, m map[string]interface{}) bool { + v, _ := Eval(expr, m).(bool) + return v +} + +// TypeMapper maps a data type to the measurement and field. +type TypeMapper interface { + MapType(measurement *Measurement, field string) DataType +} + +type nilTypeMapper struct{} + +func (nilTypeMapper) MapType(*Measurement, string) DataType { return Unknown } + +// EvalType evaluates the expression's type. +func EvalType(expr Expr, sources Sources, typmap TypeMapper) DataType { + if typmap == nil { + typmap = nilTypeMapper{} + } + + switch expr := expr.(type) { + case *VarRef: + // If this variable already has an assigned type, just use that. 
+ if expr.Type != Unknown && expr.Type != AnyField { + return expr.Type + } + + var typ DataType + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + if t := typmap.MapType(src, expr.Val); typ.LessThan(t) { + typ = t + } + case *SubQuery: + _, e := src.Statement.FieldExprByName(expr.Val) + if e != nil { + if t := EvalType(e, src.Statement.Sources, typmap); typ.LessThan(t) { + typ = t + } + } + + if typ == Unknown { + for _, d := range src.Statement.Dimensions { + if d, ok := d.Expr.(*VarRef); ok && expr.Val == d.Val { + typ = Tag + } + } + } + } + } + return typ + case *Call: + switch expr.Name { + case "mean", "median", "integral": + return Float + case "count": + return Integer + default: + return EvalType(expr.Args[0], sources, typmap) + } + case *ParenExpr: + return EvalType(expr.Expr, sources, typmap) + case *NumberLiteral: + return Float + case *IntegerLiteral: + return Integer + case *StringLiteral: + return String + case *BooleanLiteral: + return Boolean + case *BinaryExpr: + lhs := EvalType(expr.LHS, sources, typmap) + rhs := EvalType(expr.RHS, sources, typmap) + if lhs != Unknown && rhs != Unknown { + if lhs < rhs { + return lhs + } else { + return rhs + } + } else if lhs != Unknown { + return lhs + } else { + return rhs + } + } + return Unknown +} + +func FieldDimensions(sources Sources, m FieldMapper) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + fields = make(map[string]DataType) + dimensions = make(map[string]struct{}) + + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + f, d, err := m.FieldDimensions(src) + if err != nil { + return nil, nil, err + } + + for k, typ := range f { + if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + for k := range d { + dimensions[k] = struct{}{} + } + case *SubQuery: + for _, f := range src.Statement.Fields { + k := f.Name() + typ := EvalType(f.Expr, src.Statement.Sources, m) + + if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + + for _, d := range src.Statement.Dimensions { + if expr, ok := d.Expr.(*VarRef); ok { + dimensions[expr.Val] = struct{}{} + } + } + } + } + return +} + +// Reduce evaluates expr using the available values in valuer. +// References that don't exist in valuer are ignored. +func Reduce(expr Expr, valuer Valuer) Expr { + expr = reduce(expr, valuer) + + // Unwrap parens at top level. + if expr, ok := expr.(*ParenExpr); ok { + return expr.Expr + } + return expr +} + +func reduce(expr Expr, valuer Valuer) Expr { + if expr == nil { + return nil + } + + switch expr := expr.(type) { + case *BinaryExpr: + return reduceBinaryExpr(expr, valuer) + case *Call: + return reduceCall(expr, valuer) + case *ParenExpr: + return reduceParenExpr(expr, valuer) + case *VarRef: + return reduceVarRef(expr, valuer) + case *nilLiteral: + return expr + default: + return CloneExpr(expr) + } +} + +func reduceBinaryExpr(expr *BinaryExpr, valuer Valuer) Expr { + // Reduce both sides first. + op := expr.Op + lhs := reduce(expr.LHS, valuer) + rhs := reduce(expr.RHS, valuer) + + loc := time.UTC + if v, ok := valuer.(ZoneValuer); ok { + loc = v.Zone() + } + + // Do not evaluate if one side is nil. + if lhs == nil || rhs == nil { + return &BinaryExpr{LHS: lhs, RHS: rhs, Op: expr.Op} + } + + // If we have a logical operator (AND, OR) and one side is a boolean literal + // then we need to have special handling. 
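+	// For example, `true AND expr` reduces to `expr`, `false AND expr` to `false`,
+	// `true OR expr` to `true`, and `false OR expr` to `expr`; TestReduce in
+	// ast_test.go exercises these cases.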
+ if op == AND { + if isFalseLiteral(lhs) || isFalseLiteral(rhs) { + return &BooleanLiteral{Val: false} + } else if isTrueLiteral(lhs) { + return rhs + } else if isTrueLiteral(rhs) { + return lhs + } + } else if op == OR { + if isTrueLiteral(lhs) || isTrueLiteral(rhs) { + return &BooleanLiteral{Val: true} + } else if isFalseLiteral(lhs) { + return rhs + } else if isFalseLiteral(rhs) { + return lhs + } + } + + // Evaluate if both sides are simple types. + switch lhs := lhs.(type) { + case *BooleanLiteral: + return reduceBinaryExprBooleanLHS(op, lhs, rhs) + case *DurationLiteral: + return reduceBinaryExprDurationLHS(op, lhs, rhs, loc) + case *IntegerLiteral: + return reduceBinaryExprIntegerLHS(op, lhs, rhs, loc) + case *nilLiteral: + return reduceBinaryExprNilLHS(op, lhs, rhs) + case *NumberLiteral: + return reduceBinaryExprNumberLHS(op, lhs, rhs) + case *StringLiteral: + return reduceBinaryExprStringLHS(op, lhs, rhs, loc) + case *TimeLiteral: + return reduceBinaryExprTimeLHS(op, lhs, rhs, loc) + default: + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} + } +} + +func reduceBinaryExprBooleanLHS(op Token, lhs *BooleanLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *BooleanLiteral: + switch op { + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case AND: + return &BooleanLiteral{Val: lhs.Val && rhs.Val} + case OR: + return &BooleanLiteral{Val: lhs.Val || rhs.Val} + case BITWISE_AND: + return &BooleanLiteral{Val: lhs.Val && rhs.Val} + case BITWISE_OR: + return &BooleanLiteral{Val: lhs.Val || rhs.Val} + case BITWISE_XOR: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprDurationLHS(op Token, lhs *DurationLiteral, rhs Expr, loc *time.Location) Expr { + switch rhs := rhs.(type) { + case *DurationLiteral: + switch op { + case ADD: + return &DurationLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &DurationLiteral{Val: lhs.Val - rhs.Val} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *NumberLiteral: + switch op { + case MUL: + return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)} + case DIV: + if rhs.Val == 0 { + return &DurationLiteral{Val: 0} + } + return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)} + } + case *IntegerLiteral: + switch op { + case MUL: + return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)} + case DIV: + if rhs.Val == 0 { + return &DurationLiteral{Val: 0} + } + return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)} + } + case *TimeLiteral: + switch op { + case ADD: + return &TimeLiteral{Val: rhs.Val.Add(lhs.Val)} + } + case *StringLiteral: + t, err := rhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprDurationLHS(op, lhs, t, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. 
+ if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprIntegerLHS(op Token, lhs *IntegerLiteral, rhs Expr, loc *time.Location) Expr { + switch rhs := rhs.(type) { + case *NumberLiteral: + return reduceBinaryExprNumberLHS(op, &NumberLiteral{Val: float64(lhs.Val)}, rhs) + case *IntegerLiteral: + switch op { + case ADD: + return &IntegerLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &IntegerLiteral{Val: lhs.Val - rhs.Val} + case MUL: + return &IntegerLiteral{Val: lhs.Val * rhs.Val} + case DIV: + if rhs.Val == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: float64(lhs.Val) / float64(rhs.Val)} + case MOD: + if rhs.Val == 0 { + return &IntegerLiteral{Val: 0} + } + return &IntegerLiteral{Val: lhs.Val % rhs.Val} + case BITWISE_AND: + return &IntegerLiteral{Val: lhs.Val & rhs.Val} + case BITWISE_OR: + return &IntegerLiteral{Val: lhs.Val | rhs.Val} + case BITWISE_XOR: + return &IntegerLiteral{Val: lhs.Val ^ rhs.Val} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *DurationLiteral: + // Treat the integer as a timestamp. + switch op { + case ADD: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(rhs.Val)} + case SUB: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(-rhs.Val)} + } + case *TimeLiteral: + d := &DurationLiteral{Val: time.Duration(lhs.Val)} + expr := reduceBinaryExprDurationLHS(op, d, rhs, loc) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *StringLiteral: + t, err := rhs.ToTimeLiteral(loc) + if err != nil { + break + } + d := &DurationLiteral{Val: time.Duration(lhs.Val)} + expr := reduceBinaryExprDurationLHS(op, d, t, loc) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprNilLHS(op Token, lhs *nilLiteral, rhs Expr) Expr { + switch op { + case EQ, NEQ: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprNumberLHS(op Token, lhs *NumberLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *NumberLiteral: + switch op { + case ADD: + return &NumberLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &NumberLiteral{Val: lhs.Val - rhs.Val} + case MUL: + return &NumberLiteral{Val: lhs.Val * rhs.Val} + case DIV: + if rhs.Val == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: lhs.Val / rhs.Val} + case MOD: + return &NumberLiteral{Val: math.Mod(lhs.Val, rhs.Val)} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *IntegerLiteral: + switch op { + case ADD: + return &NumberLiteral{Val: lhs.Val + float64(rhs.Val)} + case SUB: + return &NumberLiteral{Val: lhs.Val - float64(rhs.Val)} + case MUL: + return &NumberLiteral{Val: lhs.Val * 
float64(rhs.Val)} + case DIV: + if float64(rhs.Val) == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: lhs.Val / float64(rhs.Val)} + case MOD: + return &NumberLiteral{Val: math.Mod(lhs.Val, float64(rhs.Val))} + case EQ: + return &BooleanLiteral{Val: lhs.Val == float64(rhs.Val)} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != float64(rhs.Val)} + case GT: + return &BooleanLiteral{Val: lhs.Val > float64(rhs.Val)} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= float64(rhs.Val)} + case LT: + return &BooleanLiteral{Val: lhs.Val < float64(rhs.Val)} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= float64(rhs.Val)} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprStringLHS(op Token, lhs *StringLiteral, rhs Expr, loc *time.Location) Expr { + switch rhs := rhs.(type) { + case *StringLiteral: + switch op { + case EQ: + var expr Expr = &BooleanLiteral{Val: lhs.Val == rhs.Val} + // This might be a comparison between time literals. + // If it is, parse the time literals and then compare since it + // could be a different result if they use different formats + // for the same time. + if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() { + tlhs, err := lhs.ToTimeLiteral(loc) + if err != nil { + return expr + } + + trhs, err := rhs.ToTimeLiteral(loc) + if err != nil { + return expr + } + + t := reduceBinaryExprTimeLHS(op, tlhs, trhs, loc) + if _, ok := t.(*BinaryExpr); !ok { + expr = t + } + } + return expr + case NEQ: + var expr Expr = &BooleanLiteral{Val: lhs.Val != rhs.Val} + // This might be a comparison between time literals. + // If it is, parse the time literals and then compare since it + // could be a different result if they use different formats + // for the same time. + if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() { + tlhs, err := lhs.ToTimeLiteral(loc) + if err != nil { + return expr + } + + trhs, err := rhs.ToTimeLiteral(loc) + if err != nil { + return expr + } + + t := reduceBinaryExprTimeLHS(op, tlhs, trhs, loc) + if _, ok := t.(*BinaryExpr); !ok { + expr = t + } + } + return expr + case ADD: + return &StringLiteral{Val: lhs.Val + rhs.Val} + default: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + } + case *DurationLiteral: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *TimeLiteral: + // Attempt to convert the string literal to a time literal. + t, err := lhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *IntegerLiteral: + // Attempt to convert the string literal to a time literal. 
+ t, err := lhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, t, rhs, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *nilLiteral: + switch op { + case EQ, NEQ: + return &BooleanLiteral{Val: false} + } + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprTimeLHS(op Token, lhs *TimeLiteral, rhs Expr, loc *time.Location) Expr { + switch rhs := rhs.(type) { + case *DurationLiteral: + switch op { + case ADD: + return &TimeLiteral{Val: lhs.Val.Add(rhs.Val)} + case SUB: + return &TimeLiteral{Val: lhs.Val.Add(-rhs.Val)} + } + case *IntegerLiteral: + d := &DurationLiteral{Val: time.Duration(rhs.Val)} + expr := reduceBinaryExprTimeLHS(op, lhs, d, loc) + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *TimeLiteral: + switch op { + case SUB: + return &DurationLiteral{Val: lhs.Val.Sub(rhs.Val)} + case EQ: + return &BooleanLiteral{Val: lhs.Val.Equal(rhs.Val)} + case NEQ: + return &BooleanLiteral{Val: !lhs.Val.Equal(rhs.Val)} + case GT: + return &BooleanLiteral{Val: lhs.Val.After(rhs.Val)} + case GTE: + return &BooleanLiteral{Val: lhs.Val.After(rhs.Val) || lhs.Val.Equal(rhs.Val)} + case LT: + return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val)} + case LTE: + return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val) || lhs.Val.Equal(rhs.Val)} + } + case *StringLiteral: + t, err := rhs.ToTimeLiteral(loc) + if err != nil { + break + } + expr := reduceBinaryExprTimeLHS(op, lhs, t, loc) + + // If the returned expression is still a binary expr, that means + // we couldn't reduce it so this wasn't used in a time literal context. + if _, ok := expr.(*BinaryExpr); !ok { + return expr + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceCall(expr *Call, valuer Valuer) Expr { + // Evaluate "now()" if valuer is set. + if expr.Name == "now" && len(expr.Args) == 0 && valuer != nil { + if v, ok := valuer.Value("now()"); ok { + v, _ := v.(time.Time) + return &TimeLiteral{Val: v} + } + } + + // Otherwise reduce arguments. + args := make([]Expr, len(expr.Args)) + for i, arg := range expr.Args { + args[i] = reduce(arg, valuer) + } + return &Call{Name: expr.Name, Args: args} +} + +func reduceParenExpr(expr *ParenExpr, valuer Valuer) Expr { + subexpr := reduce(expr.Expr, valuer) + if subexpr, ok := subexpr.(*BinaryExpr); ok { + return &ParenExpr{Expr: subexpr} + } + return subexpr +} + +func reduceVarRef(expr *VarRef, valuer Valuer) Expr { + // Ignore if there is no valuer. + if valuer == nil { + return &VarRef{Val: expr.Val, Type: expr.Type} + } + + // Retrieve the value of the ref. + // Ignore if the value doesn't exist. + v, ok := valuer.Value(expr.Val) + if !ok { + return &VarRef{Val: expr.Val, Type: expr.Type} + } + + // Return the value as a literal. + switch v := v.(type) { + case bool: + return &BooleanLiteral{Val: v} + case time.Duration: + return &DurationLiteral{Val: v} + case float64: + return &NumberLiteral{Val: v} + case string: + return &StringLiteral{Val: v} + case time.Time: + return &TimeLiteral{Val: v} + default: + return &nilLiteral{} + } +} + +// Valuer is the interface that wraps the Value() method. +type Valuer interface { + // Value returns the value and existence flag for a given key. 
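+	// NowValuer below is one such implementation; it resolves only the key
+	// "now()" and reports false for every other key.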
+ Value(key string) (interface{}, bool) +} + +// ZoneValuer is the interface that specifies the current time zone. +type ZoneValuer interface { + // Zone returns the time zone location. + Zone() *time.Location +} + +// NowValuer returns only the value for "now()". +type NowValuer struct { + Now time.Time + Location *time.Location +} + +// Value is a method that returns the value and existence flag for a given key. +func (v *NowValuer) Value(key string) (interface{}, bool) { + if key == "now()" { + return v.Now, true + } + return nil, false +} + +// Zone is a method that returns the time.Location. +func (v *NowValuer) Zone() *time.Location { + if v.Location != nil { + return v.Location + } + return time.UTC +} + +// ContainsVarRef returns true if expr is a VarRef or contains one. +func ContainsVarRef(expr Expr) bool { + var v containsVarRefVisitor + Walk(&v, expr) + return v.contains +} + +type containsVarRefVisitor struct { + contains bool +} + +func (v *containsVarRefVisitor) Visit(n Node) Visitor { + switch n.(type) { + case *Call: + return nil + case *VarRef: + v.contains = true + } + return v +} + +func IsSelector(expr Expr) bool { + if call, ok := expr.(*Call); ok { + switch call.Name { + case "first", "last", "min", "max", "percentile", "sample", "top", "bottom": + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast_test.go b/vendor/github.com/influxdata/influxdb/influxql/ast_test.go new file mode 100644 index 0000000..0c45589 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/ast_test.go @@ -0,0 +1,1861 @@ +package influxql_test + +import ( + "fmt" + "go/importer" + "reflect" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/influxql" +) + +func BenchmarkQuery_String(b *testing.B) { + p := influxql.NewParser(strings.NewReader(`SELECT foo AS zoo, a AS b FROM bar WHERE value > 10 AND q = 'hello'`)) + q, _ := p.ParseStatement() + for i := 0; i < b.N; i++ { + _ = q.String() + } +} + +// Ensure a value's data type can be retrieved. +func TestInspectDataType(t *testing.T) { + for i, tt := range []struct { + v interface{} + typ influxql.DataType + }{ + {float64(100), influxql.Float}, + {int64(100), influxql.Integer}, + {int32(100), influxql.Integer}, + {100, influxql.Integer}, + {true, influxql.Boolean}, + {"string", influxql.String}, + {time.Now(), influxql.Time}, + {time.Second, influxql.Duration}, + {nil, influxql.Unknown}, + } { + if typ := influxql.InspectDataType(tt.v); tt.typ != typ { + t.Errorf("%d. %v (%s): unexpected type: %s", i, tt.v, tt.typ, typ) + continue + } + } +} + +func TestDataType_String(t *testing.T) { + for i, tt := range []struct { + typ influxql.DataType + v string + }{ + {influxql.Float, "float"}, + {influxql.Integer, "integer"}, + {influxql.Boolean, "boolean"}, + {influxql.String, "string"}, + {influxql.Time, "time"}, + {influxql.Duration, "duration"}, + {influxql.Tag, "tag"}, + {influxql.Unknown, "unknown"}, + } { + if v := tt.typ.String(); tt.v != v { + t.Errorf("%d. 
%v (%s): unexpected string: %s", i, tt.typ, tt.v, v) + } + } +} + +func TestDataType_LessThan(t *testing.T) { + for i, tt := range []struct { + typ influxql.DataType + other influxql.DataType + exp bool + }{ + {typ: influxql.Unknown, other: influxql.Unknown, exp: true}, + {typ: influxql.Unknown, other: influxql.Float, exp: true}, + {typ: influxql.Unknown, other: influxql.Integer, exp: true}, + {typ: influxql.Unknown, other: influxql.String, exp: true}, + {typ: influxql.Unknown, other: influxql.Boolean, exp: true}, + {typ: influxql.Unknown, other: influxql.Tag, exp: true}, + {typ: influxql.Float, other: influxql.Unknown, exp: false}, + {typ: influxql.Integer, other: influxql.Unknown, exp: false}, + {typ: influxql.String, other: influxql.Unknown, exp: false}, + {typ: influxql.Boolean, other: influxql.Unknown, exp: false}, + {typ: influxql.Tag, other: influxql.Unknown, exp: false}, + {typ: influxql.Float, other: influxql.Float, exp: false}, + {typ: influxql.Float, other: influxql.Integer, exp: false}, + {typ: influxql.Float, other: influxql.String, exp: false}, + {typ: influxql.Float, other: influxql.Boolean, exp: false}, + {typ: influxql.Float, other: influxql.Tag, exp: false}, + {typ: influxql.Integer, other: influxql.Float, exp: true}, + {typ: influxql.Integer, other: influxql.Integer, exp: false}, + {typ: influxql.Integer, other: influxql.String, exp: false}, + {typ: influxql.Integer, other: influxql.Boolean, exp: false}, + {typ: influxql.Integer, other: influxql.Tag, exp: false}, + {typ: influxql.String, other: influxql.Float, exp: true}, + {typ: influxql.String, other: influxql.Integer, exp: true}, + {typ: influxql.String, other: influxql.String, exp: false}, + {typ: influxql.String, other: influxql.Boolean, exp: false}, + {typ: influxql.String, other: influxql.Tag, exp: false}, + {typ: influxql.Boolean, other: influxql.Float, exp: true}, + {typ: influxql.Boolean, other: influxql.Integer, exp: true}, + {typ: influxql.Boolean, other: influxql.String, exp: true}, + {typ: influxql.Boolean, other: influxql.Boolean, exp: false}, + {typ: influxql.Boolean, other: influxql.Tag, exp: false}, + {typ: influxql.Tag, other: influxql.Float, exp: true}, + {typ: influxql.Tag, other: influxql.Integer, exp: true}, + {typ: influxql.Tag, other: influxql.String, exp: true}, + {typ: influxql.Tag, other: influxql.Boolean, exp: true}, + {typ: influxql.Tag, other: influxql.Tag, exp: false}, + } { + if got, exp := tt.typ.LessThan(tt.other), tt.exp; got != exp { + t.Errorf("%d. %q.LessThan(%q) = %v; exp = %v", i, tt.typ, tt.other, got, exp) + } + } +} + +// Ensure the SELECT statement can extract GROUP BY interval. 
+func TestSelectStatement_GroupByInterval(t *testing.T) { + q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" + stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s := stmt.(*influxql.SelectStatement) + d, err := s.GroupByInterval() + if d != 10*time.Minute { + t.Fatalf("group by interval not equal:\nexp=%s\ngot=%s", 10*time.Minute, d) + } + if err != nil { + t.Fatalf("error parsing group by interval: %s", err.Error()) + } +} + +// Ensure the SELECT statement can have its start and end time set +func TestSelectStatement_SetTimeRange(t *testing.T) { + q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" + stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s := stmt.(*influxql.SelectStatement) + start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() + end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max := MustTimeRange(s.Condition) + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure we can set a time on a select that already has one set + start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC() + q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix()) + stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s = stmt.(*influxql.SelectStatement) + min, max = MustTimeRange(s.Condition) + if start != min || end != max { + t.Fatalf("start and end times weren't equal:\n exp: %s\n got: %s\n exp: %s\n got:%s\n", start, min, end, max) + } + + // update and ensure it saves it + start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max = MustTimeRange(s.Condition) + + // TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to. 
+ // shouldn't matter for our purposes with continuous queries, but fix this later + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure that when we set a time range other where clause conditions are still there + q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)" + stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s = stmt.(*influxql.SelectStatement) + + // update and ensure it saves it + start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max = MustTimeRange(s.Condition) + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure the where clause is there + hasWhere := false + influxql.WalkFunc(s.Condition, func(n influxql.Node) { + if ex, ok := n.(*influxql.BinaryExpr); ok { + if lhs, ok := ex.LHS.(*influxql.VarRef); ok { + if lhs.Val == "foo" { + if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok { + if rhs.Val == "bar" { + hasWhere = true + } + } + } + } + } + }) + if !hasWhere { + t.Fatal("set time range cleared out the where clause") + } +} + +// Ensure the idents from the select clause can come out +func TestSelect_NamesInSelect(t *testing.T) { + s := MustParseSelectStatement("select count(asdf), count(bar) from cpu") + a := s.NamesInSelect() + if !reflect.DeepEqual(a, []string{"asdf", "bar"}) { + t.Fatal("expected names asdf and bar") + } +} + +// Ensure the idents from the where clause can come out +func TestSelect_NamesInWhere(t *testing.T) { + s := MustParseSelectStatement("select * from cpu where time > 23s AND (asdf = 'jkl' OR (foo = 'bar' AND baz = 'bar'))") + a := s.NamesInWhere() + if !reflect.DeepEqual(a, []string{"time", "asdf", "foo", "baz"}) { + t.Fatalf("exp: time,asdf,foo,baz\ngot: %s\n", strings.Join(a, ",")) + } +} + +func TestSelectStatement_HasWildcard(t *testing.T) { + var tests = []struct { + stmt string + wildcard bool + }{ + // No wildcards + { + stmt: `SELECT value FROM cpu`, + wildcard: false, + }, + + // Query wildcard + { + stmt: `SELECT * FROM cpu`, + wildcard: true, + }, + + // No GROUP BY wildcards + { + stmt: `SELECT value FROM cpu GROUP BY host`, + wildcard: false, + }, + + // No GROUP BY wildcards, time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + wildcard: false, + }, + + // GROUP BY wildcard + { + stmt: `SELECT value FROM cpu GROUP BY *`, + wildcard: true, + }, + + // GROUP BY wildcard with time + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, + wildcard: true, + }, + + // GROUP BY wildcard with explicit + { + stmt: `SELECT value FROM cpu GROUP BY *,host`, + wildcard: true, + }, + + // GROUP BY multiple wildcards + { + stmt: `SELECT value FROM cpu GROUP BY *,*`, + wildcard: true, + }, + + // Combo + { + stmt: `SELECT * FROM cpu GROUP BY *`, + wildcard: true, + }, + } + + for i, 
tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test wildcard detection. + if w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w { + t.Errorf("%d. %q: unexpected wildcard detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.wildcard, w) + continue + } + } +} + +// Test SELECT statement field rewrite. +func TestSelectStatement_RewriteFields(t *testing.T) { + var tests = []struct { + stmt string + rewrite string + err string + }{ + // No wildcards + { + stmt: `SELECT value FROM cpu`, + rewrite: `SELECT value FROM cpu`, + }, + + // Query wildcard + { + stmt: `SELECT * FROM cpu`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer FROM cpu`, + }, + + // Parser fundamentally prohibits multiple query sources + + // Query wildcard with explicit + { + stmt: `SELECT *,value1 FROM cpu`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, value1::float FROM cpu`, + }, + + // Query multiple wildcards + { + stmt: `SELECT *,* FROM cpu`, + rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, + }, + + // Query wildcards with group by + { + stmt: `SELECT * FROM cpu GROUP BY host`, + rewrite: `SELECT region::tag, value1::float, value2::integer FROM cpu GROUP BY host`, + }, + + // No GROUP BY wildcards + { + stmt: `SELECT value FROM cpu GROUP BY host`, + rewrite: `SELECT value FROM cpu GROUP BY host`, + }, + + // No GROUP BY wildcards, time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`, + }, + + // GROUP BY wildcard + { + stmt: `SELECT value FROM cpu GROUP BY *`, + rewrite: `SELECT value FROM cpu GROUP BY host, region`, + }, + + // GROUP BY wildcard with time + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`, + }, + + // GROUP BY wildcard with fill + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`, + }, + + // GROUP BY wildcard with explicit + { + stmt: `SELECT value FROM cpu GROUP BY *,host`, + rewrite: `SELECT value FROM cpu GROUP BY host, region, host`, + }, + + // GROUP BY multiple wildcards + { + stmt: `SELECT value FROM cpu GROUP BY *,*`, + rewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`, + }, + + // Combo + { + stmt: `SELECT * FROM cpu GROUP BY *`, + rewrite: `SELECT value1::float, value2::integer FROM cpu GROUP BY host, region`, + }, + + // Wildcard function with all fields. + { + stmt: `SELECT mean(*) FROM cpu`, + rewrite: `SELECT mean(value1::float) AS mean_value1, mean(value2::integer) AS mean_value2 FROM cpu`, + }, + + { + stmt: `SELECT distinct(*) FROM strings`, + rewrite: `SELECT distinct(string::string) AS distinct_string, distinct(value::float) AS distinct_value FROM strings`, + }, + + { + stmt: `SELECT distinct(*) FROM bools`, + rewrite: `SELECT distinct(bool::boolean) AS distinct_bool, distinct(value::float) AS distinct_value FROM bools`, + }, + + // Wildcard function with some fields excluded. 
+ { + stmt: `SELECT mean(*) FROM strings`, + rewrite: `SELECT mean(value::float) AS mean_value FROM strings`, + }, + + { + stmt: `SELECT mean(*) FROM bools`, + rewrite: `SELECT mean(value::float) AS mean_value FROM bools`, + }, + + // Wildcard function with an alias. + { + stmt: `SELECT mean(*) AS alias FROM cpu`, + rewrite: `SELECT mean(value1::float) AS alias_value1, mean(value2::integer) AS alias_value2 FROM cpu`, + }, + + // Query regex + { + stmt: `SELECT /1/ FROM cpu`, + rewrite: `SELECT value1::float FROM cpu`, + }, + + { + stmt: `SELECT value1 FROM cpu GROUP BY /h/`, + rewrite: `SELECT value1::float FROM cpu GROUP BY host`, + }, + + // Query regex + { + stmt: `SELECT mean(/1/) FROM cpu`, + rewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`, + }, + + // Rewrite subquery + { + stmt: `SELECT * FROM (SELECT mean(value1) FROM cpu GROUP BY host) GROUP BY *`, + rewrite: `SELECT mean::float FROM (SELECT mean(value1::float) FROM cpu GROUP BY host) GROUP BY host`, + }, + + // Invalid queries that can't be rewritten should return an error (to + // avoid a panic in the query engine) + { + stmt: `SELECT count(*) / 2 FROM cpu`, + err: `unsupported expression with wildcard: count(*) / 2`, + }, + + { + stmt: `SELECT * / 2 FROM (SELECT count(*) FROM cpu)`, + err: `unsupported expression with wildcard: * / 2`, + }, + + { + stmt: `SELECT count(/value/) / 2 FROM cpu`, + err: `unsupported expression with regex field: count(/value/) / 2`, + }, + + // This one should be possible though since there's no wildcard in the + // binary expression. + { + stmt: `SELECT value1 + value2, * FROM cpu`, + rewrite: `SELECT value1::float + value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, + }, + + { + stmt: `SELECT value1 + value2, /value/ FROM cpu`, + rewrite: `SELECT value1::float + value2::integer, value1::float, value2::integer FROM cpu`, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + var ic IteratorCreator + ic.FieldDimensionsFn = func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + switch m.Name { + case "cpu": + fields = map[string]influxql.DataType{ + "value1": influxql.Float, + "value2": influxql.Integer, + } + case "strings": + fields = map[string]influxql.DataType{ + "value": influxql.Float, + "string": influxql.String, + } + case "bools": + fields = map[string]influxql.DataType{ + "value": influxql.Float, + "bool": influxql.Boolean, + } + } + dimensions = map[string]struct{}{"host": struct{}{}, "region": struct{}{}} + return + } + + // Rewrite statement. + rw, err := stmt.(*influxql.SelectStatement).RewriteFields(&ic) + if tt.err != "" { + if err != nil && err.Error() != tt.err { + t.Errorf("%d. %q: unexpected error: %s != %s", i, tt.stmt, err.Error(), tt.err) + } else if err == nil { + t.Errorf("%d. %q: expected error", i, tt.stmt) + } + } else { + if err != nil { + t.Errorf("%d. %q: error: %s", i, tt.stmt, err) + } else if rw == nil && tt.err == "" { + t.Errorf("%d. %q: unexpected nil statement", i, tt.stmt) + } else if rw := rw.String(); tt.rewrite != rw { + t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw) + } + } + } +} + +// Test SELECT statement regex conditions rewrite. 
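+// The cases below check that exact-match regular expressions such as
+// `host =~ /^server-1$/` are rewritten into plain (in)equality, e.g.
+// `host = 'server-1'`, while regexes containing meta or escaped characters
+// are left untouched.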
+func TestSelectStatement_RewriteRegexConditions(t *testing.T) { + var tests = []struct { + in string + out string + }{ + {in: `SELECT value FROM cpu`, out: `SELECT value FROM cpu`}, + {in: `SELECT value FROM cpu WHERE host='server-1'`, out: `SELECT value FROM cpu WHERE host='server-1'`}, + {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host != 'server-1'`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, + + // Non matching regex + {in: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`, out: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`}, + {in: `SELECT value FROM cpu WHERE host =~ /server-1/`, out: `SELECT value FROM cpu WHERE host =~ /server-1/`}, + {in: `SELECT value FROM cpu WHERE host !~ /server-1/`, out: `SELECT value FROM cpu WHERE host !~ /server-1/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1/`, out: `SELECT value FROM cpu WHERE host =~ /^server-1/`}, + {in: `SELECT value FROM cpu WHERE host =~ /server-1$/`, out: `SELECT value FROM cpu WHERE host =~ /server-1$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`, out: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /\^$/`, out: `SELECT value FROM cpu WHERE host !~ /\^$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`, out: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^\$/`, out: `SELECT value FROM cpu WHERE host =~ /^\$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a/`, out: `SELECT value FROM cpu WHERE host !~ /^a/`}, + + // These regexes are not supported due to the presence of escaped or meta characters. + {in: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`, out: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^?a$/`, out: `SELECT value FROM cpu WHERE host !~ /^?a$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`, out: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\d$/`, out: `SELECT value FROM cpu WHERE host !~ /^\d$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a*$/`, out: `SELECT value FROM cpu WHERE host !~ /^a*$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^a.b$/`, out: `SELECT value FROM cpu WHERE host !~ /^a.b$/`}, + {in: `SELECT value FROM cpu WHERE host !~ /^ab+$/`, out: `SELECT value FROM cpu WHERE host !~ /^ab+$/`}, + {in: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`, out: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`}, + + // These regexes all match and will be rewritten. 
+ {in: `SELECT value FROM cpu WHERE host !~ /^a[2]$/`, out: `SELECT value FROM cpu WHERE host != 'a2'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server 1$/`, out: `SELECT value FROM cpu WHERE host = 'server 1'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^$/`, out: `SELECT value FROM cpu WHERE host = ''`}, + {in: `SELECT value FROM cpu WHERE host !~ /^$/`, out: `SELECT value FROM cpu WHERE host != ''`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server-2$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server]a$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server]a'`}, + {in: `SELECT value FROM cpu WHERE host =~ /^hello\?$/`, out: `SELECT value FROM cpu WHERE host = 'hello?'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\\$/`, out: `SELECT value FROM cpu WHERE host != '\\'`}, + {in: `SELECT value FROM cpu WHERE host !~ /^\\\$$/`, out: `SELECT value FROM cpu WHERE host != '\\$'`}, + } + + for i, test := range tests { + stmt, err := influxql.NewParser(strings.NewReader(test.in)).ParseStatement() + if err != nil { + t.Fatalf("[Example %d], %v", i, err) + } + + // Rewrite any supported regex conditions. + stmt.(*influxql.SelectStatement).RewriteRegexConditions() + + // Get the expected rewritten statement. + expStmt, err := influxql.NewParser(strings.NewReader(test.out)).ParseStatement() + if err != nil { + t.Fatalf("[Example %d], %v", i, err) + } + + // Compare the (potentially) rewritten AST to the expected AST. + if got, exp := stmt, expStmt; !reflect.DeepEqual(got, exp) { + t.Errorf("[Example %d]\nattempting %v\ngot %v\n%s\n\nexpected %v\n%s\n", i+1, test.in, got, mustMarshalJSON(got), exp, mustMarshalJSON(exp)) + } + } +} + +// Test SELECT statement time field rewrite. +func TestSelectStatement_RewriteTimeFields(t *testing.T) { + var tests = []struct { + s string + stmt influxql.Statement + }{ + { + s: `SELECT time, field1 FROM cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + }, + }, + { + s: `SELECT time AS timestamp, field1 FROM cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + TimeAlias: "timestamp", + }, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.s, err) + } + + // Rewrite statement. + stmt.(*influxql.SelectStatement).RewriteTimeFields() + if !reflect.DeepEqual(tt.stmt, stmt) { + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) + t.Errorf("%d. 
%q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) + } + } +} + +// Ensure that the IsRawQuery flag gets set properly +func TestSelectStatement_IsRawQuerySet(t *testing.T) { + var tests = []struct { + stmt string + isRaw bool + }{ + { + stmt: "select * from foo", + isRaw: true, + }, + { + stmt: "select value1,value2 from foo", + isRaw: true, + }, + { + stmt: "select value1,value2 from foo, time(10m)", + isRaw: true, + }, + { + stmt: "select mean(value) from foo where time < now() group by time(5m)", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by bar", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by *", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by *", + isRaw: false, + }, + } + + for _, tt := range tests { + s := MustParseSelectStatement(tt.stmt) + if s.IsRawQuery != tt.isRaw { + t.Errorf("'%s', IsRawQuery should be %v", tt.stmt, tt.isRaw) + } + } +} + +func TestSelectStatement_HasDerivative(t *testing.T) { + var tests = []struct { + stmt string + derivative bool + }{ + // No derivatives + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: true, + }, + + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT non_negative_derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: true, + }, + + // Invalid derivative function name + { + stmt: `SELECT typoDerivative(value) FROM cpu where time < now()`, + derivative: false, + }, + } + + for i, tt := range tests { + // Parse statement. + t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test derivative detection. + if d := stmt.(*influxql.SelectStatement).HasDerivative(); tt.derivative != d { + t.Errorf("%d. %q: unexpected derivative detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.derivative, d) + continue + } + } +} + +func TestSelectStatement_IsSimpleDerivative(t *testing.T) { + var tests = []struct { + stmt string + derivative bool + }{ + // No derivatives + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT derivative(value) FROM cpu`, + derivative: true, + }, + + // Query derivative + { + stmt: `SELECT non_negative_derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // Invalid derivative function name + { + stmt: `SELECT typoDerivative(value) FROM cpu where time < now()`, + derivative: false, + }, + } + + for i, tt := range tests { + // Parse statement. 
+ t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test derivative detection. + if d := stmt.(*influxql.SelectStatement).IsSimpleDerivative(); tt.derivative != d { + t.Errorf("%d. %q: unexpected derivative detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.derivative, d) + continue + } + } +} + +// Ensure binary expression names can be evaluated. +func TestBinaryExprName(t *testing.T) { + for i, tt := range []struct { + expr string + name string + }{ + {expr: `value + 1`, name: `value`}, + {expr: `"user" / total`, name: `user_total`}, + {expr: `("user" + total) / total`, name: `user_total_total`}, + } { + expr := influxql.MustParseExpr(tt.expr) + switch expr := expr.(type) { + case *influxql.BinaryExpr: + name := influxql.BinaryExprName(expr) + if name != tt.name { + t.Errorf("%d. unexpected name %s, got %s", i, name, tt.name) + } + default: + t.Errorf("%d. unexpected expr type: %T", i, expr) + } + } +} + +// Ensure the time range of an expression can be extracted. +func TestTimeRange(t *testing.T) { + for i, tt := range []struct { + expr string + min, max, err string + loc string + }{ + // LHS VarRef + {expr: `time > '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time >= '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time < '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`}, + {expr: `time <= '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + + // RHS VarRef + {expr: `'2000-01-01 00:00:00' > time`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`}, + {expr: `'2000-01-01 00:00:00' >= time`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + {expr: `'2000-01-01 00:00:00' < time`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `'2000-01-01 00:00:00' <= time`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + + // number literal + {expr: `time < 10`, min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`}, + + // Equality + {expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + + // Multiple time expressions. + {expr: `time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T23:59:59.999999999Z`}, + + // Min/max crossover + {expr: `time >= '2000-01-01 00:00:00' AND time <= '1999-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `1999-01-01T00:00:00Z`}, + + // Absolute time + {expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00Z`}, + + // Non-comparative expressions. + {expr: `time`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time + 2`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time - '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time AND '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + + // Invalid time expressions. 
+ {expr: `time > "2000-01-01 00:00:00"`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `invalid operation: time and *influxql.VarRef are not compatible`}, + {expr: `time > '2262-04-11 23:47:17'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 2262-04-11T23:47:17Z overflows time literal`}, + {expr: `time > '1677-09-20 19:12:43'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 1677-09-20T19:12:43Z underflows time literal`}, + + // Time zone expressions. + {expr: `time >= '2000-01-01'`, loc: `America/Los_Angeles`, min: `2000-01-01T00:00:00-08:00`, max: `0001-01-01T00:00:00Z`}, + {expr: `time <= '2000-01-01'`, loc: `America/Los_Angeles`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00-08:00`}, + {expr: `time >= '2000-01-01 03:17:00'`, loc: `America/Los_Angeles`, min: `2000-01-01T03:17:00-08:00`, max: `0001-01-01T00:00:00Z`}, + {expr: `time <= '2000-01-01 03:17:00'`, loc: `America/Los_Angeles`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T03:17:00-08:00`}, + } { + t.Run(tt.expr, func(t *testing.T) { + // Load the time zone if one was specified. + var loc *time.Location + if tt.loc != "" { + l, err := time.LoadLocation(tt.loc) + if err != nil { + t.Fatalf("unable to load time zone %s: %s", tt.loc, err) + } + loc = l + } + + // Extract time range. + expr := MustParseExpr(tt.expr) + min, max, err := influxql.TimeRange(expr, loc) + + // Compare with expected min/max. + if min := min.Format(time.RFC3339Nano); tt.min != min { + t.Fatalf("%d. %s: unexpected min:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.min, min) + } + if max := max.Format(time.RFC3339Nano); tt.max != max { + t.Fatalf("%d. %s: unexpected max:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.max, max) + } + if (err != nil && err.Error() != tt.err) || (err == nil && tt.err != "") { + t.Fatalf("%d. %s: unexpected error:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.err, err) + } + }) + } +} + +// Ensure that we see if a where clause has only time limitations +func TestOnlyTimeExpr(t *testing.T) { + var tests = []struct { + stmt string + exp bool + }{ + { + stmt: `SELECT value FROM myseries WHERE value > 1`, + exp: false, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z'`, + exp: true, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z'`, + exp: true, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND asdf = 'bar'`, + exp: false, + }, + { + stmt: `SELECT value FROM foo WHERE asdf = 'jkl' AND (time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z')`, + exp: false, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + if influxql.OnlyTimeExpr(stmt.(*influxql.SelectStatement).Condition) != tt.exp { + t.Fatalf("%d. expected statement to return only time dimension to be %t: %s", i, tt.exp, tt.stmt) + } + } +} + +// Ensure an AST node can be rewritten. +func TestRewrite(t *testing.T) { + expr := MustParseExpr(`time > 1 OR foo = 2`) + + // Flip LHS & RHS in all binary expressions. + act := influxql.RewriteFunc(expr, func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + return &influxql.BinaryExpr{Op: n.Op, LHS: n.RHS, RHS: n.LHS} + default: + return n + } + }) + + // Verify that everything is flipped. 
+ if act := act.String(); act != `2 = foo OR 1 > time` { + t.Fatalf("unexpected result: %s", act) + } +} + +// Ensure an Expr can be rewritten handling nils. +func TestRewriteExpr(t *testing.T) { + expr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`) + + // Remove all time expressions. + act := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr { + switch e := e.(type) { + case *influxql.BinaryExpr: + if lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == "time" { + return nil + } + } + return e + }) + + // Verify that everything is flipped. + if act := act.String(); act != `foo = 2` { + t.Fatalf("unexpected result: %s", act) + } +} + +// Ensure that the String() value of a statement is parseable +func TestParseString(t *testing.T) { + var tests = []struct { + stmt string + }{ + { + stmt: `SELECT "cpu load" FROM myseries`, + }, + { + stmt: `SELECT "cpu load" FROM "my series"`, + }, + { + stmt: `SELECT "cpu\"load" FROM myseries`, + }, + { + stmt: `SELECT "cpu'load" FROM myseries`, + }, + { + stmt: `SELECT "cpu load" FROM "my\"series"`, + }, + { + stmt: `SELECT "field with spaces" FROM "\"ugly\" db"."\"ugly\" rp"."\"ugly\" measurement"`, + }, + { + stmt: `SELECT * FROM myseries`, + }, + { + stmt: `DROP DATABASE "!"`, + }, + { + stmt: `DROP RETENTION POLICY "my rp" ON "a database"`, + }, + { + stmt: `CREATE RETENTION POLICY "my rp" ON "a database" DURATION 1d REPLICATION 1`, + }, + { + stmt: `ALTER RETENTION POLICY "my rp" ON "a database" DEFAULT`, + }, + { + stmt: `SHOW RETENTION POLICIES ON "a database"`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY IN ("a long name", short)`, + }, + { + stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`, + }, + // See issues https://github.com/influxdata/influxdb/issues/1647 + // and https://github.com/influxdata/influxdb/issues/4404 + //{ + // stmt: `DELETE FROM "my db"."my rp"."my measurement"`, + //}, + { + stmt: `DROP SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp"`, + }, + { + stmt: `CREATE SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp" DESTINATIONS ALL 'my host', 'my other host'`, + }, + { + stmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`, + }, + { + stmt: `SHOW MEASUREMENTS WITH MEASUREMENT = "and/or"`, + }, + { + stmt: `DROP USER "user with spaces"`, + }, + { + stmt: `GRANT ALL PRIVILEGES ON "db with spaces" TO "user with spaces"`, + }, + { + stmt: `GRANT ALL PRIVILEGES TO "user with spaces"`, + }, + { + stmt: `SHOW GRANTS FOR "user with spaces"`, + }, + { + stmt: `REVOKE ALL PRIVILEGES ON "db with spaces" FROM "user with spaces"`, + }, + { + stmt: `REVOKE ALL PRIVILEGES FROM "user with spaces"`, + }, + { + stmt: `CREATE DATABASE "db with spaces"`, + }, + } + + for _, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + stmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement() + if err != nil { + t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String()) + } + + if !reflect.DeepEqual(stmt, stmtCopy) { + t.Fatalf("statement changed after stringifying and re-parsing:\noriginal : %v\nre-parsed: %v\n", tt.stmt, stmtCopy.String()) + } + } +} + +// Ensure an expression can be reduced. +func TestEval(t *testing.T) { + for i, tt := range []struct { + in string + out interface{} + data map[string]interface{} + }{ + // Number literals. 
+ {in: `1 + 2`, out: int64(3)}, + {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{"foo": float64(5)}}, + {in: `foo / 2`, out: float64(2), data: map[string]interface{}{"foo": float64(4)}}, + {in: `4 = 4`, out: true}, + {in: `4 <> 4`, out: false}, + {in: `6 > 4`, out: true}, + {in: `4 >= 4`, out: true}, + {in: `4 < 6`, out: true}, + {in: `4 <= 4`, out: true}, + {in: `4 AND 5`, out: nil}, + {in: `0 = 'test'`, out: false}, + {in: `1.0 = 1`, out: true}, + {in: `1.2 = 1`, out: false}, + + // Boolean literals. + {in: `true AND false`, out: false}, + {in: `true OR false`, out: true}, + {in: `false = 4`, out: false}, + + // String literals. + {in: `'foo' = 'bar'`, out: false}, + {in: `'foo' = 'foo'`, out: true}, + {in: `'' = 4`, out: nil}, + + // Regex literals. + {in: `'foo' =~ /f.*/`, out: true}, + {in: `'foo' =~ /b.*/`, out: false}, + {in: `'foo' !~ /f.*/`, out: false}, + {in: `'foo' !~ /b.*/`, out: true}, + + // Variable references. + {in: `foo`, out: "bar", data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: true, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: nil, data: map[string]interface{}{"foo": nil}}, + {in: `'bar' = foo`, out: nil, data: map[string]interface{}{"foo": nil}}, + {in: `foo <> 'bar'`, out: true, data: map[string]interface{}{"foo": "xxx"}}, + {in: `foo =~ /b.*/`, out: true, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo !~ /b.*/`, out: false, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"foo": float64(4)}}, + {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"bar": float64(4)}}, + } { + // Evaluate expression. + out := influxql.Eval(MustParseExpr(tt.in), tt.data) + + // Compare with expected output. + if !reflect.DeepEqual(tt.out, out) { + t.Errorf("%d. 
%s: unexpected output:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.in, tt.out, out) + continue + } + } +} + +type EvalFixture map[string]map[string]influxql.DataType + +func (e EvalFixture) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + m := e[measurement.Name] + if m == nil { + return influxql.Unknown + } + return m[field] +} + +func TestEvalType(t *testing.T) { + for i, tt := range []struct { + name string + in string + typ influxql.DataType + data EvalFixture + }{ + { + name: `a single data type`, + in: `min(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + }, + }, + { + name: `multiple data types`, + in: `min(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + "mem": map[string]influxql.DataType{ + "value": influxql.String, + }, + }, + }, + { + name: `count() with a float`, + in: `count(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Float, + }, + }, + }, + { + name: `mean() with an integer`, + in: `mean(value)`, + typ: influxql.Float, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + }, + }, + { + name: `value inside a parenthesis`, + in: `(value)`, + typ: influxql.Float, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Float, + }, + }, + }, + } { + sources := make([]influxql.Source, 0, len(tt.data)) + for src := range tt.data { + sources = append(sources, &influxql.Measurement{Name: src}) + } + + expr := influxql.MustParseExpr(tt.in) + typ := influxql.EvalType(expr, sources, tt.data) + if typ != tt.typ { + t.Errorf("%d. %s: unexpected type:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.name, tt.typ, typ) + } + } +} + +// Ensure an expression can be reduced. +func TestReduce(t *testing.T) { + now := mustParseTime("2000-01-01T00:00:00Z") + + for i, tt := range []struct { + in string + out string + data Valuer + }{ + // Number literals. + {in: `1 + 2`, out: `3`}, + {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.500`}, + {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`}, + {in: `4 / 0`, out: `0.000`}, + {in: `1 / 2`, out: `0.500`}, + {in: `2 % 3`, out: `2`}, + {in: `5 % 2`, out: `1`}, + {in: `2 % 0`, out: `0`}, + {in: `2.5 % 0`, out: `NaN`}, + {in: `254 & 3`, out: `2`}, + {in: `254 | 3`, out: `255`}, + {in: `254 ^ 3`, out: `253`}, + {in: `-3 & 3`, out: `1`}, + {in: `8 & -3`, out: `8`}, + {in: `8.5 & -3`, out: `8.500 & -3`}, + {in: `4 = 4`, out: `true`}, + {in: `4 <> 4`, out: `false`}, + {in: `6 > 4`, out: `true`}, + {in: `4 >= 4`, out: `true`}, + {in: `4 < 6`, out: `true`}, + {in: `4 <= 4`, out: `true`}, + {in: `4 AND 5`, out: `4 AND 5`}, + + // Boolean literals. + {in: `true AND false`, out: `false`}, + {in: `true OR false`, out: `true`}, + {in: `true OR (foo = bar AND 1 > 2)`, out: `true`}, + {in: `(foo = bar AND 1 > 2) OR true`, out: `true`}, + {in: `false OR (foo = bar AND 1 > 2)`, out: `false`}, + {in: `(foo = bar AND 1 > 2) OR false`, out: `false`}, + {in: `true = false`, out: `false`}, + {in: `true <> false`, out: `true`}, + {in: `true + false`, out: `true + false`}, + + // Time literals with now(). 
+ {in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{"now()": now}}, + {in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}}, + {in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() = now()`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() <> now()`, out: `false`, data: map[string]interface{}{"now()": now}}, + {in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() <= now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() >= now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() > now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{"now()": now}}, + {in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now()`, out: `now()`}, + {in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`}, + + // Time literals. + {in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`}, + {in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, + {in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`}, + {in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, + {in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`}, + {in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, + {in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, + {in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, + {in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, + {in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`}, + {in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, + + // Duration literals. + {in: `10m + 1h - 60s`, out: `69m`}, + {in: `(10m / 2) * 5`, out: `25m`}, + {in: `60s = 1m`, out: `true`}, + {in: `60s <> 1m`, out: `false`}, + {in: `60s < 1h`, out: `true`}, + {in: `60s <= 1h`, out: `true`}, + {in: `60s > 12s`, out: `true`}, + {in: `60s >= 1m`, out: `true`}, + {in: `60s AND 1m`, out: `1m AND 1m`}, + {in: `60m / 0`, out: `0s`}, + {in: `60m + 50`, out: `1h + 50`}, + + // String literals. 
+ {in: `'foo' + 'bar'`, out: `'foobar'`}, + + // Variable references. + {in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, + {in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, + } { + // Fold expression. + expr := influxql.Reduce(MustParseExpr(tt.in), tt.data) + + // Compare with expected output. + if out := expr.String(); tt.out != out { + t.Errorf("%d. %s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out) + continue + } + } +} + +func Test_fieldsNames(t *testing.T) { + for _, test := range []struct { + in []string + out []string + alias []string + }{ + { //case: binary expr(valRef) + in: []string{"value+value"}, + out: []string{"value", "value"}, + alias: []string{"value_value"}, + }, + { //case: binary expr + valRef + in: []string{"value+value", "temperature"}, + out: []string{"value", "value", "temperature"}, + alias: []string{"value_value", "temperature"}, + }, + { //case: aggregate expr + in: []string{"mean(value)"}, + out: []string{"mean"}, + alias: []string{"mean"}, + }, + { //case: binary expr(aggregate expr) + in: []string{"mean(value) + max(value)"}, + out: []string{"value", "value"}, + alias: []string{"mean_max"}, + }, + { //case: binary expr(aggregate expr) + valRef + in: []string{"mean(value) + max(value)", "temperature"}, + out: []string{"value", "value", "temperature"}, + alias: []string{"mean_max", "temperature"}, + }, + { //case: mixed aggregate and varRef + in: []string{"mean(value) + temperature"}, + out: []string{"value", "temperature"}, + alias: []string{"mean_temperature"}, + }, + { //case: ParenExpr(varRef) + in: []string{"(value)"}, + out: []string{"value"}, + alias: []string{"value"}, + }, + { //case: ParenExpr(varRef + varRef) + in: []string{"(value + value)"}, + out: []string{"value", "value"}, + alias: []string{"value_value"}, + }, + { //case: ParenExpr(aggregate) + in: []string{"(mean(value))"}, + out: []string{"value"}, + alias: []string{"mean"}, + }, + { //case: ParenExpr(aggregate + aggregate) + in: []string{"(mean(value) + max(value))"}, + out: []string{"value", "value"}, + alias: []string{"mean_max"}, + }, + } { + fields := influxql.Fields{} + for _, s := range test.in { + expr := MustParseExpr(s) + fields = append(fields, &influxql.Field{Expr: expr}) + } + got := fields.Names() + if !reflect.DeepEqual(got, test.out) { + t.Errorf("get fields name:\nexp=%v\ngot=%v\n", test.out, got) + } + alias := fields.AliasNames() + if !reflect.DeepEqual(alias, test.alias) { + t.Errorf("get fields alias name:\nexp=%v\ngot=%v\n", test.alias, alias) + } + } + +} + +func TestSelect_ColumnNames(t *testing.T) { + for i, tt := range []struct { + stmt *influxql.SelectStatement + columns []string + }{ + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + }), + }, + columns: []string{"time", "value", "value_1", "value_1_1"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + {Expr: &influxql.VarRef{Val: 
"value"}}, + }), + }, + columns: []string{"time", "value", "value_1", "value_2"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "total"}, Alias: "value"}, + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value_1", "value", "value_2"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + }), + TimeAlias: "timestamp", + }, + columns: []string{"timestamp", "value"}, + }, + } { + columns := tt.stmt.ColumnNames() + if !reflect.DeepEqual(columns, tt.columns) { + t.Errorf("%d. expected %s, got %s", i, tt.columns, columns) + } + } +} + +func TestSelect_Privileges(t *testing.T) { + stmt := &influxql.SelectStatement{ + Target: &influxql.Target{ + Measurement: &influxql.Measurement{Database: "db2"}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db0"}, + &influxql.Measurement{Database: "db1"}, + }, + } + + exp := influxql.ExecutionPrivileges{ + influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, + } + + got, err := stmt.RequiredPrivileges() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp, got) { + t.Errorf("exp: %v, got: %v", exp, got) + } +} + +func TestSelect_SubqueryPrivileges(t *testing.T) { + stmt := &influxql.SelectStatement{ + Target: &influxql.Target{ + Measurement: &influxql.Measurement{Database: "db2"}, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db0"}, + &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db1"}, + }, + }, + }, + }, + } + + exp := influxql.ExecutionPrivileges{ + influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, + influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, + } + + got, err := stmt.RequiredPrivileges() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp, got) { + t.Errorf("exp: %v, got: %v", exp, got) + } +} + +func TestShow_Privileges(t *testing.T) { + for _, c := range []struct { + stmt influxql.Statement + exp influxql.ExecutionPrivileges + }{ + { + stmt: &influxql.ShowDatabasesStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.NoPrivileges}}, + }, + { + stmt: &influxql.ShowFieldKeysStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowMeasurementsStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowQueriesStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowRetentionPoliciesStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowSeriesStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowShardGroupsStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + { + stmt: &influxql.ShowShardsStatement{}, + exp: 
influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + { + stmt: &influxql.ShowStatsStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + { + stmt: &influxql.ShowSubscriptionsStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + { + stmt: &influxql.ShowDiagnosticsStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + { + stmt: &influxql.ShowTagKeysStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowTagValuesStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, + }, + { + stmt: &influxql.ShowUsersStatement{}, + exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, + }, + } { + got, err := c.stmt.RequiredPrivileges() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(c.exp, got) { + t.Errorf("exp: %v, got: %v", c.exp, got) + } + } +} + +func TestSources_Names(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + &influxql.Measurement{ + Name: "mem", + }, + }) + + names := sources.Names() + if names[0] != "cpu" { + t.Errorf("expected cpu, got %s", names[0]) + } + if names[1] != "mem" { + t.Errorf("expected mem, got %s", names[1]) + } +} + +func TestSources_HasSystemSource(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "_measurements", + }, + }) + + ok := sources.HasSystemSource() + if !ok { + t.Errorf("expected to find a system source, found none") + } + + sources = influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + }) + + ok = sources.HasSystemSource() + if ok { + t.Errorf("expected to find no system source, found one") + } +} + +// Parse statements that might appear valid but should return an error. +// If allowed to execute, at least some of these statements would result in a panic. +func TestParse_Errors(t *testing.T) { + for _, tt := range []struct { + tmpl string + good string + bad string + }{ + // Second argument to derivative must be duration + {tmpl: `SELECT derivative(f, %s) FROM m`, good: "1h", bad: "true"}, + } { + good := fmt.Sprintf(tt.tmpl, tt.good) + if _, err := influxql.ParseStatement(good); err != nil { + t.Fatalf("statement %q should have parsed correctly but returned error: %s", good, err) + } + + bad := fmt.Sprintf(tt.tmpl, tt.bad) + if _, err := influxql.ParseStatement(bad); err == nil { + t.Fatalf("statement %q should have resulted in a parse error but did not", bad) + } + } +} + +// This test checks to ensure that we have given thought to the database +// context required for security checks. If a new statement is added, this +// test will fail until it is categorized into the correct bucket below. 
+func Test_EnforceHasDefaultDatabase(t *testing.T) { + pkg, err := importer.Default().Import("github.com/influxdata/influxdb/influxql") + if err != nil { + fmt.Printf("error: %s\n", err.Error()) + return + } + statements := []string{} + + // this is a list of statements that do not have a database context + exemptStatements := []string{ + "CreateDatabaseStatement", + "CreateUserStatement", + "DeleteSeriesStatement", + "DropDatabaseStatement", + "DropMeasurementStatement", + "DropSeriesStatement", + "DropShardStatement", + "DropUserStatement", + "GrantAdminStatement", + "KillQueryStatement", + "RevokeAdminStatement", + "SelectStatement", + "SetPasswordUserStatement", + "ShowContinuousQueriesStatement", + "ShowDatabasesStatement", + "ShowDiagnosticsStatement", + "ShowGrantsForUserStatement", + "ShowQueriesStatement", + "ShowShardGroupsStatement", + "ShowShardsStatement", + "ShowStatsStatement", + "ShowSubscriptionsStatement", + "ShowUsersStatement", + } + + exists := func(stmt string) bool { + switch stmt { + // These are functions with the word statement in them, and can be ignored + case "Statement", "MustParseStatement", "ParseStatement", "RewriteStatement": + return true + default: + // check the exempt statements + for _, s := range exemptStatements { + if s == stmt { + return true + } + } + // check the statements that passed the interface test for HasDefaultDatabase + for _, s := range statements { + if s == stmt { + return true + } + } + return false + } + } + + needsHasDefault := []interface{}{ + &influxql.AlterRetentionPolicyStatement{}, + &influxql.CreateContinuousQueryStatement{}, + &influxql.CreateRetentionPolicyStatement{}, + &influxql.CreateSubscriptionStatement{}, + &influxql.DeleteStatement{}, + &influxql.DropContinuousQueryStatement{}, + &influxql.DropRetentionPolicyStatement{}, + &influxql.DropSubscriptionStatement{}, + &influxql.GrantStatement{}, + &influxql.RevokeStatement{}, + &influxql.ShowFieldKeysStatement{}, + &influxql.ShowMeasurementsStatement{}, + &influxql.ShowRetentionPoliciesStatement{}, + &influxql.ShowSeriesStatement{}, + &influxql.ShowTagKeysStatement{}, + &influxql.ShowTagValuesStatement{}, + } + + for _, stmt := range needsHasDefault { + statements = append(statements, strings.TrimPrefix(fmt.Sprintf("%T", stmt), "*influxql.")) + if _, ok := stmt.(influxql.HasDefaultDatabase); !ok { + t.Errorf("%T was expected to declare DefaultDatabase method", stmt) + } + + } + + for _, declName := range pkg.Scope().Names() { + if strings.HasSuffix(declName, "Statement") { + if !exists(declName) { + t.Errorf("unchecked statement %s. please update this test to determine if this statement needs to declare 'DefaultDatabase'", declName) + } + } + } +} + +// Valuer represents a simple wrapper around a map to implement the influxql.Valuer interface. +type Valuer map[string]interface{} + +// Value returns the value and existence of a key. +func (o Valuer) Value(key string) (v interface{}, ok bool) { + v, ok = o[key] + return +} + +// MustTimeRange will parse a time range. Panic on error. +func MustTimeRange(expr influxql.Expr) (min, max time.Time) { + min, max, err := influxql.TimeRange(expr, nil) + if err != nil { + panic(err) + } + return min, max +} + +// mustParseTime parses an IS0-8601 string. Panic on error. +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339, s) + if err != nil { + panic(err.Error()) + } + return t +} + +// BenchmarkExprNames benchmarks how long it takes to run ExprNames. 
+func BenchmarkExprNames(b *testing.B) { + exprs := make([]string, 100) + for i := range exprs { + exprs[i] = fmt.Sprintf("host = 'server%02d'", i) + } + condition := MustParseExpr(strings.Join(exprs, " OR ")) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + refs := influxql.ExprNames(condition) + if have, want := refs, []influxql.VarRef{{Val: "host"}}; !reflect.DeepEqual(have, want) { + b.Fatalf("unexpected expression names: have=%s want=%s", have, want) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go new file mode 100644 index 0000000..22623a1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go @@ -0,0 +1,1069 @@ +package influxql + +import ( + "fmt" + "math" + "sort" + "time" +) + +/* +This file contains iterator implementations for each function call available +in InfluxQL. Call iterators are separated into two groups: + +1. Map/reduce-style iterators - these are passed to IteratorCreator so that + processing can be at the low-level storage and aggregates are returned. + +2. Raw aggregate iterators - these require the full set of data for a window. + These are handled by the select() function and raw points are streamed in + from the low-level storage. + +There are helpers to aid in building aggregate iterators. For simple map/reduce +iterators, you can use the reduceIterator types and pass a reduce function. This +reduce function is passed a previous and current value and the new timestamp, +value, and auxilary fields are returned from it. + +For raw aggregate iterators, you can use the reduceSliceIterators which pass +in a slice of all points to the function and return a point. For more complex +iterator types, you may need to create your own iterators by hand. + +Once your iterator is complete, you'll need to add it to the NewCallIterator() +function if it is to be available to IteratorCreators and add it to the select() +function to allow it to be included during planning. +*/ + +// NewCallIterator returns a new iterator for a Call. +func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + name := opt.Expr.(*Call).Name + switch name { + case "count": + return newCountIterator(input, opt) + case "min": + return newMinIterator(input, opt) + case "max": + return newMaxIterator(input, opt) + case "sum": + return newSumIterator(input, opt) + case "first": + return newFirstIterator(input, opt) + case "last": + return newLastIterator(input, opt) + case "mean": + return newMeanIterator(input, opt) + default: + return nil, fmt.Errorf("unsupported function call: %s", name) + } +} + +// newCountIterator returns an iterator for operating on a count() call. +func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + // FIXME: Wrap iterator in int-type iterator and always output int value. 
+ + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newFloatReduceIntegerIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newStringReduceIntegerIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newBooleanReduceIntegerIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported count iterator type: %T", input) + } +} + +// FloatCountReduce returns the count of points. +func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// IntegerCountReduce returns the count of points. +func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// StringCountReduce returns the count of points. +func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// BooleanCountReduce returns the count of points. +func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// newMinIterator returns an iterator for operating on a min() call. +func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMinReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMinReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMinReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported min iterator type: %T", input) + } +} + +// FloatMinReduce returns the minimum value between prev & curr. +func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMinReduce returns the minimum value between prev & curr. 
+func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMinReduce returns the minimum value between prev & curr. +func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newMaxIterator returns an iterator for operating on a max() call. +func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMaxReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMaxReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMaxReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported max iterator type: %T", input) + } +} + +// FloatMaxReduce returns the maximum value between prev & curr. +func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMaxReduce returns the maximum value between prev & curr. +func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanMaxReduce returns the minimum value between prev & curr. +func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newSumIterator returns an iterator for operating on a sum() call. +func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported sum iterator type: %T", input) + } +} + +// FloatSumReduce returns the sum prev value & curr value. 
+func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// IntegerSumReduce returns the sum prev value & curr value. +func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// newFirstIterator returns an iterator for operating on a first() call. +func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatFirstReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerFirstReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringFirstReduce, nil) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanFirstReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported first iterator type: %T", input) + } +} + +// FloatFirstReduce returns the first point sorted by time. +func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerFirstReduce returns the first point sorted by time. +func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// StringFirstReduce returns the first point sorted by time. +func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanFirstReduce returns the first point sorted by time. +func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// newLastIterator returns an iterator for operating on a last() call. 
+func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatLastReduce, nil) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerLastReduce, nil) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringLastReduce, nil) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanLastReduce, nil) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported last iterator type: %T", input) + } +} + +// FloatLastReduce returns the last point sorted by time. +func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerLastReduce returns the last point sorted by time. +func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// StringLastReduce returns the first point sorted by time. +func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanLastReduce returns the first point sorted by time. +func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { + return curr.Time, curr.Value, cloneAux(curr.Aux) + } + return prev.Time, prev.Value, prev.Aux +} + +// NewDistinctIterator returns an iterator for operating on a distinct() call. 
+func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDistinctReducer() + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDistinctReducer() + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringDistinctReducer() + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanDistinctReducer() + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) + } +} + +// newMeanIterator returns an iterator for operating on a mean() call. +func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMeanReducer() + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMeanReducer() + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported mean iterator type: %T", input) + } +} + +// NewMedianIterator returns an iterator for operating on a median() call. +func NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + return newMedianIterator(input, opt) +} + +// newMedianIterator returns an iterator for operating on a median() call. +func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatMedianReduceSlice returns the median value within a window. +func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(floatPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: lo.Value + (hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}} +} + +// IntegerMedianReduceSlice returns the median value within a window. 
+func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint { + if len(a) == 1 { + return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(integerPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} +} + +// newModeIterator returns an iterator for operating on a mode() call. +func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatModeReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSliceFuncReducer(StringModeReduceSlice) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatModeReduceSlice returns the mode value within a window. +func FloatModeReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + sort.Sort(floatPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []FloatPoint{{Time: ZeroTime, Value: mostMode}} +} + +// IntegerModeReduceSlice returns the mode value within a window. +func IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint { + if len(a) == 1 { + return a + } + sort.Sort(integerPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []IntegerPoint{{Time: ZeroTime, Value: mostMode}} +} + +// StringModeReduceSlice returns the mode value within a window. 
+func StringModeReduceSlice(a []StringPoint) []StringPoint { + if len(a) == 1 { + return a + } + + sort.Sort(stringPointsByValue(a)) + + mostFreq := 0 + currFreq := 0 + currMode := a[0].Value + mostMode := a[0].Value + mostTime := a[0].Time + currTime := a[0].Time + + for _, p := range a { + if p.Value != currMode { + currFreq = 1 + currMode = p.Value + currTime = p.Time + continue + } + currFreq++ + if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { + continue + } + mostFreq = currFreq + mostMode = p.Value + mostTime = p.Time + } + + return []StringPoint{{Time: ZeroTime, Value: mostMode}} +} + +// BooleanModeReduceSlice returns the mode value within a window. +func BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint { + if len(a) == 1 { + return a + } + + trueFreq := 0 + falsFreq := 0 + mostMode := false + + for _, p := range a { + if p.Value { + trueFreq++ + } else { + falsFreq++ + } + } + // In case either of true or false are mode then retuned mode value wont be + // of metric with oldest timestamp + if trueFreq >= falsFreq { + mostMode = true + } + + return []BooleanPoint{{Time: ZeroTime, Value: mostMode}} +} + +// newStddevIterator returns an iterator for operating on a stddev() call. +func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) + } +} + +// FloatStddevReduceSlice returns the stddev value within a window. +func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + count++ + mean += (p.Value - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + variance += math.Pow(p.Value-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// IntegerStddevReduceSlice returns the stddev value within a window. +func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + count++ + mean += (float64(p.Value) - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + variance += math.Pow(float64(p.Value)-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// newSpreadIterator returns an iterator for operating on a spread() call. 
+func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported spread iterator type: %T", input) + } +} + +// FloatSpreadReduceSlice returns the spread value within a window. +func FloatSpreadReduceSlice(a []FloatPoint) []FloatPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + min = math.Min(min, p.Value) + max = math.Max(max, p.Value) + } + return []FloatPoint{{Time: ZeroTime, Value: max - min}} +} + +// IntegerSpreadReduceSlice returns the spread value within a window. +func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + if p.Value < min { + min = p.Value + } + if p.Value > max { + max = p.Value + } + } + return []IntegerPoint{{Time: ZeroTime, Value: max - min}} +} + +func newTopIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatTopReducer(n) + return fn, fn + } + itr := newFloatReduceFloatIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerTopReducer(n) + return fn, fn + } + itr := newIntegerReduceIntegerIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + default: + return nil, fmt.Errorf("unsupported top iterator type: %T", input) + } +} + +func newBottomIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatBottomReducer(n) + return fn, fn + } + itr := newFloatReduceFloatIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerBottomReducer(n) + return fn, fn + } + itr := newIntegerReduceIntegerIterator(input, opt, createFn) + itr.keepTags = keepTags + return itr, nil + default: + return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) + } +} + +// newPercentileIterator returns an iterator for operating on a percentile() call. 
+func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) + } +} + +// NewFloatPercentileReduceSliceFunc returns the percentile value within a window. +func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(floatPointsByValue(a)) + return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} + } +} + +// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window. +func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(integerPointsByValue(a)) + return []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} + } +} + +// newDerivativeIterator returns an iterator for operating on a derivative() call. +func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) + } +} + +// newDifferenceIterator returns an iterator for operating on a difference() call. +func newDifferenceIterator(input Iterator, opt IteratorOptions, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatDifferenceReducer(isNonNegative) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerDifferenceReducer(isNonNegative) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported difference iterator type: %T", input) + } +} + +// newElapsedIterator returns an iterator for operating on a elapsed() call. 
+func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatElapsedReducer(interval) + return fn, fn + } + return newFloatStreamIntegerIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerElapsedReducer(interval) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanElapsedReducer(interval) + return fn, fn + } + return newBooleanStreamIntegerIterator(input, createFn, opt), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringElapsedReducer(interval) + return fn, fn + } + return newStringStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// newMovingAverageIterator returns an iterator for operating on a moving_average() call. +func newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMovingAverageReducer(n) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMovingAverageReducer(n) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported moving average iterator type: %T", input) + } +} + +// newCumulativeSumIterator returns an iterator for operating on a cumulative_sum() call. +func newCumulativeSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatCumulativeSumReducer() + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerCumulativeSumReducer() + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported cumulative sum iterator type: %T", input) + } +} + +// newHoltWintersIterator returns an iterator for operating on a holt_winters() call. +func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) + return fn, fn + } + return newIntegerReduceFloatIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// NewSampleIterator returns an iterator for operating on a sample() call (exported for use in test). 
+func NewSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + return newSampleIterator(input, opt, size) +} + +// newSampleIterator returns an iterator for operating on a sample() call. +func newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSampleReducer(size) + return fn, fn + } + return newFloatReduceFloatIterator(input, opt, createFn), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSampleReducer(size) + return fn, fn + } + return newIntegerReduceIntegerIterator(input, opt, createFn), nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSampleReducer(size) + return fn, fn + } + return newStringReduceStringIterator(input, opt, createFn), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanSampleReducer(size) + return fn, fn + } + return newBooleanReduceBooleanIterator(input, opt, createFn), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) + } +} + +// newIntegralIterator returns an iterator for operating on a integral() call. +func newIntegralIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatIntegralReducer(interval, opt) + return fn, fn + } + return newFloatStreamFloatIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerIntegralReducer(interval, opt) + return fn, fn + } + return newIntegerStreamFloatIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported integral iterator type: %T", input) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go new file mode 100644 index 0000000..583c92c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go @@ -0,0 +1,983 @@ +package influxql_test + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a float iterator can be created for a count() call. 
+func TestCallIterator_Count_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a count() call. +func TestCallIterator_Count_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a count() call. 
+func TestCallIterator_Count_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a count() call. 
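The expected rows in the count() tests above imply how points are bucketed: with `Interval: 5 * time.Nanosecond` and `Dimensions: []string{"host"}`, points are grouped per host and per window whose start is the timestamp truncated down to a multiple of the interval, so times 0, 1 and 2 land in the window starting at 0 while time 23 lands in the one starting at 20. A small sketch of that truncation, assuming non-negative timestamps (the real iterator also handles offsets, time zones and descending order, none of which are shown here):

```go
package main

import (
	"fmt"
	"time"
)

// windowStart truncates a timestamp down to the start of its interval window,
// matching the grouping asserted by the count() tests (23ns -> window at 20ns).
func windowStart(t int64, interval time.Duration) int64 {
	d := int64(interval)
	return t - t%d
}

func main() {
	interval := 5 * time.Nanosecond
	for _, ts := range []int64{0, 1, 2, 5, 23} {
		fmt.Printf("point at %2dns -> window starting at %2dns\n", ts, windowStart(ts, interval))
	}
}
```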
+func TestCallIterator_Count_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a min() call. +func TestCallIterator_Min_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a min() call. 
+func TestCallIterator_Min_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a min() call. +func TestCallIterator_Min_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a max() call. 
+func TestCallIterator_Max_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a max() call. +func TestCallIterator_Max_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a max() call. 
+func TestCallIterator_Max_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a sum() call. +func TestCallIterator_Sum_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a sum() call. 
+func TestCallIterator_Sum_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a first() call. +func TestCallIterator_First_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a first() call. 
+func TestCallIterator_First_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a first() call. +func TestCallIterator_First_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a first() call. 
+func TestCallIterator_First_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a last() call. +func TestCallIterator_Last_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a last() call. 
+func TestCallIterator_Last_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a last() call. +func TestCallIterator_Last_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a last() call. 
+func TestCallIterator_Last_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a mode() call. +func TestCallIterator_Mode_Float(t *testing.T) { + itr, _ := influxql.NewModeIterator(&FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + {&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 0}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a mode() call. 
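The mode() tests that follow assert the most frequent value per window: the first hostA window holds {15, 10, 10, 10, 10} and yields 10, the next window holds {20, 21, 21} and yields 21, and the hostB window starting at 20 holds {8, 8, 25} and yields 8. A rough standalone sketch of that selection (the vendored reducer's implementation and exact tie-breaking rules are not shown in this file, so this only illustrates the behaviour the tests assert):

```go
package main

import "fmt"

// modeOf returns the most frequent value in a window, consistent with the
// expectations in the mode() tests (e.g. {15,10,10,10,10} -> 10).
// Ties are resolved by first occurrence here; the real reducer may differ.
func modeOf(values []float64) float64 {
	counts := make(map[float64]int)
	var mode float64
	best := 0
	for _, v := range values {
		counts[v]++
		if counts[v] > best {
			best = counts[v]
			mode = v
		}
	}
	return mode
}

func main() {
	fmt.Println(modeOf([]float64{15, 10, 10, 10, 10})) // 10
	fmt.Println(modeOf([]float64{20, 21, 21}))         // 21
	fmt.Println(modeOf([]float64{8, 8, 25}))           // 8
}
```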
+func TestCallIterator_Mode_Integer(t *testing.T) { + itr, _ := influxql.NewModeIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, + {&influxql.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, + {&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a mode() call. 
+func TestCallIterator_Mode_String(t *testing.T) {
+	itr, _ := influxql.NewModeIterator(&StringIterator{Points: []influxql.StringPoint{
+		{Time: 0, Value: "15", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 1, Value: "10", Tags: ParseTags("region=us-west,host=hostA")},
+		{Time: 2, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 3, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 4, Value: "10", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 6, Value: "20", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")},
+		{Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")},
+
+		{Time: 1, Value: "11", Tags: ParseTags("region=us-west,host=hostB")},
+		{Time: 22, Value: "8", Tags: ParseTags("region=us-west,host=hostB")},
+		{Time: 23, Value: "8", Tags: ParseTags("region=us-west,host=hostB")},
+		{Time: 24, Value: "25", Tags: ParseTags("region=us-west,host=hostB")},
+	}},
+		influxql.IteratorOptions{
+			Expr:       MustParseExpr(`mode("value")`),
+			Dimensions: []string{"host"},
+			Interval:   influxql.Interval{Duration: 5 * time.Nanosecond},
+			Ordered:    true,
+			Ascending:  true,
+		},
+	)
+
+	if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	} else if !deep.Equal(a, [][]influxql.Point{
+		{&influxql.StringPoint{Time: 0, Value: "10", Tags: ParseTags("host=hostA")}},
+		{&influxql.StringPoint{Time: 5, Value: "21", Tags: ParseTags("host=hostA")}},
+		{&influxql.StringPoint{Time: 1, Value: "11", Tags: ParseTags("host=hostB")}},
+		{&influxql.StringPoint{Time: 20, Value: "8", Tags: ParseTags("host=hostB")}},
+	}) {
+		t.Fatalf("unexpected points: %s", spew.Sdump(a))
+	}
+}
+
+// Ensure that a boolean iterator can be created for a mode() call.
+func TestCallIterator_Mode_Boolean(t *testing.T) { + itr, _ := influxql.NewModeIterator(&BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + {Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`mode("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + Ordered: true, + Ascending: true, + }, + ) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}}, + {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}}, + {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}}, + {&influxql.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestNewCallIterator_UnsupportedExprName(t *testing.T) { + _, err := influxql.NewCallIterator( + &FloatIterator{}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`foobar("value")`), + }, + ) + + if err == nil || err.Error() != "unsupported function call: foobar" { + t.Errorf("unexpected error: %s", err) + } +} + +func BenchmarkCountIterator_1K(b *testing.B) { benchmarkCountIterator(b, 1000) } +func BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) } +func BenchmarkCountIterator_1M(b *testing.B) { benchmarkCountIterator(b, 1000000) } + +func benchmarkCountIterator(b *testing.B, pointN int) { + benchmarkCallIterator(b, influxql.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, pointN) +} + +func benchmarkCallIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu", Value: 100} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { return &p }, + } + + // Execute call against input. + itr, err := influxql.NewCallIterator(&input, opt) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkSampleIterator_1k(b *testing.B) { benchmarkSampleIterator(b, 1000) } +func BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) } +func BenchmarkSampleIterator_1M(b *testing.B) { benchmarkSampleIterator(b, 1000000) } + +func benchmarkSampleIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + // Create a lightweight point generator. 
+ p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(i) + return &p + }, + } + + for i := 0; i < b.N; i++ { + // Execute call against input. + itr, err := influxql.NewSampleIterator(&input, influxql.IteratorOptions{}, 100) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkDistinctIterator_1K(b *testing.B) { benchmarkDistinctIterator(b, 1000) } +func BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) } +func BenchmarkDistinctIterator_1M(b *testing.B) { benchmarkDistinctIterator(b, 1000000) } + +func benchmarkDistinctIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(i % 10) + return &p + }, + } + + // Execute call against input. + itr, err := influxql.NewDistinctIterator(&input, influxql.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +func BenchmarkModeIterator_1K(b *testing.B) { benchmarkModeIterator(b, 1000) } +func BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) } +func BenchmarkModeIterator_1M(b *testing.B) { benchmarkModeIterator(b, 1000000) } + +func benchmarkModeIterator(b *testing.B, pointN int) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Create a lightweight point generator. + p := influxql.FloatPoint{Name: "cpu"} + input := FloatPointGenerator{ + N: pointN, + Fn: func(i int) *influxql.FloatPoint { + p.Value = float64(10) + return &p + }, + } + + // Execute call against input. 
+ itr, err := influxql.NewModeIterator(&input, influxql.IteratorOptions{}) + if err != nil { + b.Fatal(err) + } + influxql.DrainIterator(itr) + } +} + +type FloatPointGenerator struct { + i int + N int + Fn func(i int) *influxql.FloatPoint +} + +func (g *FloatPointGenerator) Close() error { return nil } +func (g *FloatPointGenerator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} } + +func (g *FloatPointGenerator) Next() (*influxql.FloatPoint, error) { + if g.i == g.N { + return nil, nil + } + p := g.Fn(g.i) + g.i++ + return p, nil +} + +func MustCallIterator(input influxql.Iterator, opt influxql.IteratorOptions) influxql.Iterator { + itr, err := influxql.NewCallIterator(input, opt) + if err != nil { + panic(err) + } + return itr +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/cast.go b/vendor/github.com/influxdata/influxdb/influxql/cast.go new file mode 100644 index 0000000..b993a17 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/cast.go @@ -0,0 +1,41 @@ +package influxql + +func castToFloat(v interface{}) float64 { + switch v := v.(type) { + case float64: + return v + case int64: + return float64(v) + default: + return float64(0) + } +} + +func castToInteger(v interface{}) int64 { + switch v := v.(type) { + case float64: + return int64(v) + case int64: + return v + default: + return int64(0) + } +} + +func castToString(v interface{}) string { + switch v := v.(type) { + case string: + return v + default: + return "" + } +} + +func castToBoolean(v interface{}) bool { + switch v := v.(type) { + case bool: + return v + default: + return false + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/doc.go b/vendor/github.com/influxdata/influxdb/influxql/doc.go new file mode 100644 index 0000000..8b814c9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/doc.go @@ -0,0 +1,12 @@ +/* +Package influxql implements a parser for the InfluxDB query language. + +InfluxQL is a DML and DDL language for the InfluxDB time series database. +It provides the ability to query for aggregate statistics as well as create +and configure the InfluxDB server. + +See https://docs.influxdata.com/influxdb/latest/query_language/ +for a reference on using InfluxQL. + +*/ +package influxql diff --git a/vendor/github.com/influxdata/influxdb/influxql/emitter.go b/vendor/github.com/influxdata/influxdb/influxql/emitter.go new file mode 100644 index 0000000..625c7ca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/emitter.go @@ -0,0 +1,225 @@ +package influxql + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/models" +) + +// Emitter groups values together by name, tags, and time. +type Emitter struct { + buf []Point + itrs []Iterator + ascending bool + chunkSize int + + tags Tags + row *models.Row + + // The columns to attach to each row. + Columns []string + + // The time zone location. + Location *time.Location + + // Removes the "time" column from output. + // Used for meta queries where time does not apply. + OmitTime bool +} + +// NewEmitter returns a new instance of Emitter that pulls from itrs. +func NewEmitter(itrs []Iterator, ascending bool, chunkSize int) *Emitter { + return &Emitter{ + buf: make([]Point, len(itrs)), + itrs: itrs, + ascending: ascending, + chunkSize: chunkSize, + Location: time.UTC, + } +} + +// Close closes the underlying iterators. +func (e *Emitter) Close() error { + return Iterators(e.itrs).Close() +} + +// Emit returns the next row from the iterators. 
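Before moving on to the emitter, note that the castTo* helpers in cast.go above coerce an `interface{}` value to the requested type and silently fall back to that type's zero value whenever the dynamic type is unsupported; strings are never parsed into numbers. Since the helpers are unexported, this standalone sketch re-declares two of them to show the behaviour:

```go
package main

import "fmt"

// castToFloat mirrors the unexported helper in the vendored cast.go:
// int64 is converted to float64, anything else falls back to 0.
func castToFloat(v interface{}) float64 {
	switch v := v.(type) {
	case float64:
		return v
	case int64:
		return float64(v)
	default:
		return 0
	}
}

// castToBoolean mirrors the boolean helper: non-bool values become false.
func castToBoolean(v interface{}) bool {
	if b, ok := v.(bool); ok {
		return b
	}
	return false
}

func main() {
	fmt.Println(castToFloat(int64(3))) // 3
	fmt.Println(castToFloat("3"))      // 0 (strings are not parsed; unsupported types -> zero value)
	fmt.Println(castToBoolean(true))   // true
	fmt.Println(castToBoolean("yes"))  // false (unsupported type -> zero value)
}
```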
+func (e *Emitter) Emit() (*models.Row, bool, error) { + // Immediately end emission if there are no iterators. + if len(e.itrs) == 0 { + return nil, false, nil + } + + // Continually read from iterators until they are exhausted. + for { + // Fill buffer. Return row if no more points remain. + t, name, tags, err := e.loadBuf() + if err != nil { + return nil, false, err + } else if t == ZeroTime { + row := e.row + e.row = nil + return row, false, nil + } + + // Read next set of values from all iterators at a given time/name/tags. + // If no values are returned then return row. + values := e.readAt(t, name, tags) + if values == nil { + row := e.row + e.row = nil + return row, false, nil + } + + // If there's no row yet then create one. + // If the name and tags match the existing row, append to that row if + // the number of values doesn't exceed the chunk size. + // Otherwise return existing row and add values to next emitted row. + if e.row == nil { + e.createRow(name, tags, values) + } else if e.row.Name == name && e.tags.Equals(&tags) { + if e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize { + row := e.row + row.Partial = true + e.createRow(name, tags, values) + return row, true, nil + } + e.row.Values = append(e.row.Values, values) + } else { + row := e.row + e.createRow(name, tags, values) + return row, true, nil + } + } +} + +// loadBuf reads in points into empty buffer slots. +// Returns the next time/name/tags to emit for. +func (e *Emitter) loadBuf() (t int64, name string, tags Tags, err error) { + t = ZeroTime + + for i := range e.itrs { + // Load buffer, if empty. + if e.buf[i] == nil { + e.buf[i], err = e.readIterator(e.itrs[i]) + if err != nil { + break + } + } + + // Skip if buffer is empty. + p := e.buf[i] + if p == nil { + continue + } + itrTime, itrName, itrTags := p.time(), p.name(), p.tags() + + // Initialize range values if not set. + if t == ZeroTime { + t, name, tags = itrTime, itrName, itrTags + continue + } + + // Update range values if lower and emitter is in time ascending order. + if e.ascending { + if (itrName < name) || (itrName == name && itrTags.ID() < tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime < t) { + t, name, tags = itrTime, itrName, itrTags + } + continue + } + + // Update range values if higher and emitter is in time descending order. + if (itrName > name) || (itrName == name && itrTags.ID() > tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime > t) { + t, name, tags = itrTime, itrName, itrTags + } + } + return +} + +// createRow creates a new row attached to the emitter. +func (e *Emitter) createRow(name string, tags Tags, values []interface{}) { + e.tags = tags + e.row = &models.Row{ + Name: name, + Tags: tags.KeyValues(), + Columns: e.Columns, + Values: [][]interface{}{values}, + } +} + +// readAt returns the next slice of values from the iterators at time/name/tags. +// Returns nil values once the iterators are exhausted. +func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { + offset := 1 + if e.OmitTime { + offset = 0 + } + + values := make([]interface{}, len(e.itrs)+offset) + if !e.OmitTime { + values[0] = time.Unix(0, t).In(e.Location) + } + e.readInto(t, name, tags, values[offset:]) + return values +} + +func (e *Emitter) readInto(t int64, name string, tags Tags, values []interface{}) { + for i, p := range e.buf { + // Skip if buffer is empty. + if p == nil { + values[i] = nil + continue + } + + // Skip point if it doesn't match time/name/tags. 
+ pTags := p.tags() + if p.time() != t || p.name() != name || !pTags.Equals(&tags) { + values[i] = nil + continue + } + + // Read point value. + values[i] = p.value() + + // Clear buffer. + e.buf[i] = nil + } +} + +// readIterator reads the next point from itr. +func (e *Emitter) readIterator(itr Iterator) (Point, error) { + if itr == nil { + return nil, nil + } + + switch itr := itr.(type) { + case FloatIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case IntegerIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case StringIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + case BooleanIterator: + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil + } + default: + panic(fmt.Sprintf("unsupported iterator: %T", itr)) + } + return nil, nil +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go b/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go new file mode 100644 index 0000000..fdf8be0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go @@ -0,0 +1,125 @@ +package influxql_test + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure the emitter can group iterators together into rows. +func TestEmitter_Emit(t *testing.T) { + // Build an emitter that pulls from two iterators. + e := influxql.NewEmitter([]influxql.Iterator{ + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=north"), Time: 0, Value: 4}, + {Name: "mem", Time: 4, Value: 5}, + }}, + }, true, 0) + e.Columns = []string{"col1", "col2"} + + // Verify the cpu region=west is emitted first. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(0): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(1), nil}, + {time.Unix(0, 1).UTC(), float64(2), float64(4)}, + }, + }) { + t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) + } + + // Verify the cpu region=north is emitted next. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(1): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "north"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), nil, float64(4)}, + }, + }) { + t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) + } + + // Verify the mem series is emitted last. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(2): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "mem", + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 4).UTC(), nil, float64(5)}, + }, + }) { + t.Fatalf("unexpected row(2): %s", spew.Sdump(row)) + } + + // Verify EOF. 
+ if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(eof): %s", err) + } else if row != nil { + t.Fatalf("unexpected eof: %s", spew.Sdump(row)) + } +} + +// Ensure the emitter will limit the chunked output from a series. +func TestEmitter_ChunkSize(t *testing.T) { + // Build an emitter that pulls from one iterator with multiple points in the same series. + e := influxql.NewEmitter([]influxql.Iterator{ + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, + }}, + }, true, 1) + e.Columns = []string{"col1"} + + // Verify the cpu region=west is emitted first. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(0): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(1)}, + }, + Partial: true, + }) { + t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) + } + + // Verify the cpu region=north is emitted next. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(1): %s", err) + } else if !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1"}, + Values: [][]interface{}{ + {time.Unix(0, 1).UTC(), float64(2)}, + }, + }) { + t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) + } + + // Verify EOF. + if row, _, err := e.Emit(); err != nil { + t.Fatalf("unexpected error(eof): %s", err) + } else if row != nil { + t.Fatalf("unexpected eof: %s", spew.Sdump(row)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go new file mode 100644 index 0000000..5723d56 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go @@ -0,0 +1,1669 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: functions.gen.go.tmpl + +package influxql + +import ( + "math/rand" + "sort" + "time" +) + +// FloatPointAggregator aggregates points to produce a single point. +type FloatPointAggregator interface { + AggregateFloat(p *FloatPoint) +} + +// FloatBulkPointAggregator aggregates multiple points at a time. +type FloatBulkPointAggregator interface { + AggregateFloatBulk(points []FloatPoint) +} + +// AggregateFloatPoints feeds a slice of FloatPoint into an +// aggregator. If the aggregator is a FloatBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateFloatPoints(a FloatPointAggregator, points []FloatPoint) { + switch a := a.(type) { + case FloatBulkPointAggregator: + a.AggregateFloatBulk(points) + default: + for _, p := range points { + a.AggregateFloat(&p) + } + } +} + +// FloatPointEmitter produces a single point from an aggregate. +type FloatPointEmitter interface { + Emit() []FloatPoint +} + +// FloatReduceFunc is the function called by a FloatPoint reducer. +type FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{}) + +// FloatFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncReducer struct { + prev *FloatPoint + fn FloatReduceFunc +} + +// NewFloatFuncReducer creates a new FloatFuncFloatReducer. 
+func NewFloatFuncReducer(fn FloatReduceFunc, prev *FloatPoint) *FloatFuncReducer { + return &FloatFuncReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// FloatReduceSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint + +// FloatSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncReducer struct { + points []FloatPoint + fn FloatReduceSliceFunc +} + +// NewFloatSliceFuncReducer creates a new FloatSliceFuncReducer. +func NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer { + return &FloatSliceFuncReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// FloatReduceIntegerFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{}) + +// FloatFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncIntegerReducer struct { + prev *IntegerPoint + fn FloatReduceIntegerFunc +} + +// NewFloatFuncIntegerReducer creates a new FloatFuncIntegerReducer. +func NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc, prev *IntegerPoint) *FloatFuncIntegerReducer { + return &FloatFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer. 
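// Illustrative sketch (not part of the upstream file): one way a FloatFuncReducer
// might be used, written as if it lived alongside the code above in package
// influxql. The reduce function below keeps the maximum value seen so far; the
// point times and values are made up for the example.
func exampleMaxFuncReducer() FloatPoint {
	// maxReduce keeps the larger of the previous and current values.
	maxReduce := func(prev, curr *FloatPoint) (int64, float64, []interface{}) {
		if prev == nil || curr.Value > prev.Value {
			return curr.Time, curr.Value, curr.Aux
		}
		return prev.Time, prev.Value, prev.Aux
	}

	r := NewFloatFuncReducer(maxReduce, nil)
	r.AggregateFloat(&FloatPoint{Time: 0, Value: 1.5})
	r.AggregateFloat(&FloatPoint{Time: 1, Value: 4.2})
	r.AggregateFloat(&FloatPoint{Time: 2, Value: 3.1})

	// Emit returns a single point, here {Time: 1, Value: 4.2}.
	return r.Emit()[0]
}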
+type FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint + +// FloatSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncIntegerReducer struct { + points []FloatPoint + fn FloatReduceIntegerSliceFunc +} + +// NewFloatSliceFuncIntegerReducer creates a new FloatSliceFuncIntegerReducer. +func NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer { + return &FloatSliceFuncIntegerReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// FloatReduceStringFunc is the function called by a FloatPoint reducer. +type FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{}) + +// FloatFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncStringReducer struct { + prev *StringPoint + fn FloatReduceStringFunc +} + +// NewFloatFuncStringReducer creates a new FloatFuncStringReducer. +func NewFloatFuncStringReducer(fn FloatReduceStringFunc, prev *StringPoint) *FloatFuncStringReducer { + return &FloatFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint + +// FloatSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncStringReducer struct { + points []FloatPoint + fn FloatReduceStringSliceFunc +} + +// NewFloatSliceFuncStringReducer creates a new FloatSliceFuncStringReducer. +func NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer { + return &FloatSliceFuncStringReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. 
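// Illustrative sketch (not part of the upstream file): the slice-based reducer
// combined with AggregateFloatPoints. Because FloatSliceFuncReducer also
// implements FloatBulkPointAggregator, the helper takes the bulk path and copies
// the whole slice in one call. The reduce function and values are made up.
func exampleTopSliceReducer() []FloatPoint {
	// top returns the single largest point from the buffered slice.
	top := func(a []FloatPoint) []FloatPoint {
		if len(a) == 0 {
			return nil
		}
		best := a[0]
		for _, p := range a[1:] {
			if p.Value > best.Value {
				best = p
			}
		}
		return []FloatPoint{best}
	}

	r := NewFloatSliceFuncReducer(top)
	AggregateFloatPoints(r, []FloatPoint{
		{Time: 0, Value: 2},
		{Time: 1, Value: 7},
		{Time: 2, Value: 5},
	})

	// Emit applies top to the buffered points: [{Time: 1, Value: 7}].
	return r.Emit()
}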
+func (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// FloatReduceBooleanFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{}) + +// FloatFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type FloatFuncBooleanReducer struct { + prev *BooleanPoint + fn FloatReduceBooleanFunc +} + +// NewFloatFuncBooleanReducer creates a new FloatFuncBooleanReducer. +func NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc, prev *BooleanPoint) *FloatFuncBooleanReducer { + return &FloatFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateFloat takes a FloatPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. +func (r *FloatFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint + +// FloatSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type FloatSliceFuncBooleanReducer struct { + points []FloatPoint + fn FloatReduceBooleanSliceFunc +} + +// NewFloatSliceFuncBooleanReducer creates a new FloatSliceFuncBooleanReducer. +func NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer { + return &FloatSliceFuncBooleanReducer{fn: fn} +} + +// AggregateFloat copies the FloatPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. +// This is a more efficient version of calling AggregateFloat on each point. +func (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// FloatDistinctReducer returns the distinct points in a series. 
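// Illustrative sketch (not part of the upstream file): the cross-type reducers
// above let float input produce a different output type. Here a simple count is
// written as a FloatFuncIntegerReducer; the input points are made up.
func exampleCountFuncReducer() IntegerPoint {
	count := func(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) {
		if prev == nil {
			return curr.Time, 1, nil
		}
		return prev.Time, prev.Value + 1, nil
	}

	r := NewFloatFuncIntegerReducer(count, nil)
	r.AggregateFloat(&FloatPoint{Time: 0, Value: 1.5})
	r.AggregateFloat(&FloatPoint{Time: 1, Value: 2.5})

	// Emit returns a single IntegerPoint with Value 2.
	return r.Emit()[0]
}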
+type FloatDistinctReducer struct { + m map[float64]FloatPoint +} + +// NewFloatDistinctReducer creates a new FloatDistinctReducer. +func NewFloatDistinctReducer() *FloatDistinctReducer { + return &FloatDistinctReducer{m: make(map[float64]FloatPoint)} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *FloatDistinctReducer) Emit() []FloatPoint { + points := make([]FloatPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(floatPoints(points)) + return points +} + +// FloatElapsedReducer calculates the elapsed of the aggregated points. +type FloatElapsedReducer struct { + unitConversion int64 + prev FloatPoint + curr FloatPoint +} + +// NewFloatElapsedReducer creates a new FloatElapsedReducer. +func NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer { + return &FloatElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *FloatElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// FloatSampleReducer implements a reservoir sampling to calculate a random subset of points +type FloatSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points floatPoints // the reservoir +} + +// NewFloatSampleReducer creates a new FloatSampleReducer +func NewFloatSampleReducer(size int) *FloatSampleReducer { + return &FloatSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(floatPoints, size), + } +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *FloatSampleReducer) Emit() []FloatPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// IntegerPointAggregator aggregates points to produce a single point. +type IntegerPointAggregator interface { + AggregateInteger(p *IntegerPoint) +} + +// IntegerBulkPointAggregator aggregates multiple points at a time. +type IntegerBulkPointAggregator interface { + AggregateIntegerBulk(points []IntegerPoint) +} + +// AggregateIntegerPoints feeds a slice of IntegerPoint into an +// aggregator. If the aggregator is a IntegerBulkPointAggregator, it will +// use the AggregateBulk method. 
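// Illustrative sketch (not part of the upstream file): feeding duplicate values
// through the FloatDistinctReducer defined above collapses them, and Emit returns
// the surviving points in sorted order. The input points are made up.
func exampleDistinctReducer() []FloatPoint {
	r := NewFloatDistinctReducer()
	for _, p := range []FloatPoint{
		{Time: 0, Value: 1},
		{Time: 1, Value: 1}, // duplicate value, dropped
		{Time: 2, Value: 3},
	} {
		r.AggregateFloat(&p)
	}

	// Emit returns two points, one with Value 1 and one with Value 3.
	return r.Emit()
}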
+func AggregateIntegerPoints(a IntegerPointAggregator, points []IntegerPoint) { + switch a := a.(type) { + case IntegerBulkPointAggregator: + a.AggregateIntegerBulk(points) + default: + for _, p := range points { + a.AggregateInteger(&p) + } + } +} + +// IntegerPointEmitter produces a single point from an aggregate. +type IntegerPointEmitter interface { + Emit() []IntegerPoint +} + +// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{}) + +// IntegerFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncFloatReducer struct { + prev *FloatPoint + fn IntegerReduceFloatFunc +} + +// NewIntegerFuncFloatReducer creates a new IntegerFuncFloatReducer. +func NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc, prev *FloatPoint) *IntegerFuncFloatReducer { + return &IntegerFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint + +// IntegerSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncFloatReducer struct { + points []IntegerPoint + fn IntegerReduceFloatSliceFunc +} + +// NewIntegerSliceFuncFloatReducer creates a new IntegerSliceFuncFloatReducer. +func NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer { + return &IntegerSliceFuncFloatReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// IntegerReduceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{}) + +// IntegerFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. 
+type IntegerFuncReducer struct { + prev *IntegerPoint + fn IntegerReduceFunc +} + +// NewIntegerFuncReducer creates a new IntegerFuncIntegerReducer. +func NewIntegerFuncReducer(fn IntegerReduceFunc, prev *IntegerPoint) *IntegerFuncReducer { + return &IntegerFuncReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint + +// IntegerSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncReducer struct { + points []IntegerPoint + fn IntegerReduceSliceFunc +} + +// NewIntegerSliceFuncReducer creates a new IntegerSliceFuncReducer. +func NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer { + return &IntegerSliceFuncReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// IntegerReduceStringFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{}) + +// IntegerFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncStringReducer struct { + prev *StringPoint + fn IntegerReduceStringFunc +} + +// NewIntegerFuncStringReducer creates a new IntegerFuncStringReducer. +func NewIntegerFuncStringReducer(fn IntegerReduceStringFunc, prev *StringPoint) *IntegerFuncStringReducer { + return &IntegerFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. 
+func (r *IntegerFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint + +// IntegerSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncStringReducer struct { + points []IntegerPoint + fn IntegerReduceStringSliceFunc +} + +// NewIntegerSliceFuncStringReducer creates a new IntegerSliceFuncStringReducer. +func NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer { + return &IntegerSliceFuncStringReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{}) + +// IntegerFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type IntegerFuncBooleanReducer struct { + prev *BooleanPoint + fn IntegerReduceBooleanFunc +} + +// NewIntegerFuncBooleanReducer creates a new IntegerFuncBooleanReducer. +func NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc, prev *BooleanPoint) *IntegerFuncBooleanReducer { + return &IntegerFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateInteger takes a IntegerPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. +func (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint + +// IntegerSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type IntegerSliceFuncBooleanReducer struct { + points []IntegerPoint + fn IntegerReduceBooleanSliceFunc +} + +// NewIntegerSliceFuncBooleanReducer creates a new IntegerSliceFuncBooleanReducer. 
+func NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer { + return &IntegerSliceFuncBooleanReducer{fn: fn} +} + +// AggregateInteger copies the IntegerPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. +// This is a more efficient version of calling AggregateInteger on each point. +func (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// IntegerDistinctReducer returns the distinct points in a series. +type IntegerDistinctReducer struct { + m map[int64]IntegerPoint +} + +// NewIntegerDistinctReducer creates a new IntegerDistinctReducer. +func NewIntegerDistinctReducer() *IntegerDistinctReducer { + return &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *IntegerDistinctReducer) Emit() []IntegerPoint { + points := make([]IntegerPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(integerPoints(points)) + return points +} + +// IntegerElapsedReducer calculates the elapsed of the aggregated points. +type IntegerElapsedReducer struct { + unitConversion int64 + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerElapsedReducer creates a new IntegerElapsedReducer. +func NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer { + return &IntegerElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *IntegerElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// IntegerSampleReducer implements a reservoir sampling to calculate a random subset of points +type IntegerSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points integerPoints // the reservoir +} + +// NewIntegerSampleReducer creates a new IntegerSampleReducer +func NewIntegerSampleReducer(size int) *IntegerSampleReducer { + return &IntegerSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(integerPoints, size), + } +} + +// AggregateInteger aggregates a point into the reducer. 
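// Illustrative sketch (not part of the upstream file): the elapsed reducer
// defined above reports the time delta between consecutive points, divided by
// the interval's Duration. It assumes Interval.Duration is a time.Duration used
// purely as the conversion unit and that this file's "time" import is in scope;
// the timestamps are made up and expressed in nanoseconds.
func exampleElapsedReducer() []IntegerPoint {
	r := NewIntegerElapsedReducer(Interval{Duration: time.Millisecond})

	r.AggregateInteger(&IntegerPoint{Time: 0, Value: 10})
	// Only one point seen so far, so Emit would return nil at this stage.

	r.AggregateInteger(&IntegerPoint{Time: 2000000, Value: 20})
	// Two milliseconds elapsed between the points: Emit returns a point with Value 2.
	return r.Emit()
}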
+func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *IntegerSampleReducer) Emit() []IntegerPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// StringPointAggregator aggregates points to produce a single point. +type StringPointAggregator interface { + AggregateString(p *StringPoint) +} + +// StringBulkPointAggregator aggregates multiple points at a time. +type StringBulkPointAggregator interface { + AggregateStringBulk(points []StringPoint) +} + +// AggregateStringPoints feeds a slice of StringPoint into an +// aggregator. If the aggregator is a StringBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateStringPoints(a StringPointAggregator, points []StringPoint) { + switch a := a.(type) { + case StringBulkPointAggregator: + a.AggregateStringBulk(points) + default: + for _, p := range points { + a.AggregateString(&p) + } + } +} + +// StringPointEmitter produces a single point from an aggregate. +type StringPointEmitter interface { + Emit() []StringPoint +} + +// StringReduceFloatFunc is the function called by a StringPoint reducer. +type StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{}) + +// StringFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncFloatReducer struct { + prev *FloatPoint + fn StringReduceFloatFunc +} + +// NewStringFuncFloatReducer creates a new StringFuncFloatReducer. +func NewStringFuncFloatReducer(fn StringReduceFloatFunc, prev *FloatPoint) *StringFuncFloatReducer { + return &StringFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncFloatReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// StringReduceFloatSliceFunc is the function called by a StringPoint reducer. +type StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint + +// StringSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncFloatReducer struct { + points []StringPoint + fn StringReduceFloatSliceFunc +} + +// NewStringSliceFuncFloatReducer creates a new StringSliceFuncFloatReducer. 
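// Illustrative sketch (not part of the upstream file): reservoir sampling with
// the IntegerSampleReducer defined above. With a reservoir of size 2 and five
// inputs, every point has an equal chance of surviving; the values are made up.
func exampleSampleReducer() []IntegerPoint {
	r := NewIntegerSampleReducer(2)
	for i := int64(0); i < 5; i++ {
		r.AggregateInteger(&IntegerPoint{Time: i, Value: i * 10})
	}

	// Emit returns two of the five points, sorted; which two depends on the
	// reducer's random seed.
	return r.Emit()
}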
+func NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer { + return &StringSliceFuncFloatReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// StringReduceIntegerFunc is the function called by a StringPoint reducer. +type StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{}) + +// StringFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncIntegerReducer struct { + prev *IntegerPoint + fn StringReduceIntegerFunc +} + +// NewStringFuncIntegerReducer creates a new StringFuncIntegerReducer. +func NewStringFuncIntegerReducer(fn StringReduceIntegerFunc, prev *IntegerPoint) *StringFuncIntegerReducer { + return &StringFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer. +type StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint + +// StringSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncIntegerReducer struct { + points []StringPoint + fn StringReduceIntegerSliceFunc +} + +// NewStringSliceFuncIntegerReducer creates a new StringSliceFuncIntegerReducer. +func NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer { + return &StringSliceFuncIntegerReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. 
+// This method does not clear the points from the internal slice. +func (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// StringReduceFunc is the function called by a StringPoint reducer. +type StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{}) + +// StringFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncReducer struct { + prev *StringPoint + fn StringReduceFunc +} + +// NewStringFuncReducer creates a new StringFuncStringReducer. +func NewStringFuncReducer(fn StringReduceFunc, prev *StringPoint) *StringFuncReducer { + return &StringFuncReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *StringFuncReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// StringReduceSliceFunc is the function called by a StringPoint reducer. +type StringReduceSliceFunc func(a []StringPoint) []StringPoint + +// StringSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncReducer struct { + points []StringPoint + fn StringReduceSliceFunc +} + +// NewStringSliceFuncReducer creates a new StringSliceFuncReducer. +func NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer { + return &StringSliceFuncReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// StringReduceBooleanFunc is the function called by a StringPoint reducer. +type StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{}) + +// StringFuncBooleanReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type StringFuncBooleanReducer struct { + prev *BooleanPoint + fn StringReduceBooleanFunc +} + +// NewStringFuncBooleanReducer creates a new StringFuncBooleanReducer. +func NewStringFuncBooleanReducer(fn StringReduceBooleanFunc, prev *BooleanPoint) *StringFuncBooleanReducer { + return &StringFuncBooleanReducer{fn: fn, prev: prev} +} + +// AggregateString takes a StringPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateString. +func (r *StringFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// StringReduceBooleanSliceFunc is the function called by a StringPoint reducer. +type StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint + +// StringSliceFuncBooleanReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type StringSliceFuncBooleanReducer struct { + points []StringPoint + fn StringReduceBooleanSliceFunc +} + +// NewStringSliceFuncBooleanReducer creates a new StringSliceFuncBooleanReducer. +func NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer { + return &StringSliceFuncBooleanReducer{fn: fn} +} + +// AggregateString copies the StringPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. +// This is a more efficient version of calling AggregateString on each point. +func (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// StringDistinctReducer returns the distinct points in a series. +type StringDistinctReducer struct { + m map[string]StringPoint +} + +// NewStringDistinctReducer creates a new StringDistinctReducer. +func NewStringDistinctReducer() *StringDistinctReducer { + return &StringDistinctReducer{m: make(map[string]StringPoint)} +} + +// AggregateString aggregates a point into the reducer. +func (r *StringDistinctReducer) AggregateString(p *StringPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *StringDistinctReducer) Emit() []StringPoint { + points := make([]StringPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, StringPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(stringPoints(points)) + return points +} + +// StringElapsedReducer calculates the elapsed of the aggregated points. +type StringElapsedReducer struct { + unitConversion int64 + prev StringPoint + curr StringPoint +} + +// NewStringElapsedReducer creates a new StringElapsedReducer. +func NewStringElapsedReducer(interval Interval) *StringElapsedReducer { + return &StringElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: StringPoint{Nil: true}, + curr: StringPoint{Nil: true}, + } +} + +// AggregateString aggregates a point into the reducer and updates the current window. +func (r *StringElapsedReducer) AggregateString(p *StringPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
+func (r *StringElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// StringSampleReducer implements a reservoir sampling to calculate a random subset of points +type StringSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points stringPoints // the reservoir +} + +// NewStringSampleReducer creates a new StringSampleReducer +func NewStringSampleReducer(size int) *StringSampleReducer { + return &StringSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(stringPoints, size), + } +} + +// AggregateString aggregates a point into the reducer. +func (r *StringSampleReducer) AggregateString(p *StringPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *StringSampleReducer) Emit() []StringPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + +// BooleanPointAggregator aggregates points to produce a single point. +type BooleanPointAggregator interface { + AggregateBoolean(p *BooleanPoint) +} + +// BooleanBulkPointAggregator aggregates multiple points at a time. +type BooleanBulkPointAggregator interface { + AggregateBooleanBulk(points []BooleanPoint) +} + +// AggregateBooleanPoints feeds a slice of BooleanPoint into an +// aggregator. If the aggregator is a BooleanBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateBooleanPoints(a BooleanPointAggregator, points []BooleanPoint) { + switch a := a.(type) { + case BooleanBulkPointAggregator: + a.AggregateBooleanBulk(points) + default: + for _, p := range points { + a.AggregateBoolean(&p) + } + } +} + +// BooleanPointEmitter produces a single point from an aggregate. +type BooleanPointEmitter interface { + Emit() []BooleanPoint +} + +// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{}) + +// BooleanFuncFloatReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncFloatReducer struct { + prev *FloatPoint + fn BooleanReduceFloatFunc +} + +// NewBooleanFuncFloatReducer creates a new BooleanFuncFloatReducer. +func NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc, prev *FloatPoint) *BooleanFuncFloatReducer { + return &BooleanFuncFloatReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. 
+func (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint + +// BooleanSliceFuncFloatReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncFloatReducer struct { + points []BooleanPoint + fn BooleanReduceFloatSliceFunc +} + +// NewBooleanSliceFuncFloatReducer creates a new BooleanSliceFuncFloatReducer. +func NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer { + return &BooleanSliceFuncFloatReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer. +type BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{}) + +// BooleanFuncIntegerReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncIntegerReducer struct { + prev *IntegerPoint + fn BooleanReduceIntegerFunc +} + +// NewBooleanFuncIntegerReducer creates a new BooleanFuncIntegerReducer. +func NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc, prev *IntegerPoint) *BooleanFuncIntegerReducer { + return &BooleanFuncIntegerReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer. 
+type BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint + +// BooleanSliceFuncIntegerReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncIntegerReducer struct { + points []BooleanPoint + fn BooleanReduceIntegerSliceFunc +} + +// NewBooleanSliceFuncIntegerReducer creates a new BooleanSliceFuncIntegerReducer. +func NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer { + return &BooleanSliceFuncIntegerReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// BooleanReduceStringFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{}) + +// BooleanFuncStringReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncStringReducer struct { + prev *StringPoint + fn BooleanReduceStringFunc +} + +// NewBooleanFuncStringReducer creates a new BooleanFuncStringReducer. +func NewBooleanFuncStringReducer(fn BooleanReduceStringFunc, prev *StringPoint) *BooleanFuncStringReducer { + return &BooleanFuncStringReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint + +// BooleanSliceFuncStringReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncStringReducer struct { + points []BooleanPoint + fn BooleanReduceStringSliceFunc +} + +// NewBooleanSliceFuncStringReducer creates a new BooleanSliceFuncStringReducer. +func NewBooleanSliceFuncStringReducer(fn BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer { + return &BooleanSliceFuncStringReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. 
+func (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// BooleanReduceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{}) + +// BooleanFuncReducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type BooleanFuncReducer struct { + prev *BooleanPoint + fn BooleanReduceFunc +} + +// NewBooleanFuncReducer creates a new BooleanFuncBooleanReducer. +func NewBooleanFuncReducer(fn BooleanReduceFunc, prev *BooleanPoint) *BooleanFuncReducer { + return &BooleanFuncReducer{fn: fn, prev: prev} +} + +// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the +// current and new point to modify the current point. +func (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. +func (r *BooleanFuncReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint + +// BooleanSliceFuncReducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. +type BooleanSliceFuncReducer struct { + points []BooleanPoint + fn BooleanReduceSliceFunc +} + +// NewBooleanSliceFuncReducer creates a new BooleanSliceFuncReducer. +func NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer { + return &BooleanSliceFuncReducer{fn: fn} +} + +// AggregateBoolean copies the BooleanPoint into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p.Clone()) +} + +// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. +// This is a more efficient version of calling AggregateBoolean on each point. +func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// BooleanDistinctReducer returns the distinct points in a series. +type BooleanDistinctReducer struct { + m map[bool]BooleanPoint +} + +// NewBooleanDistinctReducer creates a new BooleanDistinctReducer. 
+func NewBooleanDistinctReducer() *BooleanDistinctReducer { + return &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)} +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *BooleanDistinctReducer) Emit() []BooleanPoint { + points := make([]BooleanPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(booleanPoints(points)) + return points +} + +// BooleanElapsedReducer calculates the elapsed of the aggregated points. +type BooleanElapsedReducer struct { + unitConversion int64 + prev BooleanPoint + curr BooleanPoint +} + +// NewBooleanElapsedReducer creates a new BooleanElapsedReducer. +func NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer { + return &BooleanElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: BooleanPoint{Nil: true}, + curr: BooleanPoint{Nil: true}, + } +} + +// AggregateBoolean aggregates a point into the reducer and updates the current window. +func (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *BooleanElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// BooleanSampleReducer implements a reservoir sampling to calculate a random subset of points +type BooleanSampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points booleanPoints // the reservoir +} + +// NewBooleanSampleReducer creates a new BooleanSampleReducer +func NewBooleanSampleReducer(size int) *BooleanSampleReducer { + return &BooleanSampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make(booleanPoints, size), + } +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *BooleanSampleReducer) Emit() []BooleanPoint { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl new file mode 100644 index 0000000..b23820e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl @@ -0,0 +1,219 @@ +package influxql + +import ( +"sort" +"time" +"math/rand" +) + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}PointAggregator aggregates points to produce a single point. 
+type {{$k.Name}}PointAggregator interface { + Aggregate{{$k.Name}}(p *{{$k.Name}}Point) +} + +// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time. +type {{$k.Name}}BulkPointAggregator interface { + Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) +} + +// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an +// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will +// use the AggregateBulk method. +func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) { + switch a := a.(type) { + case {{$k.Name}}BulkPointAggregator: + a.Aggregate{{$k.Name}}Bulk(points) + default: + for _, p := range points { + a.Aggregate{{$k.Name}}(&p) + } + } +} + +// {{$k.Name}}PointEmitter produces a single point from an aggregate. +type {{$k.Name}}PointEmitter interface { + Emit() []{{$k.Name}}Point +} + +{{range $v := $types}} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{}) + +// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces +// the passed in points to a single point using a reduce function. +type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + prev *{{$v.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func +} + +// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer. +func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev} +} + +// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the +// current and new point to modify the current point. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &{{$v.Name}}Point{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}. +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return []{{$v.Name}}Point{*r.prev} +} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point + +// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates +// the passed in points and then invokes the function to reduce the points when they are emitted. 
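+// Unlike the Func reducer above, it buffers every aggregated point and
+// defers all reduction work until Emit is called.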
+type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + points []{{$k.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc +} + +// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer. +func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} +} + +// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed +// to the reduce function when Emit is called. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.points = append(r.points, *p.Clone()) +} + +// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice. +// This is a more efficient version of calling Aggregate{{$k.Name}} on each point. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) { + r.points = append(r.points, points...) +} + +// Emit invokes the reduce function on the aggregated points to generate the aggregated points. +// This method does not clear the points from the internal slice. +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return r.fn(r.points) +} +{{end}} + +// {{$k.Name}}DistinctReducer returns the distinct points in a series. +type {{$k.Name}}DistinctReducer struct { + m map[{{$k.Type}}]{{$k.Name}}Point +} + +// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer. +func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer { + return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)} +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point { + points := make([]{{$k.Name}}Point, 0, len(r.m)) + for _, p := range r.m { + points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value}) + } + sort.Sort({{$k.name}}Points(points)) + return points +} + +// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points. +type {{$k.Name}}ElapsedReducer struct { + unitConversion int64 + prev {{$k.Name}}Point + curr {{$k.Name}}Point +} + +// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer. +func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer { + return &{{$k.Name}}ElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: {{$k.Name}}Point{Nil: true}, + curr: {{$k.Name}}Point{Nil: true}, + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window. +func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
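+// The emitted value is the time difference between the two most recent
+// points divided by the reducer's interval unit, returned as an IntegerPoint.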
+func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + +// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points +type {{$k.Name}}SampleReducer struct { + count int // how many points we've iterated over + rng *rand.Rand // random number generator for each reducer + + points {{$k.name}}Points // the reservoir +} + +// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer +func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer { + return &{{$k.Name}}SampleReducer{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ + points: make({{$k.name}}Points, size), + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.count++ + // Fill the reservoir with the first n points + if r.count-1 < len(r.points) { + p.CopyTo(&r.points[r.count-1]) + return + } + + // Generate a random integer between 1 and the count and + // if that number is less than the length of the slice + // replace the point at that index rnd with p. + rnd := r.rng.Intn(r.count) + if rnd < len(r.points) { + p.CopyTo(&r.points[rnd]) + } +} + +// Emit emits the reservoir sample as many points. +func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point { + min := len(r.points) + if r.count < min { + min = r.count + } + pts := r.points[:min] + sort.Sort(pts) + return pts +} + + +{{end}}{{end}} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.go b/vendor/github.com/influxdata/influxdb/influxql/functions.go new file mode 100644 index 0000000..4fad6e7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.go @@ -0,0 +1,1163 @@ +package influxql + +import ( + "container/heap" + "math" + "sort" + "time" + + "github.com/influxdata/influxdb/influxql/neldermead" +) + +// FloatMeanReducer calculates the mean of the aggregated points. +type FloatMeanReducer struct { + sum float64 + count uint32 +} + +// NewFloatMeanReducer creates a new FloatMeanReducer. +func NewFloatMeanReducer() *FloatMeanReducer { + return &FloatMeanReducer{} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * float64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. +func (r *FloatMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: r.sum / float64(r.count), + Aggregated: r.count, + }} +} + +// IntegerMeanReducer calculates the mean of the aggregated points. +type IntegerMeanReducer struct { + sum int64 + count uint32 +} + +// NewIntegerMeanReducer creates a new IntegerMeanReducer. +func NewIntegerMeanReducer() *IntegerMeanReducer { + return &IntegerMeanReducer{} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * int64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +// Emit emits the mean of the aggregated points as a single point. 
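+// The mean of integer points is emitted as a FloatPoint so that
+// non-integral averages are not truncated.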
+func (r *IntegerMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: float64(r.sum) / float64(r.count), + Aggregated: r.count, + }} +} + +// FloatDerivativeReducer calculates the derivative of the aggregated points. +type FloatDerivativeReducer struct { + interval Interval + prev FloatPoint + curr FloatPoint + isNonNegative bool + ascending bool +} + +// NewFloatDerivativeReducer creates a new FloatDerivativeReducer. +func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer { + return &FloatDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *FloatDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := r.curr.Value - r.prev.Value + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDerivativeReducer calculates the derivative of the aggregated points. +type IntegerDerivativeReducer struct { + interval Interval + prev IntegerPoint + curr IntegerPoint + isNonNegative bool + ascending bool +} + +// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer. +func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer { + return &IntegerDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *IntegerDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := float64(r.curr.Value - r.prev.Value) + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) + + // Mark this point as read by changing the previous point to nil. 
+ r.prev.Nil = true + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatDifferenceReducer calculates the derivative of the aggregated points. +type FloatDifferenceReducer struct { + isNonNegative bool + prev FloatPoint + curr FloatPoint +} + +// NewFloatDifferenceReducer creates a new FloatDifferenceReducer. +func NewFloatDifferenceReducer(isNonNegative bool) *FloatDifferenceReducer { + return &FloatDifferenceReducer{ + isNonNegative: isNonNegative, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *FloatDifferenceReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // If it is non_negative_difference discard any negative value. Since + // prev is still marked as unread. The correctness can be ensured. + if r.isNonNegative && value < 0 { + return nil + } + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDifferenceReducer calculates the derivative of the aggregated points. +type IntegerDifferenceReducer struct { + isNonNegative bool + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer. +func NewIntegerDifferenceReducer(isNonNegative bool) *IntegerDifferenceReducer { + return &IntegerDifferenceReducer{ + isNonNegative: isNonNegative, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) { + // Skip past a point when it does not advance the stream. A joined series + // may have multiple points at the same time so we will discard anything + // except the first point we encounter. + if !r.curr.Nil && r.curr.Time == p.Time { + return + } + + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *IntegerDifferenceReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + + // If it is non_negative_difference discard any negative value. Since + // prev is still marked as unread. The correctness can be ensured. + if r.isNonNegative && value < 0 { + return nil + } + + // Mark this point as read by changing the previous point to nil. + r.prev.Nil = true + + return []IntegerPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatMovingAverageReducer calculates the moving average of the aggregated points. +type FloatMovingAverageReducer struct { + pos int + sum float64 + time int64 + buf []float64 +} + +// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer. 
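+// The argument n sets the window size: values are kept in a ring buffer of
+// capacity n and no point is emitted until the window has filled. For
+// example, with n = 3, aggregating 1, 2, 3, 4 and emitting after each point
+// yields the means 2 and then 3.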
+func NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer { + return &FloatMovingAverageReducer{ + buf: make([]float64, 0, n), + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateFloat and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *FloatMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: r.sum / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// IntegerMovingAverageReducer calculates the moving average of the aggregated points. +type IntegerMovingAverageReducer struct { + pos int + sum int64 + time int64 + buf []int64 +} + +// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer. +func NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer { + return &IntegerMovingAverageReducer{ + buf: make([]int64, 0, n), + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) { + if len(r.buf) != cap(r.buf) { + r.buf = append(r.buf, p.Value) + } else { + r.sum -= r.buf[r.pos] + r.buf[r.pos] = p.Value + } + r.sum += p.Value + r.time = p.Time + r.pos++ + if r.pos >= cap(r.buf) { + r.pos = 0 + } +} + +// Emit emits the moving average of the current window. Emit should be called +// after every call to AggregateInteger and it will produce one point if there +// is enough data to fill a window, otherwise it will produce zero points. +func (r *IntegerMovingAverageReducer) Emit() []FloatPoint { + if len(r.buf) != cap(r.buf) { + return []FloatPoint{} + } + return []FloatPoint{ + { + Value: float64(r.sum) / float64(len(r.buf)), + Time: r.time, + Aggregated: uint32(len(r.buf)), + }, + } +} + +// FloatCumulativeSumReducer cumulates the values from each point. +type FloatCumulativeSumReducer struct { + curr FloatPoint +} + +// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer. +func NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer { + return &FloatCumulativeSumReducer{ + curr: FloatPoint{Nil: true}, + } +} + +func (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *FloatCumulativeSumReducer) Emit() []FloatPoint { + var pts []FloatPoint + if !r.curr.Nil { + pts = []FloatPoint{r.curr} + } + return pts +} + +// IntegerCumulativeSumReducer cumulates the values from each point. +type IntegerCumulativeSumReducer struct { + curr IntegerPoint +} + +// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer. 
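+// Like its float counterpart, it keeps a running total and emits the sum
+// seen so far; fed the inputs 1, 2, 3 and emitted after each point it
+// yields 1, 3, 6.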
+func NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer { + return &IntegerCumulativeSumReducer{ + curr: IntegerPoint{Nil: true}, + } +} + +func (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) { + r.curr.Value += p.Value + r.curr.Time = p.Time + r.curr.Nil = false +} + +func (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint { + var pts []IntegerPoint + if !r.curr.Nil { + pts = []IntegerPoint{r.curr} + } + return pts +} + +// FloatHoltWintersReducer forecasts a series into the future. +// This is done using the Holt-Winters damped method. +// 1. Using the series the initial values are calculated using a SSE. +// 2. The series is forecasted into the future using the iterative relations. +type FloatHoltWintersReducer struct { + // Season period + m int + seasonal bool + + // Horizon + h int + + // Interval between points + interval int64 + // interval / 2 -- used to perform rounding + halfInterval int64 + + // Whether to include all data or only future values + includeFitData bool + + // NelderMead optimizer + optim *neldermead.Optimizer + // Small difference bound for the optimizer + epsilon float64 + + y []float64 + points []FloatPoint +} + +const ( + // Arbitrary weight for initializing some intial guesses. + // This should be in the range [0,1] + hwWeight = 0.5 + // Epsilon value for the minimization process + hwDefaultEpsilon = 1.0e-4 + // Define a grid of initial guesses for the parameters: alpha, beta, gamma, and phi. + // Keep in mind that this grid is N^4 so we should keep N small + // The starting lower guess + hwGuessLower = 0.3 + // The upper bound on the grid + hwGuessUpper = 1.0 + // The step between guesses + hwGuessStep = 0.4 +) + +// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer. +func NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer { + seasonal := true + if m < 2 { + seasonal = false + } + return &FloatHoltWintersReducer{ + h: h, + m: m, + seasonal: seasonal, + includeFitData: includeFitData, + interval: int64(interval), + halfInterval: int64(interval) / 2, + optim: neldermead.New(), + epsilon: hwDefaultEpsilon, + } +} + +func (r *FloatHoltWintersReducer) aggregate(time int64, value float64) { + r.points = append(r.points, FloatPoint{ + Time: time, + Value: value, + }) +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) { + r.aggregate(p.Time, p.Value) +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) { + r.aggregate(p.Time, float64(p.Value)) +} + +func (r *FloatHoltWintersReducer) roundTime(t int64) int64 { + // Overflow safe round function + remainder := t % r.interval + if remainder > r.halfInterval { + // Round up + return (t/r.interval + 1) * r.interval + } + // Round down + return (t / r.interval) * r.interval +} + +// Emit returns the points generated by the HoltWinters algorithm. 
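+// It works in three stages: the aggregated points are bucketed onto the
+// configured interval (gaps become NaN), a grid of starting values for
+// alpha, beta, gamma and phi is refined with the Nelder-Mead optimizer
+// against the sum of squared errors, and the best parameters are then used
+// to forecast h points past the end of the series (optionally including
+// the fitted values for the input range).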
+func (r *FloatHoltWintersReducer) Emit() []FloatPoint { + if l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 { + return nil + } + // First fill in r.y with values and NaNs for missing values + start, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time) + count := (stop - start) / r.interval + if count <= 0 { + return nil + } + r.y = make([]float64, 1, count) + r.y[0] = r.points[0].Value + t := r.roundTime(r.points[0].Time) + for _, p := range r.points[1:] { + rounded := r.roundTime(p.Time) + if rounded <= t { + // Drop values that occur for the same time bucket + continue + } + t += r.interval + // Add any missing values before the next point + for rounded != t { + // Add in a NaN so we can skip it later. + r.y = append(r.y, math.NaN()) + t += r.interval + } + r.y = append(r.y, p.Value) + } + + // Seasonality + m := r.m + + // Starting guesses + // NOTE: Since these values are guesses + // in the cases where we were missing data, + // we can just skip the value and call it good. + + l0 := 0.0 + if r.seasonal { + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + l0 += (1 / float64(m)) * r.y[i] + } + } + } else { + l0 += hwWeight * r.y[0] + } + + b0 := 0.0 + if r.seasonal { + for i := 0; i < m && m+i < len(r.y); i++ { + if !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) { + b0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i]) + } + } + } else { + if !math.IsNaN(r.y[1]) { + b0 = hwWeight * (r.y[1] - r.y[0]) + } + } + + var s []float64 + if r.seasonal { + s = make([]float64, m) + for i := 0; i < m; i++ { + if !math.IsNaN(r.y[i]) { + s[i] = r.y[i] / l0 + } else { + s[i] = 0 + } + } + } + + parameters := make([]float64, 6+len(s)) + parameters[4] = l0 + parameters[5] = b0 + o := len(parameters) - len(s) + for i := range s { + parameters[i+o] = s[i] + } + + // Determine best fit for the various parameters + minSSE := math.Inf(1) + var bestParams []float64 + for alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep { + for beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep { + for gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep { + for phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep { + parameters[0] = alpha + parameters[1] = beta + parameters[2] = gamma + parameters[3] = phi + sse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1) + if sse < minSSE || bestParams == nil { + minSSE = sse + bestParams = params + } + } + } + } + } + + // Forecast + forecasted := r.forecast(r.h, bestParams) + var points []FloatPoint + if r.includeFitData { + start := r.points[0].Time + points = make([]FloatPoint, 0, len(forecasted)) + for i, v := range forecasted { + if !math.IsNaN(v) { + t := start + r.interval*(int64(i)) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } else { + stop := r.points[len(r.points)-1].Time + points = make([]FloatPoint, 0, r.h) + for i, v := range forecasted[len(r.y):] { + if !math.IsNaN(v) { + t := stop + r.interval*(int64(i)+1) + points = append(points, FloatPoint{ + Value: v, + Time: t, + }) + } + } + } + // Clear data set + r.y = r.y[0:0] + return points +} + +// Using the recursive relations compute the next values +func (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) { + lT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp) + bT = beta*(lT-lTp) + (1-beta)*phi*bTp + sT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm + yTh = (lT + phiH*bT) * sTmh + return +} + +// Forecast the data h points into 
the future. +func (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 { + // Constrain parameters + r.constrain(params) + + yT := r.y[0] + + phi := params[3] + phiH := phi + + lT := params[4] + bT := params[5] + + // seasonals is a ring buffer of past sT values + var seasonals []float64 + var m, so int + if r.seasonal { + seasonals = params[6:] + m = len(params[6:]) + if m == 1 { + seasonals[0] = 1 + } + // Season index offset + so = m - 1 + } + + forecasted := make([]float64, len(r.y)+h) + forecasted[0] = yT + l := len(r.y) + var hm int + stm, stmh := 1.0, 1.0 + for t := 1; t < l+h; t++ { + if r.seasonal { + hm = t % m + stm = seasonals[(t-m+so)%m] + stmh = seasonals[(t-m+hm+so)%m] + } + var sT float64 + yT, lT, bT, sT = r.next( + params[0], // alpha + params[1], // beta + params[2], // gamma + phi, + phiH, + yT, + lT, + bT, + stm, + stmh, + ) + phiH += math.Pow(phi, float64(t)) + + if r.seasonal { + seasonals[(t+so)%m] = sT + so++ + } + + forecasted[t] = yT + } + return forecasted +} + +// Compute sum squared error for the given parameters. +func (r *FloatHoltWintersReducer) sse(params []float64) float64 { + sse := 0.0 + forecasted := r.forecast(0, params) + for i := range forecasted { + // Skip missing values since we cannot use them to compute an error. + if !math.IsNaN(r.y[i]) { + // Compute error + if math.IsNaN(forecasted[i]) { + // Penalize forecasted NaNs + return math.Inf(1) + } + diff := forecasted[i] - r.y[i] + sse += diff * diff + } + } + return sse +} + +// Constrain alpha, beta, gamma, phi in the range [0, 1] +func (r *FloatHoltWintersReducer) constrain(x []float64) { + // alpha + if x[0] > 1 { + x[0] = 1 + } + if x[0] < 0 { + x[0] = 0 + } + // beta + if x[1] > 1 { + x[1] = 1 + } + if x[1] < 0 { + x[1] = 0 + } + // gamma + if x[2] > 1 { + x[2] = 1 + } + if x[2] < 0 { + x[2] = 0 + } + // phi + if x[3] > 1 { + x[3] = 1 + } + if x[3] < 0 { + x[3] = 0 + } +} + +// FloatIntegralReducer calculates the time-integral of the aggregated points. +type FloatIntegralReducer struct { + interval Interval + sum float64 + prev FloatPoint + window struct { + start int64 + end int64 + } + ch chan FloatPoint + opt IteratorOptions +} + +// NewFloatIntegralReducer creates a new FloatIntegralReducer. +func NewFloatIntegralReducer(interval Interval, opt IteratorOptions) *FloatIntegralReducer { + return &FloatIntegralReducer{ + interval: interval, + prev: FloatPoint{Nil: true}, + ch: make(chan FloatPoint, 1), + opt: opt, + } +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatIntegralReducer) AggregateFloat(p *FloatPoint) { + // If this is the first point, just save it + if r.prev.Nil { + r.prev = *p + if !r.opt.Interval.IsZero() { + // Record the end of the time interval. + // We do not care for whether the last number is inclusive or exclusive + // because we treat both the same for the involved math. + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + } + return + } + + // If this point has the same timestamp as the previous one, + // skip the point. Points sent into this reducer are expected + // to be fed in order. + if r.prev.Time == p.Time { + r.prev = *p + return + } else if !r.opt.Interval.IsZero() && ((r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end)) { + // If our previous time is not equal to the window, we need to + // interpolate the area at the end of this interval. 
+ if r.prev.Time != r.window.end { + value := linearFloat(r.window.end, r.prev.Time, p.Time, r.prev.Value, p.Value) + elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + r.prev.Value) * elapsed + + r.prev.Value = value + r.prev.Time = r.window.end + } + + // Emit the current point through the channel and then clear it. + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + r.sum = 0.0 + } + + // Normal operation: update the sum using the trapezium rule + elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (p.Value + r.prev.Value) * elapsed + r.prev = *p +} + +// Emit emits the time-integral of the aggregated points as a single point. +// InfluxQL convention dictates that outside a group-by-time clause we return +// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime +// and a higher level will change it to the start of the time group. +func (r *FloatIntegralReducer) Emit() []FloatPoint { + select { + case pt, ok := <-r.ch: + if !ok { + return nil + } + return []FloatPoint{pt} + default: + return nil + } +} + +// Close flushes any in progress points to ensure any remaining points are +// emitted. +func (r *FloatIntegralReducer) Close() error { + // If our last point is at the start time, then discard this point since + // there is no area within this bucket. Otherwise, send off what we + // currently have as the final point. + if !r.prev.Nil && r.prev.Time != r.window.start { + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + } + close(r.ch) + return nil +} + +// IntegerIntegralReducer calculates the time-integral of the aggregated points. +type IntegerIntegralReducer struct { + interval Interval + sum float64 + prev IntegerPoint + window struct { + start int64 + end int64 + } + ch chan FloatPoint + opt IteratorOptions +} + +// NewIntegerIntegralReducer creates a new IntegerIntegralReducer. +func NewIntegerIntegralReducer(interval Interval, opt IteratorOptions) *IntegerIntegralReducer { + return &IntegerIntegralReducer{ + interval: interval, + prev: IntegerPoint{Nil: true}, + ch: make(chan FloatPoint, 1), + opt: opt, + } +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerIntegralReducer) AggregateInteger(p *IntegerPoint) { + // If this is the first point, just save it + if r.prev.Nil { + r.prev = *p + + // Record the end of the time interval. + // We do not care for whether the last number is inclusive or exclusive + // because we treat both the same for the involved math. + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if r.window.start == MinTime { + r.window.start = 0 + } + return + } + + // If this point has the same timestamp as the previous one, + // skip the point. Points sent into this reducer are expected + // to be fed in order. + value := float64(p.Value) + if r.prev.Time == p.Time { + r.prev = *p + return + } else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) { + // If our previous time is not equal to the window, we need to + // interpolate the area at the end of this interval. 
+ if r.prev.Time != r.window.end { + value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value) + elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + + r.prev.Time = r.window.end + } + + // Emit the current point through the channel and then clear it. + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + if r.opt.Ascending { + r.window.start, r.window.end = r.opt.Window(p.Time) + } else { + r.window.end, r.window.start = r.opt.Window(p.Time) + } + r.sum = 0.0 + } + + // Normal operation: update the sum using the trapezium rule + elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) + r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed + r.prev = *p +} + +// Emit emits the time-integral of the aggregated points as a single FLOAT point +// InfluxQL convention dictates that outside a group-by-time clause we return +// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime +// and a higher level will change it to the start of the time group. +func (r *IntegerIntegralReducer) Emit() []FloatPoint { + select { + case pt, ok := <-r.ch: + if !ok { + return nil + } + return []FloatPoint{pt} + default: + return nil + } +} + +// Close flushes any in progress points to ensure any remaining points are +// emitted. +func (r *IntegerIntegralReducer) Close() error { + // If our last point is at the start time, then discard this point since + // there is no area within this bucket. Otherwise, send off what we + // currently have as the final point. + if !r.prev.Nil && r.prev.Time != r.window.start { + r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} + } + close(r.ch) + return nil +} + +type FloatTopReducer struct { + h *floatPointsByFunc +} + +func NewFloatTopReducer(n int) *FloatTopReducer { + return &FloatTopReducer{ + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + r.h.points[0] = *p + heap.Fix(r.h, 0) + return + } + heap.Push(r.h, *p) +} + +func (r *FloatTopReducer) Emit() []FloatPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]FloatPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := floatPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type IntegerTopReducer struct { + h *integerPointsByFunc +} + +func NewIntegerTopReducer(n int) *IntegerTopReducer { + return &IntegerTopReducer{ + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. 
+ if !r.h.cmp(&r.h.points[0], p) { + return + } + r.h.points[0] = *p + heap.Fix(r.h, 0) + return + } + heap.Push(r.h, *p) +} + +func (r *IntegerTopReducer) Emit() []IntegerPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]IntegerPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := integerPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type FloatBottomReducer struct { + h *floatPointsByFunc +} + +func NewFloatBottomReducer(n int) *FloatBottomReducer { + return &FloatBottomReducer{ + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + r.h.points[0] = *p + heap.Fix(r.h, 0) + return + } + heap.Push(r.h, *p) +} + +func (r *FloatBottomReducer) Emit() []FloatPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. + points := make([]FloatPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := floatPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} + +type IntegerBottomReducer struct { + h *integerPointsByFunc +} + +func NewIntegerBottomReducer(n int) *IntegerBottomReducer { + return &IntegerBottomReducer{ + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time > b.Time + }), + } +} + +func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) { + if r.h.Len() == cap(r.h.points) { + // Compare the minimum point and the aggregated point. If our value is + // larger, replace the current min value. + if !r.h.cmp(&r.h.points[0], p) { + return + } + r.h.points[0] = *p + heap.Fix(r.h, 0) + return + } + heap.Push(r.h, *p) +} + +func (r *IntegerBottomReducer) Emit() []IntegerPoint { + // Ensure the points are sorted with the maximum value last. While the + // first point may be the minimum value, the rest is not guaranteed to be + // in any particular order while it is a heap. 
+ points := make([]IntegerPoint, len(r.h.points)) + for i, p := range r.h.points { + p.Aggregated = 0 + points[i] = p + } + h := integerPointsByFunc{points: points, cmp: r.h.cmp} + sort.Sort(sort.Reverse(&h)) + return points +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions_test.go b/vendor/github.com/influxdata/influxdb/influxql/functions_test.go new file mode 100644 index 0000000..d843cc8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions_test.go @@ -0,0 +1,498 @@ +package influxql_test + +import ( + "math" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +func almostEqual(got, exp float64) bool { + return math.Abs(got-exp) < 1e-5 && !math.IsNaN(got) +} + +func TestHoltWinters_AusTourists(t *testing.T) { + hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []influxql.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 2, Value: 19.148496}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 8, Value: 35.123753}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 18, Value: 19.775244}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + {Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 29, Value: 46.213153}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 33, Value: 48.901525}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 39, Value: 40.978263}, + {Time: 40, Value: 43.772491}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 49, Value: 51.85064132137853}, + {Time: 50, Value: 43.26055282315273}, + {Time: 51, Value: 41.827258044814464}, + {Time: 52, Value: 54.3990354591749}, + {Time: 53, Value: 54.62334472770803}, + {Time: 54, Value: 45.57155693625209}, + {Time: 55, Value: 44.06051240252263}, + {Time: 56, Value: 57.30029870759433}, + {Time: 57, Value: 57.53591513519172}, + {Time: 58, Value: 47.999008139396096}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp 
%v", i, got, exp) + } + } +} + +func TestHoltWinters_AusTourists_Missing(t *testing.T) { + hw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1) + // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists + austourists := []influxql.FloatPoint{ + {Time: 1, Value: 30.052513}, + {Time: 3, Value: 25.317692}, + {Time: 4, Value: 27.591437}, + {Time: 5, Value: 32.076456}, + {Time: 6, Value: 23.487961}, + {Time: 7, Value: 28.47594}, + {Time: 9, Value: 36.838485}, + {Time: 10, Value: 25.007017}, + {Time: 11, Value: 30.72223}, + {Time: 12, Value: 28.693759}, + {Time: 13, Value: 36.640986}, + {Time: 14, Value: 23.824609}, + {Time: 15, Value: 29.311683}, + {Time: 16, Value: 31.770309}, + {Time: 17, Value: 35.177877}, + {Time: 19, Value: 29.60175}, + {Time: 20, Value: 34.538842}, + {Time: 21, Value: 41.273599}, + {Time: 22, Value: 26.655862}, + {Time: 23, Value: 28.279859}, + {Time: 24, Value: 35.191153}, + {Time: 25, Value: 41.727458}, + {Time: 26, Value: 24.04185}, + {Time: 27, Value: 32.328103}, + {Time: 28, Value: 37.328708}, + {Time: 30, Value: 29.346326}, + {Time: 31, Value: 36.48291}, + {Time: 32, Value: 42.977719}, + {Time: 34, Value: 31.180221}, + {Time: 35, Value: 37.717881}, + {Time: 36, Value: 40.420211}, + {Time: 37, Value: 51.206863}, + {Time: 38, Value: 31.887228}, + {Time: 41, Value: 55.558567}, + {Time: 42, Value: 33.850915}, + {Time: 43, Value: 42.076383}, + {Time: 44, Value: 45.642292}, + {Time: 45, Value: 59.76678}, + {Time: 46, Value: 35.191877}, + {Time: 47, Value: 44.319737}, + {Time: 48, Value: 47.913736}, + } + + for _, p := range austourists { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 49, Value: 54.84533610387743}, + {Time: 50, Value: 41.19329421863249}, + {Time: 51, Value: 45.71673175112451}, + {Time: 52, Value: 56.05759298805955}, + {Time: 53, Value: 59.32337460282217}, + {Time: 54, Value: 44.75280096850461}, + {Time: 55, Value: 49.98865098113751}, + {Time: 56, Value: 61.86084934967605}, + {Time: 57, Value: 65.95805633454883}, + {Time: 58, Value: 50.1502170480547}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation(t *testing.T) { + series := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 9, Value: 39.80}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 92.00}, + {Time: 14, Value: 105.70}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 18, Value: 179.30}, + {Time: 19, Value: 203.20}, + } + hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.957405463559748}, + {Time: 3, Value: 7.012210102535647}, + {Time: 4, Value: 10.099589257439924}, + {Time: 5, Value: 
14.229926188104242}, + {Time: 6, Value: 19.418878968703797}, + {Time: 7, Value: 25.68749172281409}, + {Time: 8, Value: 33.062351305731305}, + {Time: 9, Value: 41.575791076125206}, + {Time: 10, Value: 51.26614395589263}, + {Time: 11, Value: 62.178047564264595}, + {Time: 12, Value: 74.36280483872488}, + {Time: 13, Value: 87.87880423073163}, + {Time: 14, Value: 102.79200429905801}, + {Time: 15, Value: 119.17648832929542}, + {Time: 16, Value: 137.11509549747296}, + {Time: 17, Value: 156.70013608313175}, + {Time: 18, Value: 178.03419933863566}, + {Time: 19, Value: 201.23106385518594}, + {Time: 20, Value: 226.4167216525905}, + {Time: 21, Value: 253.73052878285205}, + {Time: 22, Value: 283.32649700397553}, + {Time: 23, Value: 315.37474308085984}, + {Time: 24, Value: 350.06311454009256}, + {Time: 25, Value: 387.59901328556873}, + {Time: 26, Value: 428.21144141893404}, + {Time: 27, Value: 472.1532969569147}, + {Time: 28, Value: 519.7039509590035}, + {Time: 29, Value: 571.1721419458248}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_USPopulation_Missing(t *testing.T) { + series := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 5.31}, + {Time: 3, Value: 7.24}, + {Time: 4, Value: 9.64}, + {Time: 5, Value: 12.90}, + {Time: 6, Value: 17.10}, + {Time: 7, Value: 23.20}, + {Time: 8, Value: 31.40}, + {Time: 10, Value: 50.20}, + {Time: 11, Value: 62.90}, + {Time: 12, Value: 76.00}, + {Time: 13, Value: 92.00}, + {Time: 15, Value: 122.80}, + {Time: 16, Value: 131.70}, + {Time: 17, Value: 151.30}, + {Time: 19, Value: 203.20}, + } + hw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1) + for _, p := range series { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: 1, Value: 3.93}, + {Time: 2, Value: 4.8931364428135105}, + {Time: 3, Value: 6.962653629047061}, + {Time: 4, Value: 10.056207765903274}, + {Time: 5, Value: 14.18435088129532}, + {Time: 6, Value: 19.362939306110846}, + {Time: 7, Value: 25.613247940326584}, + {Time: 8, Value: 32.96213087008264}, + {Time: 9, Value: 41.442230043017204}, + {Time: 10, Value: 51.09223428526052}, + {Time: 11, Value: 61.95719155158485}, + {Time: 12, Value: 74.08887794968567}, + {Time: 13, Value: 87.54622778052787}, + {Time: 14, Value: 102.39582960014131}, + {Time: 15, Value: 118.7124941463221}, + {Time: 16, Value: 136.57990089987464}, + {Time: 17, Value: 156.09133107941278}, + {Time: 18, Value: 177.35049601833734}, + {Time: 19, Value: 200.472471161683}, + {Time: 20, Value: 225.58474737097785}, + {Time: 21, Value: 252.82841286206823}, + {Time: 22, Value: 282.35948095261017}, + {Time: 23, Value: 314.3503808953992}, + {Time: 24, Value: 348.99163145856954}, + {Time: 25, Value: 386.49371962730555}, + {Time: 26, Value: 427.08920989407727}, + {Time: 27, Value: 471.0351131332573}, + {Time: 28, Value: 518.615548088049}, + {Time: 29, Value: 570.1447331101863}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, 
points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} +func TestHoltWinters_RoundTime(t *testing.T) { + maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano() + data := []influxql.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10}, + {Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2}, + {Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11}, + } + hw := influxql.NewFloatHoltWintersReducer(2, 2, true, time.Second) + for _, p := range data { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: maxTime - int64(5*time.Second), Value: 1}, + {Time: maxTime - int64(4*time.Second), Value: 10.006729104838234}, + {Time: maxTime - int64(3*time.Second), Value: 1.998341814469269}, + {Time: maxTime - int64(2*time.Second), Value: 10.997858830631172}, + {Time: maxTime - int64(1*time.Second), Value: 4.085860238030013}, + {Time: maxTime - int64(0*time.Second), Value: 11.35713604403339}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +func TestHoltWinters_MaxTime(t *testing.T) { + data := []influxql.FloatPoint{ + {Time: influxql.MaxTime - 1, Value: 1}, + {Time: influxql.MaxTime, Value: 2}, + } + hw := influxql.NewFloatHoltWintersReducer(1, 0, true, 1) + for _, p := range data { + hw.AggregateFloat(&p) + } + points := hw.Emit() + + forecasted := []influxql.FloatPoint{ + {Time: influxql.MaxTime - 1, Value: 1}, + {Time: influxql.MaxTime, Value: 2.001516944066403}, + {Time: influxql.MaxTime + 1, Value: 2.5365248972488343}, + } + + if exp, got := len(forecasted), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + for i := range forecasted { + if exp, got := forecasted[i].Time, points[i].Time; got != exp { + t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) + } + if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { + t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) + } + } +} + +// TestSample_AllSamplesSeen attempts to verify that it is possible +// to get every subsample in a reasonable number of iterations. +// +// The idea here is that 30 iterations should be enough to hit every possible +// sequence at least once. +func TestSample_AllSamplesSeen(t *testing.T) { + ps := []influxql.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + // List of all the possible subsamples + samples := [][]influxql.FloatPoint{ + { + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + }, + { + {Time: 1, Value: 1}, + {Time: 3, Value: 3}, + }, + { + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }, + } + + // 30 iterations should be sufficient to guarantee that + // we hit every possible subsample. 
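+	// Each 2-point subset of the 3 inputs is drawn with probability 1/3,
+	// so the chance of a particular subset never appearing in 30 runs is
+	// (2/3)^30, roughly 5e-6.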
+ for i := 0; i < 30; i++ { + s := influxql.NewFloatSampleReducer(2) + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + for i, sample := range samples { + // if we find a sample that it matches, remove it from + // this list of possible samples + if deep.Equal(sample, points) { + samples = append(samples[:i], samples[i+1:]...) + break + } + } + + // if samples is empty we've seen every sample, so we're done + if len(samples) == 0 { + return + } + + // The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep, + // this test will fail on machines where UnixNano doesn't return full resolution. + // Specifically, some Windows machines will only return timestamps accurate to 100ns. + // While iterating through this test without an explicit sleep, + // we would only see one or two unique seeds across all the calls to NewFloatSampleReducer. + time.Sleep(time.Millisecond) + } + + // If we missed a sample, report the error + if len(samples) != 0 { + t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples) + } +} + +func TestSample_SampleSizeLessThanNumPoints(t *testing.T) { + s := influxql.NewFloatSampleReducer(2) + + ps := []influxql.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := 2, len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } +} + +func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) { + s := influxql.NewFloatSampleReducer(4) + + ps := []influxql.FloatPoint{ + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + } + + for _, p := range ps { + s.AggregateFloat(&p) + } + + points := s.Emit() + + if exp, got := len(ps), len(points); exp != got { + t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) + } + + if !deep.Equal(ps, points) { + t.Fatalf("unexpected points: %s", spew.Sdump(points)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/influxql.go b/vendor/github.com/influxdata/influxdb/influxql/influxql.go new file mode 100644 index 0000000..324e399 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/influxql.go @@ -0,0 +1,7 @@ +package influxql // import "github.com/influxdata/influxdb/influxql" + +//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl +//go:generate tmpl -data=@tmpldata point.gen.go.tmpl +//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl + +//go:generate protoc --gogo_out=. internal/internal.proto diff --git a/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go new file mode 100644 index 0000000..8134e1b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go @@ -0,0 +1,564 @@ +// Code generated by protoc-gen-gogo. +// source: internal/internal.proto +// DO NOT EDIT! + +/* +Package influxql is a generated protocol buffer package. + +It is generated from these files: + internal/internal.proto + +It has these top-level messages: + Point + Aux + IteratorOptions + Measurements + Measurement + Interval + IteratorStats + VarRef +*/ +package influxql + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Point struct { + Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` + Tags *string `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"` + Time *int64 `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"` + Nil *bool `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"` + Aux []*Aux `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"` + Aggregated *uint32 `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"` + FloatValue *float64 `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } + +func (m *Point) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Point) GetTags() string { + if m != nil && m.Tags != nil { + return *m.Tags + } + return "" +} + +func (m *Point) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *Point) GetNil() bool { + if m != nil && m.Nil != nil { + return *m.Nil + } + return false +} + +func (m *Point) GetAux() []*Aux { + if m != nil { + return m.Aux + } + return nil +} + +func (m *Point) GetAggregated() uint32 { + if m != nil && m.Aggregated != nil { + return *m.Aggregated + } + return 0 +} + +func (m *Point) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Point) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Point) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Point) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *Point) GetStats() *IteratorStats { + if m != nil { + return m.Stats + } + return nil +} + +type Aux struct { + DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` + FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Aux) Reset() { *m = Aux{} } +func (m *Aux) String() string { return proto.CompactTextString(m) } +func (*Aux) ProtoMessage() {} +func (*Aux) Descriptor() ([]byte, []int) { 
return fileDescriptorInternal, []int{1} } + +func (m *Aux) GetDataType() int32 { + if m != nil && m.DataType != nil { + return *m.DataType + } + return 0 +} + +func (m *Aux) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Aux) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Aux) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Aux) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +type IteratorOptions struct { + Expr *string `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"` + Aux []string `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"` + Fields []*VarRef `protobuf:"bytes,17,rep,name=Fields" json:"Fields,omitempty"` + Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` + Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` + Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` + GroupBy []string `protobuf:"bytes,19,rep,name=GroupBy" json:"GroupBy,omitempty"` + Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` + FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` + Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` + StartTime *int64 `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"` + EndTime *int64 `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"` + Location *string `protobuf:"bytes,21,opt,name=Location" json:"Location,omitempty"` + Ascending *bool `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"` + Limit *int64 `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"` + Offset *int64 `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"` + SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` + SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` + Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` + MaxSeriesN *int64 `protobuf:"varint,18,opt,name=MaxSeriesN" json:"MaxSeriesN,omitempty"` + Ordered *bool `protobuf:"varint,20,opt,name=Ordered" json:"Ordered,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorOptions) Reset() { *m = IteratorOptions{} } +func (m *IteratorOptions) String() string { return proto.CompactTextString(m) } +func (*IteratorOptions) ProtoMessage() {} +func (*IteratorOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} } + +func (m *IteratorOptions) GetExpr() string { + if m != nil && m.Expr != nil { + return *m.Expr + } + return "" +} + +func (m *IteratorOptions) GetAux() []string { + if m != nil { + return m.Aux + } + return nil +} + +func (m *IteratorOptions) GetFields() []*VarRef { + if m != nil { + return m.Fields + } + return nil +} + +func (m *IteratorOptions) GetSources() []*Measurement { + if m != nil { + return m.Sources + } + return nil +} + +func (m *IteratorOptions) GetInterval() *Interval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *IteratorOptions) GetDimensions() []string { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *IteratorOptions) GetGroupBy() []string { + if m != nil { + return m.GroupBy + } + return nil +} + +func (m *IteratorOptions) 
GetFill() int32 { + if m != nil && m.Fill != nil { + return *m.Fill + } + return 0 +} + +func (m *IteratorOptions) GetFillValue() float64 { + if m != nil && m.FillValue != nil { + return *m.FillValue + } + return 0 +} + +func (m *IteratorOptions) GetCondition() string { + if m != nil && m.Condition != nil { + return *m.Condition + } + return "" +} + +func (m *IteratorOptions) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *IteratorOptions) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *IteratorOptions) GetLocation() string { + if m != nil && m.Location != nil { + return *m.Location + } + return "" +} + +func (m *IteratorOptions) GetAscending() bool { + if m != nil && m.Ascending != nil { + return *m.Ascending + } + return false +} + +func (m *IteratorOptions) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *IteratorOptions) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *IteratorOptions) GetSLimit() int64 { + if m != nil && m.SLimit != nil { + return *m.SLimit + } + return 0 +} + +func (m *IteratorOptions) GetSOffset() int64 { + if m != nil && m.SOffset != nil { + return *m.SOffset + } + return 0 +} + +func (m *IteratorOptions) GetDedupe() bool { + if m != nil && m.Dedupe != nil { + return *m.Dedupe + } + return false +} + +func (m *IteratorOptions) GetMaxSeriesN() int64 { + if m != nil && m.MaxSeriesN != nil { + return *m.MaxSeriesN + } + return 0 +} + +func (m *IteratorOptions) GetOrdered() bool { + if m != nil && m.Ordered != nil { + return *m.Ordered + } + return false +} + +type Measurements struct { + Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurements) Reset() { *m = Measurements{} } +func (m *Measurements) String() string { return proto.CompactTextString(m) } +func (*Measurements) ProtoMessage() {} +func (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} } + +func (m *Measurements) GetItems() []*Measurement { + if m != nil { + return m.Items + } + return nil +} + +type Measurement struct { + Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` + RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` + Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` + Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` + IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurement) Reset() { *m = Measurement{} } +func (m *Measurement) String() string { return proto.CompactTextString(m) } +func (*Measurement) ProtoMessage() {} +func (*Measurement) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} } + +func (m *Measurement) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +func (m *Measurement) GetRetentionPolicy() string { + if m != nil && m.RetentionPolicy != nil { + return *m.RetentionPolicy + } + return "" +} + +func (m *Measurement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Measurement) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *Measurement) 
GetIsTarget() bool { + if m != nil && m.IsTarget != nil { + return *m.IsTarget + } + return false +} + +type Interval struct { + Duration *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"` + Offset *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Interval) Reset() { *m = Interval{} } +func (m *Interval) String() string { return proto.CompactTextString(m) } +func (*Interval) ProtoMessage() {} +func (*Interval) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} } + +func (m *Interval) GetDuration() int64 { + if m != nil && m.Duration != nil { + return *m.Duration + } + return 0 +} + +func (m *Interval) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +type IteratorStats struct { + SeriesN *int64 `protobuf:"varint,1,opt,name=SeriesN" json:"SeriesN,omitempty"` + PointN *int64 `protobuf:"varint,2,opt,name=PointN" json:"PointN,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorStats) Reset() { *m = IteratorStats{} } +func (m *IteratorStats) String() string { return proto.CompactTextString(m) } +func (*IteratorStats) ProtoMessage() {} +func (*IteratorStats) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} } + +func (m *IteratorStats) GetSeriesN() int64 { + if m != nil && m.SeriesN != nil { + return *m.SeriesN + } + return 0 +} + +func (m *IteratorStats) GetPointN() int64 { + if m != nil && m.PointN != nil { + return *m.PointN + } + return 0 +} + +type VarRef struct { + Val *string `protobuf:"bytes,1,req,name=Val" json:"Val,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *VarRef) Reset() { *m = VarRef{} } +func (m *VarRef) String() string { return proto.CompactTextString(m) } +func (*VarRef) ProtoMessage() {} +func (*VarRef) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} } + +func (m *VarRef) GetVal() string { + if m != nil && m.Val != nil { + return *m.Val + } + return "" +} + +func (m *VarRef) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} + +func init() { + proto.RegisterType((*Point)(nil), "influxql.Point") + proto.RegisterType((*Aux)(nil), "influxql.Aux") + proto.RegisterType((*IteratorOptions)(nil), "influxql.IteratorOptions") + proto.RegisterType((*Measurements)(nil), "influxql.Measurements") + proto.RegisterType((*Measurement)(nil), "influxql.Measurement") + proto.RegisterType((*Interval)(nil), "influxql.Interval") + proto.RegisterType((*IteratorStats)(nil), "influxql.IteratorStats") + proto.RegisterType((*VarRef)(nil), "influxql.VarRef") +} + +func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } + +var fileDescriptorInternal = []byte{ + // 737 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xdd, 0x6a, 0xdb, 0x4a, + 0x10, 0x46, 0x92, 0xe5, 0x58, 0xeb, 0xf8, 0x24, 0x67, 0x4f, 0x7e, 0x96, 0xc3, 0xe1, 0x54, 0xe8, + 0x4a, 0x50, 0xea, 0x40, 0x6e, 0x0b, 0x05, 0xa7, 0x49, 0x8a, 0x21, 0x71, 0xc2, 0x2a, 0xe4, 0x7e, + 0x6b, 0x8d, 0xc5, 0x82, 0x2c, 0xb9, 0xab, 0x55, 0x71, 0x1e, 0xa5, 0xcf, 0xd0, 0x87, 0xe9, 0xab, + 0xf4, 0x11, 0xca, 0xce, 0x4a, 0x96, 0x92, 0x42, 0x73, 0xa5, 0xf9, 0xbe, 0x99, 0x1d, 0xed, 0xcc, + 0x37, 0x3b, 0xe4, 0x54, 0x16, 0x1a, 0x54, 0x21, 0xf2, 0xb3, 0xd6, 0x98, 0x6e, 0x54, 0xa9, 0x4b, + 0x3a, 0x92, 0xc5, 0x2a, 0xaf, 0xb7, 0x5f, 0xf2, 
0xe8, 0x87, 0x4b, 0xfc, 0xfb, 0x52, 0x16, 0x9a, + 0x52, 0x32, 0x58, 0x88, 0x35, 0x30, 0x27, 0x74, 0xe3, 0x80, 0xa3, 0x6d, 0xb8, 0x07, 0x91, 0x55, + 0xcc, 0xb5, 0x9c, 0xb1, 0x91, 0x93, 0x6b, 0x60, 0x5e, 0xe8, 0xc6, 0x1e, 0x47, 0x9b, 0x1e, 0x12, + 0x6f, 0x21, 0x73, 0x36, 0x08, 0xdd, 0x78, 0xc4, 0x8d, 0x49, 0xdf, 0x10, 0x6f, 0x56, 0x6f, 0x99, + 0x1f, 0x7a, 0xf1, 0xf8, 0x7c, 0x32, 0x6d, 0xff, 0x37, 0x9d, 0xd5, 0x5b, 0x6e, 0x3c, 0xf4, 0x7f, + 0x42, 0x66, 0x59, 0xa6, 0x20, 0x13, 0x1a, 0x52, 0x36, 0x0c, 0x9d, 0x78, 0xc2, 0x7b, 0x8c, 0xf1, + 0x5f, 0xe7, 0xa5, 0xd0, 0x8f, 0x22, 0xaf, 0x81, 0xed, 0x85, 0x4e, 0xec, 0xf0, 0x1e, 0x43, 0x23, + 0xb2, 0x3f, 0x2f, 0x34, 0x64, 0xa0, 0x6c, 0xc4, 0x28, 0x74, 0x62, 0x8f, 0x3f, 0xe3, 0x68, 0x48, + 0xc6, 0x89, 0x56, 0xb2, 0xc8, 0x6c, 0x48, 0x10, 0x3a, 0x71, 0xc0, 0xfb, 0x94, 0xc9, 0x72, 0x51, + 0x96, 0x39, 0x88, 0xc2, 0x86, 0x90, 0xd0, 0x89, 0x47, 0xfc, 0x19, 0x47, 0xdf, 0x11, 0x3f, 0xd1, + 0x42, 0x57, 0x6c, 0x1c, 0x3a, 0xf1, 0xf8, 0xfc, 0xb4, 0x2b, 0x66, 0xae, 0x41, 0x09, 0x5d, 0x2a, + 0x74, 0x73, 0x1b, 0x15, 0x7d, 0x77, 0xb0, 0x74, 0xfa, 0x2f, 0x19, 0x5d, 0x0a, 0x2d, 0x1e, 0x9e, + 0x36, 0xb6, 0xa7, 0x3e, 0xdf, 0xe1, 0x17, 0xc5, 0xb9, 0xaf, 0x16, 0xe7, 0xbd, 0x5e, 0xdc, 0xe0, + 0xf5, 0xe2, 0xfc, 0xdf, 0x8b, 0x8b, 0x7e, 0x0e, 0xc8, 0x41, 0x5b, 0xc6, 0xdd, 0x46, 0xcb, 0xb2, + 0x40, 0x85, 0xaf, 0xb6, 0x1b, 0xc5, 0x1c, 0x4c, 0x89, 0xb6, 0x51, 0xd8, 0xe8, 0xe9, 0x86, 0x5e, + 0x1c, 0x58, 0x01, 0x63, 0x32, 0xbc, 0x96, 0x90, 0xa7, 0x15, 0xfb, 0x1b, 0x45, 0x3e, 0xec, 0xfa, + 0xf2, 0x28, 0x14, 0x87, 0x15, 0x6f, 0xfc, 0xf4, 0x8c, 0xec, 0x25, 0x65, 0xad, 0x96, 0x50, 0x31, + 0x0f, 0x43, 0x8f, 0xbb, 0xd0, 0x5b, 0x10, 0x55, 0xad, 0x60, 0x0d, 0x85, 0xe6, 0x6d, 0x14, 0x9d, + 0x92, 0x91, 0x29, 0x55, 0x7d, 0x15, 0x39, 0xd6, 0x35, 0x3e, 0xa7, 0xbd, 0xa6, 0x37, 0x1e, 0xbe, + 0x8b, 0x31, 0xed, 0xbc, 0x94, 0x6b, 0x28, 0x2a, 0x73, 0x7d, 0x9c, 0xb9, 0x80, 0xf7, 0x18, 0xca, + 0xc8, 0xde, 0x27, 0x55, 0xd6, 0x9b, 0x8b, 0x27, 0xf6, 0x0f, 0x3a, 0x5b, 0x68, 0x4a, 0xbd, 0x96, + 0x79, 0x8e, 0xf3, 0xe7, 0x73, 0xb4, 0xe9, 0x7f, 0x24, 0x30, 0xdf, 0xfe, 0xe0, 0x75, 0x84, 0xf1, + 0x7e, 0x2c, 0x8b, 0x54, 0x9a, 0x56, 0xe1, 0xd0, 0x05, 0xbc, 0x23, 0x8c, 0x37, 0xd1, 0x42, 0x69, + 0x7c, 0x21, 0x01, 0xaa, 0xd6, 0x11, 0xe6, 0x1e, 0x57, 0x45, 0x8a, 0x3e, 0x82, 0xbe, 0x16, 0x9a, + 0x61, 0xb9, 0x29, 0x97, 0x02, 0x93, 0x1e, 0x63, 0xd2, 0x1d, 0x36, 0x39, 0x67, 0xd5, 0x12, 0x8a, + 0x54, 0x16, 0x19, 0xce, 0xe0, 0x88, 0x77, 0x04, 0x3d, 0x22, 0xfe, 0x8d, 0x5c, 0x4b, 0xcd, 0xf6, + 0x31, 0xa3, 0x05, 0xf4, 0x84, 0x0c, 0xef, 0x56, 0xab, 0x0a, 0x34, 0x9b, 0x20, 0xdd, 0x20, 0xc3, + 0x27, 0x36, 0xfc, 0x2f, 0xcb, 0x5b, 0x64, 0x6e, 0x96, 0x34, 0x07, 0x0e, 0xec, 0xcd, 0x92, 0xee, + 0xc4, 0x25, 0xa4, 0xf5, 0x06, 0xd8, 0x21, 0xfe, 0xba, 0x41, 0xa6, 0xe7, 0xb7, 0x62, 0x9b, 0x80, + 0x92, 0x50, 0x2d, 0x18, 0xc5, 0x43, 0x3d, 0xc6, 0x64, 0xbc, 0x53, 0x29, 0x28, 0x48, 0xd9, 0x11, + 0x1e, 0x6c, 0x61, 0xf4, 0x9e, 0xec, 0xf7, 0x54, 0xaf, 0xe8, 0x5b, 0xe2, 0xcf, 0x35, 0xac, 0x2b, + 0xe6, 0xfc, 0x69, 0x38, 0x6c, 0x4c, 0xf4, 0xcd, 0x21, 0xe3, 0x1e, 0xdd, 0xbe, 0xb2, 0xcf, 0xa2, + 0x82, 0x66, 0x5e, 0x77, 0x98, 0xc6, 0xe4, 0x80, 0x83, 0x86, 0xc2, 0x74, 0xf1, 0xbe, 0xcc, 0xe5, + 0xf2, 0x09, 0x9f, 0x5a, 0xc0, 0x5f, 0xd2, 0xbb, 0xdd, 0xe7, 0xd9, 0x89, 0xc7, 0xdd, 0x77, 0x44, + 0x7c, 0x0e, 0x19, 0x6c, 0x9b, 0x97, 0x65, 0x81, 0xf9, 0xdf, 0xbc, 0x7a, 0x10, 0x2a, 0x03, 0xdd, + 0xbc, 0xa7, 0x1d, 0x8e, 0x3e, 0x74, 0x63, 0x8b, 0xf7, 0xaa, 0x95, 0x15, 0xd4, 0xc1, 0xe6, 0xec, + 0x70, 0x4f, 0x1c, 0xb7, 0x2f, 0x4e, 0x34, 0x23, 0x93, 0x67, 0x1b, 0x05, 
0x55, 0x69, 0x1a, 0xec, + 0x34, 0xaa, 0x34, 0xdd, 0x3d, 0x21, 0x43, 0xdc, 0xda, 0x8b, 0x36, 0x85, 0x45, 0xd1, 0x94, 0x0c, + 0xed, 0xe3, 0x33, 0x0f, 0xf6, 0x51, 0xe4, 0xcd, 0x36, 0x37, 0x26, 0x2e, 0x6e, 0xb3, 0x8c, 0x5c, + 0x3b, 0xeb, 0xc6, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0xca, 0x3e, 0x5e, 0x08, 0x22, 0x06, 0x00, + 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto new file mode 100644 index 0000000..158372b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto @@ -0,0 +1,77 @@ +syntax = "proto2"; +package influxql; + +message Point { + required string Name = 1; + required string Tags = 2; + required int64 Time = 3; + required bool Nil = 4; + repeated Aux Aux = 5; + optional uint32 Aggregated = 6; + + optional double FloatValue = 7; + optional int64 IntegerValue = 8; + optional string StringValue = 9; + optional bool BooleanValue = 10; + + optional IteratorStats Stats = 11; +} + +message Aux { + required int32 DataType = 1; + optional double FloatValue = 2; + optional int64 IntegerValue = 3; + optional string StringValue = 4; + optional bool BooleanValue = 5; +} + +message IteratorOptions { + optional string Expr = 1; + repeated string Aux = 2; + repeated VarRef Fields = 17; + repeated Measurement Sources = 3; + optional Interval Interval = 4; + repeated string Dimensions = 5; + repeated string GroupBy = 19; + optional int32 Fill = 6; + optional double FillValue = 7; + optional string Condition = 8; + optional int64 StartTime = 9; + optional int64 EndTime = 10; + optional string Location = 21; + optional bool Ascending = 11; + optional int64 Limit = 12; + optional int64 Offset = 13; + optional int64 SLimit = 14; + optional int64 SOffset = 15; + optional bool Dedupe = 16; + optional int64 MaxSeriesN = 18; + optional bool Ordered = 20; +} + +message Measurements { + repeated Measurement Items = 1; +} + +message Measurement { + optional string Database = 1; + optional string RetentionPolicy = 2; + optional string Name = 3; + optional string Regex = 4; + optional bool IsTarget = 5; +} + +message Interval { + optional int64 Duration = 1; + optional int64 Offset = 2; +} + +message IteratorStats { + optional int64 SeriesN = 1; + optional int64 PointN = 2; +} + +message VarRef { + required string Val = 1; + optional int32 Type = 2; +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go new file mode 100644 index 0000000..1d6e982 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go @@ -0,0 +1,11929 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: iterator.gen.go.tmpl + +package influxql + +import ( + "container/heap" + "encoding/binary" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. +const DefaultStatsInterval = 10 * time.Second + +// FloatIterator represents a stream of float points. +type FloatIterator interface { + Iterator + Next() (*FloatPoint, error) +} + +// newFloatIterators converts a slice of Iterator to a slice of FloatIterator. +// Drop and closes any iterator in itrs that is not a FloatIterator and cannot +// be cast to a FloatIterator. 
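+// Integer iterators are wrapped in a cast iterator; iterators of any other
+// point type are closed and left out of the result.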
+func newFloatIterators(itrs []Iterator) []FloatIterator { + a := make([]FloatIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + a = append(a, itr) + + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) + + default: + itr.Close() + } + } + return a +} + +// bufFloatIterator represents a buffered FloatIterator. +type bufFloatIterator struct { + itr FloatIterator + buf *FloatPoint +} + +// newBufFloatIterator returns a buffered FloatIterator. +func newBufFloatIterator(itr FloatIterator) *bufFloatIterator { + return &bufFloatIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufFloatIterator) peek() (*FloatPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufFloatIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufFloatIterator) Next() (*FloatPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v } + +// floatMergeIterator represents an iterator that combines multiple float iterators. +type floatMergeIterator struct { + inputs []FloatIterator + heap *floatMergeHeap + init bool + + // Current iterator and window. + curr *floatMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newFloatMergeIterator returns a new instance of floatMergeIterator. +func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator { + itr := &floatMergeIterator{ + inputs: inputs, + heap: &floatMergeHeap{ + items: make([]*floatMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufFloatIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. 
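+// It also drops the references to the current item, the inputs and the heap
+// items so they can be garbage collected.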
+func (itr *floatMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatMergeIterator) Next() (*FloatPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*floatMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// floatMergeHeap represents a heap of floatMergeHeapItems. +// Items are sorted by their next window and then by name/tags. 
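+// (Concretely, Less compares name and tag set first and only then the window
+// start time, in ascending or descending order depending on opt.Ascending.)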
+type floatMergeHeap struct { + opt IteratorOptions + items []*floatMergeHeapItem +} + +func (h *floatMergeHeap) Len() int { return len(h.items) } +func (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *floatMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatMergeHeapItem)) +} + +func (h *floatMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatMergeHeapItem struct { + itr *bufFloatIterator +} + +// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type floatSortedMergeIterator struct { + inputs []FloatIterator + heap *floatSortedMergeHeap + init bool +} + +// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. +func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { + itr := &floatSortedMergeIterator{ + inputs: inputs, + heap: &floatSortedMergeHeap{ + items: make([]*floatSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *floatSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *floatSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. 
+ item := heap.Pop(itr.heap).(*floatSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. +type floatSortedMergeHeap struct { + opt IteratorOptions + items []*floatSortedMergeHeapItem +} + +func (h *floatSortedMergeHeap) Len() int { return len(h.items) } +func (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + return x.Time > y.Time +} + +func (h *floatSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatSortedMergeHeapItem)) +} + +func (h *floatSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatSortedMergeHeapItem struct { + point *FloatPoint + err error + itr FloatIterator +} + +// floatParallelIterator represents an iterator that pulls data in a separate goroutine. +type floatParallelIterator struct { + input FloatIterator + ch chan floatPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newFloatParallelIterator returns a new instance of floatParallelIterator. +func newFloatParallelIterator(input FloatIterator) *floatParallelIterator { + itr := &floatParallelIterator{ + input: input, + ch: make(chan floatPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *floatParallelIterator) Next() (*FloatPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *floatParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- floatPointError{point: p, err: err}: + } + } +} + +type floatPointError struct { + point *FloatPoint + err error +} + +// floatLimitIterator represents an iterator that limits points per group. +type floatLimitIterator struct { + input FloatIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newFloatLimitIterator returns a new instance of floatLimitIterator. 
+func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator { + return &floatLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *floatLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *floatLimitIterator) Next() (*FloatPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type floatFillIterator struct { + input *bufFloatIterator + prev FloatPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) *floatFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = float64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &floatFillIterator{ + input: newBufFloatIterator(input), + prev: FloatPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatFillIterator) Close() error { return itr.input.Close() } + +func (itr *floatFillIterator) Next() (*FloatPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. 
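+	// Moving to a new series also resets prev below, so fill(previous) does not
+	// carry a value over from the previous series.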
+ itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = FloatPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &FloatPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToFloat(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// floatIntervalIterator represents a float implementation of IntervalIterator. +type floatIntervalIterator struct { + input FloatIterator + opt IteratorOptions +} + +func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator { + return &floatIntervalIterator{input: input, opt: opt} +} + +func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *floatIntervalIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// floatInterruptIterator represents a float implementation of InterruptIterator. +type floatInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + count int +} + +func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator { + return &floatInterruptIterator{input: input, closing: closing} +} + +func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *floatInterruptIterator) Next() (*FloatPoint, error) { + // Only check if the channel is closed every N points. 
This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator. +type floatCloseInterruptIterator struct { + input FloatIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator { + itr := &floatCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *floatCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *floatCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *floatCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxFloatPoint represents a combination of a point and an error for the AuxIterator. +type auxFloatPoint struct { + point *FloatPoint + err error +} + +// floatAuxIterator represents a float implementation of AuxIterator. +type floatAuxIterator struct { + input *bufFloatIterator + output chan auxFloatPoint + fields *auxIteratorFields + background bool +} + +func newFloatAuxIterator(input FloatIterator, opt IteratorOptions) *floatAuxIterator { + return &floatAuxIterator{ + input: newBufFloatIterator(input), + output: make(chan auxFloatPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *floatAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *floatAuxIterator) Start() { go itr.stream() } +func (itr *floatAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatAuxIterator) Close() error { return itr.input.Close() } +func (itr *floatAuxIterator) Next() (*FloatPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *floatAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *floatAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxFloatPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxFloatPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// floatChanIterator represents a new instance of floatChanIterator. 
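+// It hands points from setBuf to Next one at a time through a two-element
+// ring buffer guarded by a sync.Cond: the writer blocks until the previous
+// point has been read, and the second slot keeps the last returned point
+// valid while the next one is written.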
+type floatChanIterator struct { + buf struct { + i int + filled bool + points [2]FloatPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *floatChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *floatChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case float64: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: v} + + case int64: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: float64(v)} + + default: + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *floatChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *floatChanIterator) Next() (*FloatPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// floatReduceFloatIterator executes a reducer for every interval and buffers the result. +type floatReduceFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator { + return &floatReduceFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceFloatPoint stores the reduced data for a name/tag combination. +type floatReduceFloatPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. 
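+	// (The slice is kept in reverse order because Next pops points off the end
+	// of itr.points.)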
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// floatStreamFloatIterator streams inputs into the iterator and emits points gradually. +type floatStreamFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceFloatPoint + points []FloatPoint +} + +// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator. +func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator { + return &floatStreamFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
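+// Points from the left and right inputs are matched up by name, tag set and
+// timestamp before fn is applied; a missing or nil side is handled according
+// to the query's fill option (and skipped entirely for fill(none)).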
+type floatExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) float64) *floatExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatExprIterator) Next() (*FloatPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *floatExprIterator) next() (a, b *FloatPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// floatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatExprFunc func(a, b float64) float64 + +// floatReduceIntegerIterator executes a reducer for every interval and buffers the result. 
+type floatReduceIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator { + return &floatReduceIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceIntegerPoint stores the reduced data for a name/tag combination. +type floatReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type floatStreamIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceIntegerPoint + points []IntegerPoint +} + +// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator. +func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator { + return &floatStreamIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type floatIntegerExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatIntegerExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatIntegerExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) int64) *floatIntegerExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatIntegerExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *floatIntegerExprIterator) next() (a, b *FloatPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. 
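+	// Comparison order is name, then tag ID, then time; whichever side is ahead
+	// is unread so it can be paired with a later point from the other input.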
+ if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// floatIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatIntegerExprFunc func(a, b float64) int64 + +// floatReduceStringIterator executes a reducer for every interval and buffers the result. +type floatReduceStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator { + return &floatReduceStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceStringPoint stores the reduced data for a name/tag combination. +type floatReduceStringPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. 
+ if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// floatStreamStringIterator streams inputs into the iterator and emits points gradually. +type floatStreamStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceStringPoint + points []StringPoint +} + +// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator. +func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator { + return &floatStreamStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. 
+ curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type floatStringExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatStringExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatStringExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) string) *floatStringExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatStringExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatStringExprIterator) Next() (*StringPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. 
+ if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *floatStringExprIterator) next() (a, b *FloatPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// floatStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type floatStringExprFunc func(a, b float64) string + +// floatReduceBooleanIterator executes a reducer for every interval and buffers the result. +type floatReduceBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator { + return &floatReduceBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// floatReduceBooleanPoint stores the reduced data for a name/tag combination. +type floatReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. 
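+// Points in the window are grouped by tag set; each group's aggregator/emitter
+// pair produces the boolean output points for that interval.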
+func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*floatReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type floatStreamBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*floatReduceBooleanPoint + points []BooleanPoint +} + +// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator. +func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator { + return &floatStreamBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*floatReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. 
+func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// floatBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
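+// Two float inputs are combined into boolean output points via fn
+// (for example, a comparison between the left and right operands).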
+type floatBooleanExprIterator struct { + left *bufFloatIterator + right *bufFloatIterator + fn floatBooleanExprFunc + points []FloatPoint // must be size 2 + storePrev bool +} + +func newFloatBooleanExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) bool) *floatBooleanExprIterator { + var points []FloatPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []FloatPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToFloat(opt.FillValue) + points = []FloatPoint{{Value: value}, {Value: value}} + } + return &floatBooleanExprIterator{ + left: newBufFloatIterator(left), + right: newBufFloatIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *floatBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *floatBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *floatBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *floatBooleanExprIterator) next() (a, b *FloatPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// floatBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type floatBooleanExprFunc func(a, b float64) bool + +// floatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type floatTransformIterator struct { + input FloatIterator + fn floatTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *floatTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// floatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatTransformFunc func(p *FloatPoint) *FloatPoint + +// floatBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type floatBoolTransformIterator struct { + input FloatIterator + fn floatBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *floatBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// floatBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatBoolTransformFunc func(p *FloatPoint) *BooleanPoint + +// floatDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
+type floatDedupeIterator struct { + input FloatIterator + m map[string]struct{} // lookup of points already sent +} + +type floatIteratorMapper struct { + e *Emitter + buf []interface{} + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point FloatPoint +} + +func newFloatIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *floatIteratorMapper { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &floatIteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + driver: driver, + fields: fields, + point: FloatPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *floatIteratorMapper) Next() (*FloatPoint, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + itr.point.Time = t + itr.point.Name = name + itr.point.Tags = tags + + itr.e.readInto(t, name, tags, itr.buf) + if itr.driver != nil { + if v := itr.driver.Value(tags, itr.buf); v != nil { + if v, ok := v.(float64); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(tags, itr.buf) + } + return &itr.point, nil +} + +func (itr *floatIteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *floatIteratorMapper) Close() error { + return itr.e.Close() +} + +type floatFilterIterator struct { + input FloatIterator + cond Expr + opt IteratorOptions + m map[string]interface{} +} + +func newFloatFilterIterator(input FloatIterator, cond Expr, opt IteratorOptions) FloatIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. + n := RewriteFunc(CloneExpr(cond), func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(Expr) + if cond == nil { + return input + } else if n, ok := cond.(*BooleanLiteral); ok && n.Val { + return input + } + + return &floatFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *floatFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatFilterIterator) Close() error { return itr.input.Close() } + +func (itr *floatFilterIterator) Next() (*FloatPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +// newFloatDedupeIterator returns a new instance of floatDedupeIterator. +func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator { + return &floatDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// floatReaderIterator represents an iterator that streams from a reader. +type floatReaderIterator struct { + r io.Reader + dec *FloatPointDecoder +} + +// newFloatReaderIterator returns a new instance of floatReaderIterator. +func newFloatReaderIterator(r io.Reader, stats IteratorStats) *floatReaderIterator { + dec := NewFloatPointDecoder(r) + dec.stats = stats + + return &floatReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *floatReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatReaderIterator) Next() (*FloatPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &FloatPoint{} + if err := itr.dec.DecodeFloatPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// IntegerIterator represents a stream of integer points. +type IntegerIterator interface { + Iterator + Next() (*IntegerPoint, error) +} + +// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator. +// Drop and closes any iterator in itrs that is not a IntegerIterator and cannot +// be cast to a IntegerIterator. +func newIntegerIterators(itrs []Iterator) []IntegerIterator { + a := make([]IntegerIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case IntegerIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufIntegerIterator represents a buffered IntegerIterator. +type bufIntegerIterator struct { + itr IntegerIterator + buf *IntegerPoint +} + +// newBufIntegerIterator returns a buffered IntegerIterator. +func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator { + return &bufIntegerIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. 
+func (itr *bufIntegerIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v } + +// integerMergeIterator represents an iterator that combines multiple integer iterators. +type integerMergeIterator struct { + inputs []IntegerIterator + heap *integerMergeHeap + init bool + + // Current iterator and window. + curr *integerMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newIntegerMergeIterator returns a new instance of integerMergeIterator. +func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator { + itr := &integerMergeIterator{ + inputs: inputs, + heap: &integerMergeHeap{ + items: make([]*integerMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufIntegerIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *integerMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*integerMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) + + // Read point and set current window. 
+ p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// integerMergeHeap represents a heap of integerMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type integerMergeHeap struct { + opt IteratorOptions + items []*integerMergeHeapItem +} + +func (h *integerMergeHeap) Len() int { return len(h.items) } +func (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *integerMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerMergeHeapItem)) +} + +func (h *integerMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerMergeHeapItem struct { + itr *bufIntegerIterator +} + +// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type integerSortedMergeIterator struct { + inputs []IntegerIterator + heap *integerSortedMergeHeap + init bool +} + +// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. +func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { + itr := &integerSortedMergeIterator{ + inputs: inputs, + heap: &integerSortedMergeHeap{ + items: make([]*integerSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. 
+func (itr *integerSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *integerSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*integerSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. +type integerSortedMergeHeap struct { + opt IteratorOptions + items []*integerSortedMergeHeapItem +} + +func (h *integerSortedMergeHeap) Len() int { return len(h.items) } +func (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + return x.Time > y.Time +} + +func (h *integerSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerSortedMergeHeapItem)) +} + +func (h *integerSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerSortedMergeHeapItem struct { + point *IntegerPoint + err error + itr IntegerIterator +} + +// integerParallelIterator represents an iterator that pulls data in a separate goroutine. +type integerParallelIterator struct { + input IntegerIterator + ch chan integerPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newIntegerParallelIterator returns a new instance of integerParallelIterator. 
+func newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator { + itr := &integerParallelIterator{ + input: input, + ch: make(chan integerPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *integerParallelIterator) Next() (*IntegerPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *integerParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- integerPointError{point: p, err: err}: + } + } +} + +type integerPointError struct { + point *IntegerPoint + err error +} + +// integerLimitIterator represents an iterator that limits points per group. +type integerLimitIterator struct { + input IntegerIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newIntegerLimitIterator returns a new instance of integerLimitIterator. +func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator { + return &integerLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *integerLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. 
+ if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type integerFillIterator struct { + input *bufIntegerIterator + prev IntegerPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOptions) *integerFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = int64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &integerFillIterator{ + input: newBufIntegerIterator(input), + prev: IntegerPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFillIterator) Close() error { return itr.input.Close() } + +func (itr *integerFillIterator) Next() (*IntegerPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = IntegerPoint{Nil: true} + break + } + + // Check if the point is our next expected point. 
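+	// When no input point exists at the expected window time, a fill point is
+	// synthesized for this slot and any later real point is unread for the next call.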
+ if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &IntegerPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToInteger(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// integerIntervalIterator represents a integer implementation of IntervalIterator. +type integerIntervalIterator struct { + input IntegerIterator + opt IteratorOptions +} + +func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator { + return &integerIntervalIterator{input: input, opt: opt} +} + +func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// integerInterruptIterator represents a integer implementation of InterruptIterator. +type integerInterruptIterator struct { + input IntegerIterator + closing <-chan struct{} + count int +} + +func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator { + return &integerInterruptIterator{input: input, closing: closing} +} + +func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. 
+ if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// integerCloseInterruptIterator represents a integer implementation of CloseInterruptIterator. +type integerCloseInterruptIterator struct { + input IntegerIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator { + itr := &integerCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *integerCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *integerCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *integerCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxIntegerPoint represents a combination of a point and an error for the AuxIterator. +type auxIntegerPoint struct { + point *IntegerPoint + err error +} + +// integerAuxIterator represents a integer implementation of AuxIterator. +type integerAuxIterator struct { + input *bufIntegerIterator + output chan auxIntegerPoint + fields *auxIteratorFields + background bool +} + +func newIntegerAuxIterator(input IntegerIterator, opt IteratorOptions) *integerAuxIterator { + return &integerAuxIterator{ + input: newBufIntegerIterator(input), + output: make(chan auxIntegerPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *integerAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *integerAuxIterator) Start() { go itr.stream() } +func (itr *integerAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerAuxIterator) Close() error { return itr.input.Close() } +func (itr *integerAuxIterator) Next() (*IntegerPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *integerAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *integerAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxIntegerPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxIntegerPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// integerChanIterator represents a new instance of integerChanIterator. +type integerChanIterator struct { + buf struct { + i int + filled bool + points [2]IntegerPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *integerChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *integerChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. 
+ itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case int64: + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *integerChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *integerChanIterator) Next() (*IntegerPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// integerReduceFloatIterator executes a reducer for every interval and buffers the result. +type integerReduceFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator { + return &integerReduceFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
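+	// reduce() builds its result slice in reverse, so popping from the tail
+	// yields points in the requested output order.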
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceFloatPoint stores the reduced data for a name/tag combination. +type integerReduceFloatPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// integerStreamFloatIterator streams inputs into the iterator and emits points gradually. +type integerStreamFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceFloatPoint + points []FloatPoint +} + +// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator. 
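+// Unlike the reduce iterator above, the stream variant does not buffer a whole
+// window: after every aggregated input point it asks the emitter for output
+// and forwards whatever it produces immediately.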
+func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator { + return &integerStreamFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
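+// It walks two integer inputs in lockstep, pairing points by name, tag set
+// and timestamp, and evaluates the binary expression to produce float points.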
+type integerFloatExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerFloatExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerFloatExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) float64) *integerFloatExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerFloatExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *integerFloatExprIterator) next() (a, b *IntegerPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// integerFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type integerFloatExprFunc func(a, b int64) float64 + +// integerReduceIntegerIterator executes a reducer for every interval and buffers the result. +type integerReduceIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator { + return &integerReduceIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceIntegerPoint stores the reduced data for a name/tag combination. +type integerReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
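+	// The reverse order is deliberate: Next() pops results off the end of the
+	// returned slice, so building it backwards emits points forwards.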
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type integerStreamIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceIntegerPoint + points []IntegerPoint +} + +// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator. +func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator { + return &integerStreamIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. 
+ itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type integerExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) int64) *integerExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerExprIterator) Next() (*IntegerPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *integerExprIterator) next() (a, b *IntegerPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. 
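+	// If the two sides are out of step, the point that compares greater by
+	// name, then tag set, then time is pushed back onto its iterator and nil
+	// is returned for that side, so the caller sees either a matching pair or
+	// a lone point whose counterpart is treated as missing.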
+ if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// integerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type integerExprFunc func(a, b int64) int64 + +// integerReduceStringIterator executes a reducer for every interval and buffers the result. +type integerReduceStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator { + return &integerReduceStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceStringPoint stores the reduced data for a name/tag combination. +type integerReduceStringPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceStringPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. 
+ if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// integerStreamStringIterator streams inputs into the iterator and emits points gradually. +type integerStreamStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceStringPoint + points []StringPoint +} + +// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator. +func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator { + return &integerStreamStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. 
+ curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type integerStringExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerStringExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerStringExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) string) *integerStringExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerStringExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerStringExprIterator) Next() (*StringPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. 
+ if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *integerStringExprIterator) next() (a, b *IntegerPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// integerStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type integerStringExprFunc func(a, b int64) string + +// integerReduceBooleanIterator executes a reducer for every interval and buffers the result. +type integerReduceBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator { + return &integerReduceBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// integerReduceBooleanPoint stores the reduced data for a name/tag combination. +type integerReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. 
+func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*integerReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type integerStreamBooleanIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*integerReduceBooleanPoint + points []BooleanPoint +} + +// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator. +func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator { + return &integerStreamBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*integerReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. 
+func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// integerBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type integerBooleanExprIterator struct { + left *bufIntegerIterator + right *bufIntegerIterator + fn integerBooleanExprFunc + points []IntegerPoint // must be size 2 + storePrev bool +} + +func newIntegerBooleanExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) bool) *integerBooleanExprIterator { + var points []IntegerPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []IntegerPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToInteger(opt.FillValue) + points = []IntegerPoint{{Value: value}, {Value: value}} + } + return &integerBooleanExprIterator{ + left: newBufIntegerIterator(left), + right: newBufIntegerIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *integerBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *integerBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *integerBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = 0 + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = 0 + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *integerBooleanExprIterator) next() (a, b *IntegerPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// integerBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type integerBooleanExprFunc func(a, b int64) bool + +// integerTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerTransformIterator struct { + input IntegerIterator + fn integerTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerTransformIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// integerTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerTransformFunc func(p *IntegerPoint) *IntegerPoint + +// integerBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerBoolTransformIterator struct { + input IntegerIterator + fn integerBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// integerBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerBoolTransformFunc func(p *IntegerPoint) *BooleanPoint + +// integerDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
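+// Uniqueness is tracked by serializing each point (including its aux fields)
+// and keeping the encoded bytes as map keys, so memory use grows with the
+// number of distinct points seen.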
+type integerDedupeIterator struct { + input IntegerIterator + m map[string]struct{} // lookup of points already sent +} + +type integerIteratorMapper struct { + e *Emitter + buf []interface{} + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point IntegerPoint +} + +func newIntegerIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *integerIteratorMapper { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &integerIteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + driver: driver, + fields: fields, + point: IntegerPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *integerIteratorMapper) Next() (*IntegerPoint, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + itr.point.Time = t + itr.point.Name = name + itr.point.Tags = tags + + itr.e.readInto(t, name, tags, itr.buf) + if itr.driver != nil { + if v := itr.driver.Value(tags, itr.buf); v != nil { + if v, ok := v.(int64); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } else { + itr.point.Value = 0 + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(tags, itr.buf) + } + return &itr.point, nil +} + +func (itr *integerIteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *integerIteratorMapper) Close() error { + return itr.e.Close() +} + +type integerFilterIterator struct { + input IntegerIterator + cond Expr + opt IteratorOptions + m map[string]interface{} +} + +func newIntegerFilterIterator(input IntegerIterator, cond Expr, opt IteratorOptions) IntegerIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. + n := RewriteFunc(CloneExpr(cond), func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(Expr) + if cond == nil { + return input + } else if n, ok := cond.(*BooleanLiteral); ok && n.Val { + return input + } + + return &integerFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *integerFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFilterIterator) Close() error { return itr.input.Close() } + +func (itr *integerFilterIterator) Next() (*IntegerPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +// newIntegerDedupeIterator returns a new instance of integerDedupeIterator. +func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator { + return &integerDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// integerReaderIterator represents an iterator that streams from a reader. +type integerReaderIterator struct { + r io.Reader + dec *IntegerPointDecoder +} + +// newIntegerReaderIterator returns a new instance of integerReaderIterator. +func newIntegerReaderIterator(r io.Reader, stats IteratorStats) *integerReaderIterator { + dec := NewIntegerPointDecoder(r) + dec.stats = stats + + return &integerReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *integerReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerReaderIterator) Next() (*IntegerPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &IntegerPoint{} + if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// StringIterator represents a stream of string points. +type StringIterator interface { + Iterator + Next() (*StringPoint, error) +} + +// newStringIterators converts a slice of Iterator to a slice of StringIterator. +// Drop and closes any iterator in itrs that is not a StringIterator and cannot +// be cast to a StringIterator. +func newStringIterators(itrs []Iterator) []StringIterator { + a := make([]StringIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case StringIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufStringIterator represents a buffered StringIterator. +type bufStringIterator struct { + itr StringIterator + buf *StringPoint +} + +// newBufStringIterator returns a buffered StringIterator. +func newBufStringIterator(itr StringIterator) *bufStringIterator { + return &bufStringIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufStringIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufStringIterator) peek() (*StringPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. 
+func (itr *bufStringIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufStringIterator) Next() (*StringPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v } + +// stringMergeIterator represents an iterator that combines multiple string iterators. +type stringMergeIterator struct { + inputs []StringIterator + heap *stringMergeHeap + init bool + + // Current iterator and window. + curr *stringMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newStringMergeIterator returns a new instance of stringMergeIterator. +func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator { + itr := &stringMergeIterator{ + inputs: inputs, + heap: &stringMergeHeap{ + items: make([]*stringMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufStringIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *stringMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringMergeIterator) Next() (*StringPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*stringMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) + + // Read point and set current window. 
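+			// The point read below defines the new window: its name, its tag subset for
+			// the configured dimensions, and the time bucket returned by opt.Window.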
+ p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// stringMergeHeap represents a heap of stringMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type stringMergeHeap struct { + opt IteratorOptions + items []*stringMergeHeapItem +} + +func (h *stringMergeHeap) Len() int { return len(h.items) } +func (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *stringMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringMergeHeapItem)) +} + +func (h *stringMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringMergeHeapItem struct { + itr *bufStringIterator +} + +// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type stringSortedMergeIterator struct { + inputs []StringIterator + heap *stringSortedMergeHeap + init bool +} + +// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. +func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { + itr := &stringSortedMergeIterator{ + inputs: inputs, + heap: &stringSortedMergeHeap{ + items: make([]*stringSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. 
+func (itr *stringSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *stringSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*stringSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. +type stringSortedMergeHeap struct { + opt IteratorOptions + items []*stringSortedMergeHeapItem +} + +func (h *stringSortedMergeHeap) Len() int { return len(h.items) } +func (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + return x.Time > y.Time +} + +func (h *stringSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringSortedMergeHeapItem)) +} + +func (h *stringSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringSortedMergeHeapItem struct { + point *StringPoint + err error + itr StringIterator +} + +// stringParallelIterator represents an iterator that pulls data in a separate goroutine. +type stringParallelIterator struct { + input StringIterator + ch chan stringPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newStringParallelIterator returns a new instance of stringParallelIterator. 
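+// The returned iterator starts a monitor goroutine that reads ahead from the
+// input and buffers up to 256 points in a channel; Close stops the goroutine
+// before closing the underlying input.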
+func newStringParallelIterator(input StringIterator) *stringParallelIterator { + itr := &stringParallelIterator{ + input: input, + ch: make(chan stringPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *stringParallelIterator) Next() (*StringPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *stringParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- stringPointError{point: p, err: err}: + } + } +} + +type stringPointError struct { + point *StringPoint + err error +} + +// stringLimitIterator represents an iterator that limits points per group. +type stringLimitIterator struct { + input StringIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newStringLimitIterator returns a new instance of stringLimitIterator. +func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator { + return &stringLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *stringLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *stringLimitIterator) Next() (*StringPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. 
+ if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type stringFillIterator struct { + input *bufStringIterator + prev StringPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) *stringFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = "" + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &stringFillIterator{ + input: newBufStringIterator(input), + prev: StringPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringFillIterator) Close() error { return itr.input.Close() } + +func (itr *stringFillIterator) Next() (*StringPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = StringPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &StringPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + fallthrough + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToString(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. 
+ if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// stringIntervalIterator represents a string implementation of IntervalIterator. +type stringIntervalIterator struct { + input StringIterator + opt IteratorOptions +} + +func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator { + return &stringIntervalIterator{input: input, opt: opt} +} + +func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *stringIntervalIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// stringInterruptIterator represents a string implementation of InterruptIterator. +type stringInterruptIterator struct { + input StringIterator + closing <-chan struct{} + count int +} + +func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator { + return &stringInterruptIterator{input: input, closing: closing} +} + +func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *stringInterruptIterator) Next() (*StringPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator. 
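+// A monitor goroutine closes the underlying input as soon as the closing
+// channel is closed, so that a Next call blocked on the input can return.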
+type stringCloseInterruptIterator struct { + input StringIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator { + itr := &stringCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *stringCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *stringCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *stringCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxStringPoint represents a combination of a point and an error for the AuxIterator. +type auxStringPoint struct { + point *StringPoint + err error +} + +// stringAuxIterator represents a string implementation of AuxIterator. +type stringAuxIterator struct { + input *bufStringIterator + output chan auxStringPoint + fields *auxIteratorFields + background bool +} + +func newStringAuxIterator(input StringIterator, opt IteratorOptions) *stringAuxIterator { + return &stringAuxIterator{ + input: newBufStringIterator(input), + output: make(chan auxStringPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *stringAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *stringAuxIterator) Start() { go itr.stream() } +func (itr *stringAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringAuxIterator) Close() error { return itr.input.Close() } +func (itr *stringAuxIterator) Next() (*StringPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *stringAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *stringAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxStringPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxStringPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// stringChanIterator represents a new instance of stringChanIterator. +type stringChanIterator struct { + buf struct { + i int + filled bool + points [2]StringPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *stringChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *stringChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. 
+ itr.cond.L.Unlock() + return nil +} + +func (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case string: + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *stringChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *stringChanIterator) Next() (*StringPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// stringReduceFloatIterator executes a reducer for every interval and buffers the result. +type stringReduceFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator { + return &stringReduceFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceFloatPoint stores the reduced data for a name/tag combination. 
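+// reduce keeps one of these per tag-set ID while processing a window and emits
+// the aggregated points once the window is complete.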
+type stringReduceFloatPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// stringStreamFloatIterator streams inputs into the iterator and emits points gradually. +type stringStreamFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceFloatPoint + points []FloatPoint +} + +// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator. 
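+// Unlike stringReduceFloatIterator, the stream iterator does not wait for a
+// window to finish: each input point is aggregated and any points the emitter
+// produces are returned immediately.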
+func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator { + return &stringStreamFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
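+// Points from the left and right iterators are paired by name, tag set and
+// timestamp; a missing side is filled according to the fill option before the
+// function is applied and the result emitted as a FloatPoint.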
+type stringFloatExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringFloatExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringFloatExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) float64) *stringFloatExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringFloatExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = "" + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = "" + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *stringFloatExprIterator) next() (a, b *StringPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// stringFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type stringFloatExprFunc func(a, b string) float64 + +// stringReduceIntegerIterator executes a reducer for every interval and buffers the result. +type stringReduceIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator { + return &stringReduceIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceIntegerPoint stores the reduced data for a name/tag combination. +type stringReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type stringStreamIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceIntegerPoint + points []IntegerPoint +} + +// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator. +func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator { + return &stringStreamIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. 
+ itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringIntegerExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringIntegerExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringIntegerExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) int64) *stringIntegerExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringIntegerExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = "" + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = "" + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *stringIntegerExprIterator) next() (a, b *StringPoint, err error) { + // Retrieve the next value for both the left and right. 
+ a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// stringIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringIntegerExprFunc func(a, b string) int64 + +// stringReduceStringIterator executes a reducer for every interval and buffers the result. +type stringReduceStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator { + return &stringReduceStringIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceStringPoint stores the reduced data for a name/tag combination. +type stringReduceStringPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceStringPoint) + for { + // Read next point. 
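+		// NextInWindow returns nil once the next point falls outside
+		// [startTime, endTime), which ends the current window.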
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// stringStreamStringIterator streams inputs into the iterator and emits points gradually. +type stringStreamStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceStringPoint + points []StringPoint +} + +// newStringStreamStringIterator returns a new instance of stringStreamStringIterator. +func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator { + return &stringStreamStringIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) string) *stringExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringExprIterator) Next() (*StringPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = "" + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = "" + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. 
+ if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *stringExprIterator) next() (a, b *StringPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// stringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringExprFunc func(a, b string) string + +// stringReduceBooleanIterator executes a reducer for every interval and buffers the result. +type stringReduceBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator { + return &stringReduceBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// stringReduceBooleanPoint stores the reduced data for a name/tag combination. +type stringReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. 
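+		// The loop below peeks at the first non-nil point, derives the
+		// [startTime, endTime) window from its timestamp via opt.Window, records
+		// the point's name and dimension tag ID, and then unreads the point so it
+		// is aggregated together with the rest of the window.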
+ var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*stringReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type stringStreamBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*stringReduceBooleanPoint + points []BooleanPoint +} + +// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator. +func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator { + return &stringStreamBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*stringReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateString(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// stringBooleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type stringBooleanExprIterator struct { + left *bufStringIterator + right *bufStringIterator + fn stringBooleanExprFunc + points []StringPoint // must be size 2 + storePrev bool +} + +func newStringBooleanExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) bool) *stringBooleanExprIterator { + var points []StringPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []StringPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToString(opt.FillValue) + points = []StringPoint{{Value: value}, {Value: value}} + } + return &stringBooleanExprIterator{ + left: newBufStringIterator(left), + right: newBufStringIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *stringBooleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *stringBooleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *stringBooleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. 
+ if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = "" + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = "" + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *stringBooleanExprIterator) next() (a, b *StringPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// stringBooleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type stringBooleanExprFunc func(a, b string) bool + +// stringTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringTransformIterator struct { + input StringIterator + fn stringTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *stringTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringTransformIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// stringTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringTransformFunc func(p *StringPoint) *StringPoint + +// stringBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringBoolTransformIterator struct { + input StringIterator + fn stringBoolTransformFunc +} + +// Stats returns stats from the input iterator. 
+func (itr *stringBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// stringBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringBoolTransformFunc func(p *StringPoint) *BooleanPoint + +// stringDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type stringDedupeIterator struct { + input StringIterator + m map[string]struct{} // lookup of points already sent +} + +type stringIteratorMapper struct { + e *Emitter + buf []interface{} + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point StringPoint +} + +func newStringIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *stringIteratorMapper { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &stringIteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + driver: driver, + fields: fields, + point: StringPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *stringIteratorMapper) Next() (*StringPoint, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + itr.point.Time = t + itr.point.Name = name + itr.point.Tags = tags + + itr.e.readInto(t, name, tags, itr.buf) + if itr.driver != nil { + if v := itr.driver.Value(tags, itr.buf); v != nil { + if v, ok := v.(string); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = "" + itr.point.Nil = true + } + } else { + itr.point.Value = "" + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(tags, itr.buf) + } + return &itr.point, nil +} + +func (itr *stringIteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *stringIteratorMapper) Close() error { + return itr.e.Close() +} + +type stringFilterIterator struct { + input StringIterator + cond Expr + opt IteratorOptions + m map[string]interface{} +} + +func newStringFilterIterator(input StringIterator, cond Expr, opt IteratorOptions) StringIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
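+	// Binary expressions that compare against `time` are rewritten to TRUE so
+	// that only tag and field predicates remain; if the rewritten condition
+	// reduces to TRUE (or to nothing), the input is returned unfiltered.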
+ n := RewriteFunc(CloneExpr(cond), func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(Expr) + if cond == nil { + return input + } else if n, ok := cond.(*BooleanLiteral); ok && n.Val { + return input + } + + return &stringFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *stringFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringFilterIterator) Close() error { return itr.input.Close() } + +func (itr *stringFilterIterator) Next() (*StringPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +// newStringDedupeIterator returns a new instance of stringDedupeIterator. +func newStringDedupeIterator(input StringIterator) *stringDedupeIterator { + return &stringDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *stringDedupeIterator) Next() (*StringPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// stringReaderIterator represents an iterator that streams from a reader. +type stringReaderIterator struct { + r io.Reader + dec *StringPointDecoder +} + +// newStringReaderIterator returns a new instance of stringReaderIterator. +func newStringReaderIterator(r io.Reader, stats IteratorStats) *stringReaderIterator { + dec := NewStringPointDecoder(r) + dec.stats = stats + + return &stringReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *stringReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringReaderIterator) Next() (*StringPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &StringPoint{} + if err := itr.dec.DecodeStringPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// BooleanIterator represents a stream of boolean points. +type BooleanIterator interface { + Iterator + Next() (*BooleanPoint, error) +} + +// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. 
+// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot +// be cast to a BooleanIterator. +func newBooleanIterators(itrs []Iterator) []BooleanIterator { + a := make([]BooleanIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case BooleanIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufBooleanIterator represents a buffered BooleanIterator. +type bufBooleanIterator struct { + itr BooleanIterator + buf *BooleanPoint +} + +// newBufBooleanIterator returns a buffered BooleanIterator. +func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator { + return &bufBooleanIterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufBooleanIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v } + +// booleanMergeIterator represents an iterator that combines multiple boolean iterators. +type booleanMergeIterator struct { + inputs []BooleanIterator + heap *booleanMergeHeap + init bool + + // Current iterator and window. + curr *booleanMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newBooleanMergeIterator returns a new instance of booleanMergeIterator. +func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator { + itr := &booleanMergeIterator{ + inputs: inputs, + heap: &booleanMergeHeap{ + items: make([]*booleanMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufBooleanIterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. 
+func (itr *booleanMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// booleanMergeHeap represents a heap of booleanMergeHeapItems. +// Items are sorted by their next window and then by name/tags. 
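+// Less orders items by measurement name, then by the tag ID of the dimension
+// subset, and finally by the start time of the window containing each item's
+// next point; the comparison is reversed when opt.Ascending is false.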
+type booleanMergeHeap struct { + opt IteratorOptions + items []*booleanMergeHeapItem +} + +func (h *booleanMergeHeap) Len() int { return len(h.items) } +func (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanMergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *booleanMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanMergeHeapItem)) +} + +func (h *booleanMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanMergeHeapItem struct { + itr *bufBooleanIterator +} + +// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type booleanSortedMergeIterator struct { + inputs []BooleanIterator + heap *booleanSortedMergeHeap + init bool +} + +// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. +func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { + itr := &booleanSortedMergeIterator{ + inputs: inputs, + heap: &booleanSortedMergeHeap{ + items: make([]*booleanSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *booleanSortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *booleanSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. 
+ item := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. +type booleanSortedMergeHeap struct { + opt IteratorOptions + items []*booleanSortedMergeHeapItem +} + +func (h *booleanSortedMergeHeap) Len() int { return len(h.items) } +func (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + return x.Time > y.Time +} + +func (h *booleanSortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanSortedMergeHeapItem)) +} + +func (h *booleanSortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanSortedMergeHeapItem struct { + point *BooleanPoint + err error + itr BooleanIterator +} + +// booleanParallelIterator represents an iterator that pulls data in a separate goroutine. +type booleanParallelIterator struct { + input BooleanIterator + ch chan booleanPointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// newBooleanParallelIterator returns a new instance of booleanParallelIterator. +func newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator { + itr := &booleanParallelIterator{ + input: input, + ch: make(chan booleanPointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *booleanParallelIterator) Next() (*BooleanPoint, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *booleanParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- booleanPointError{point: p, err: err}: + } + } +} + +type booleanPointError struct { + point *BooleanPoint + err error +} + +// booleanLimitIterator represents an iterator that limits points per group. 
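+// The counter resets whenever the (name, tags) group changes, points are
+// skipped while the counter is within opt.Offset, and any further points in
+// the group are skipped once opt.Limit points past the offset have been
+// emitted (when opt.Limit > 0). For example, with opt.Offset = 1 and
+// opt.Limit = 2 the first point of each group is dropped and at most the
+// next two are emitted.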
+type booleanLimitIterator struct { + input BooleanIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newBooleanLimitIterator returns a new instance of booleanLimitIterator. +func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator { + return &booleanLimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type booleanFillIterator struct { + input *bufBooleanIterator + prev BooleanPoint + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOptions) *booleanFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = false + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &booleanFillIterator{ + input: newBufBooleanIterator(input), + prev: BooleanPoint{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanFillIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. 
+ if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = BooleanPoint{Nil: true} + break + } + + // Check if the point is our next expected point. + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &BooleanPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + fallthrough + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToBoolean(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// booleanIntervalIterator represents a boolean implementation of IntervalIterator. +type booleanIntervalIterator struct { + input BooleanIterator + opt IteratorOptions +} + +func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator { + return &booleanIntervalIterator{input: input, opt: opt} +} + +func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// booleanInterruptIterator represents a boolean implementation of InterruptIterator. +type booleanInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + count int +} + +func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator { + return &booleanInterruptIterator{input: input, closing: closing} +} + +func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() } + +func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. 
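+	// The mask below means the closing channel is only polled roughly once
+	// every 256 calls; once it has been closed, the iterator closes itself and
+	// stops emitting points.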
+ if itr.count&0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator. +type booleanCloseInterruptIterator struct { + input BooleanIterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator { + itr := &booleanCloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *booleanCloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *booleanCloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *booleanCloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// auxBooleanPoint represents a combination of a point and an error for the AuxIterator. +type auxBooleanPoint struct { + point *BooleanPoint + err error +} + +// booleanAuxIterator represents a boolean implementation of AuxIterator. +type booleanAuxIterator struct { + input *bufBooleanIterator + output chan auxBooleanPoint + fields *auxIteratorFields + background bool +} + +func newBooleanAuxIterator(input BooleanIterator, opt IteratorOptions) *booleanAuxIterator { + return &booleanAuxIterator{ + input: newBufBooleanIterator(input), + output: make(chan auxBooleanPoint, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *booleanAuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *booleanAuxIterator) Start() { go itr.stream() } +func (itr *booleanAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanAuxIterator) Close() error { return itr.input.Close() } +func (itr *booleanAuxIterator) Next() (*BooleanPoint, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *booleanAuxIterator) Iterator(name string, typ DataType) Iterator { + return itr.fields.iterator(name, typ) +} + +func (itr *booleanAuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- auxBooleanPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- auxBooleanPoint{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// booleanChanIterator represents a new instance of booleanChanIterator. +type booleanChanIterator struct { + buf struct { + i int + filled bool + points [2]BooleanPoint + } + err error + cond *sync.Cond + done bool +} + +func (itr *booleanChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *booleanChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. 
+ itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case bool: + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Value: v} + + default: + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *booleanChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *booleanChanIterator) Next() (*BooleanPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +// booleanReduceFloatIterator executes a reducer for every interval and buffers the result. +type booleanReduceFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + points []FloatPoint + keepTags bool +} + +func newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator { + return &booleanReduceFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceFloatPoint stores the reduced data for a name/tag combination. +type booleanReduceFloatPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceFloatPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. +type booleanStreamFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceFloatPoint + points []FloatPoint +} + +// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator. 
+func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator { + return &booleanStreamFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceFloatPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []FloatPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanFloatExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
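+// The left and right inputs are walked in lockstep and matched by name, tag ID
+// and timestamp; when one side is missing, it is backfilled from the configured
+// fill value (if any) before fn combines the two boolean values into a float.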
+type booleanFloatExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanFloatExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanFloatExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) float64) *booleanFloatExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanFloatExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanFloatExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanFloatExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanFloatExprIterator) Next() (*FloatPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = false + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = false + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *booleanFloatExprIterator) next() (a, b *BooleanPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// booleanFloatExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
+type booleanFloatExprFunc func(a, b bool) float64 + +// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result. +type booleanReduceIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + points []IntegerPoint + keepTags bool +} + +func newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator { + return &booleanReduceIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceIntegerPoint stores the reduced data for a name/tag combination. +type booleanReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceIntegerPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. 
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. +type booleanStreamIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceIntegerPoint + points []IntegerPoint +} + +// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator. +func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator { + return &booleanStreamIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceIntegerPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []IntegerPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. 
+ itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanIntegerExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type booleanIntegerExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanIntegerExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanIntegerExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) int64) *booleanIntegerExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanIntegerExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanIntegerExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanIntegerExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanIntegerExprIterator) Next() (*IntegerPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = false + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = false + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &IntegerPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *booleanIntegerExprIterator) next() (a, b *BooleanPoint, err error) { + // Retrieve the next value for both the left and right. 
+ a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// booleanIntegerExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanIntegerExprFunc func(a, b bool) int64 + +// booleanReduceStringIterator executes a reducer for every interval and buffers the result. +type booleanReduceStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + points []StringPoint + keepTags bool +} + +func newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator { + return &booleanReduceStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceStringPoint stores the reduced data for a name/tag combination. +type booleanReduceStringPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceStringPoint) + for { + // Read next point. 
+ curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamStringIterator streams inputs into the iterator and emits points gradually. +type booleanStreamStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceStringPoint + points []StringPoint +} + +// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator. +func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator { + return &booleanStreamStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceStringPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. 
+ p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []StringPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanStringExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type booleanStringExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanStringExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanStringExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) string) *booleanStringExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanStringExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanStringExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanStringExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanStringExprIterator) Next() (*StringPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. 
+ if a == nil { + p := *b + a = &p + a.Value = false + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = false + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + p := &StringPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *booleanStringExprIterator) next() (a, b *BooleanPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// booleanStringExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type booleanStringExprFunc func(a, b bool) string + +// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result. +type booleanReduceBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + points []BooleanPoint + keepTags bool +} + +func newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator { + return &booleanReduceBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// booleanReduceBooleanPoint stores the reduced data for a name/tag combination. 
+type booleanReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*booleanReduceBooleanPoint) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + + return a, nil +} + +// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. +type booleanStreamBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string + opt IteratorOptions + m map[string]*booleanReduceBooleanPoint + points []BooleanPoint +} + +// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator. 
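(Editorial aside, not part of the vendored file: both the reduce and stream variants above take a createFn factory that returns an aggregator/emitter pair. A minimal sketch, assuming the single-method AggregateBoolean/Emit interfaces implied by the calling code; the allTrueReducer name is hypothetical.)

    // Hypothetical reducer: logical AND of every boolean point seen in a window.
    type allTrueReducer struct {
        seen bool
        val  bool
    }

    // AggregateBoolean folds one input point into the running result.
    func (r *allTrueReducer) AggregateBoolean(p *BooleanPoint) {
        if !r.seen {
            r.seen, r.val = true, p.Value
            return
        }
        r.val = r.val && p.Value
    }

    // Emit returns the single reduced point for the window. Leaving Time as
    // ZeroTime lets the calling iterator stamp it with the interval start.
    func (r *allTrueReducer) Emit() []BooleanPoint {
        if !r.seen {
            return nil
        }
        return []BooleanPoint{{Time: ZeroTime, Value: r.val}}
    }

    // A matching createFn factory returns the same value in both roles.
    func newAllTrueReducer() (BooleanPointAggregator, BooleanPointEmitter) {
        r := &allTrueReducer{}
        return r, r
    }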
+func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator { + return &booleanStreamBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*booleanReduceBooleanPoint), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []BooleanPoint + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + + // Attempt to emit points from the aggregator. + points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// booleanExprIterator executes a function to modify an existing point +// for every output of the input iterator. 
+type booleanExprIterator struct { + left *bufBooleanIterator + right *bufBooleanIterator + fn booleanExprFunc + points []BooleanPoint // must be size 2 + storePrev bool +} + +func newBooleanExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) bool) *booleanExprIterator { + var points []BooleanPoint + switch opt.Fill { + case NullFill, PreviousFill: + points = []BooleanPoint{{Nil: true}, {Nil: true}} + case NumberFill: + value := castToBoolean(opt.FillValue) + points = []BooleanPoint{{Value: value}, {Value: value}} + } + return &booleanExprIterator{ + left: newBufBooleanIterator(left), + right: newBufBooleanIterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *booleanExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *booleanExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *booleanExprIterator) Next() (*BooleanPoint, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = false + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = false + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil + + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *booleanExprIterator) next() (a, b *BooleanPoint, err error) { + // Retrieve the next value for both the left and right. + a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// booleanExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. 
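(Editorial aside, not part of the vendored file: a sketch of the kind of combiner a boolean binary expression would supply for the booleanExprFunc type declared just below; the name logicalAnd is hypothetical.)

    // Hypothetical combiner for a logical AND between two boolean inputs,
    // passed as the fn argument of newBooleanExprIterator.
    func logicalAnd(a, b bool) bool { return a && b }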
+type booleanExprFunc func(a, b bool) bool + +// booleanTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type booleanTransformIterator struct { + input BooleanIterator + fn booleanTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *booleanTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// booleanTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanBoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type booleanBoolTransformIterator struct { + input BooleanIterator + fn booleanBoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *booleanBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *booleanBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// booleanBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanBoolTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
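(Editorial aside, not part of the vendored file: a hedged usage sketch of the dedupe iterator described above; drainUnique and input are hypothetical names.)

    // Hypothetical wiring: deduplicate a small boolean result set (e.g. from a meta query).
    func drainUnique(input BooleanIterator) ([]BooleanPoint, error) {
        itr := newBooleanDedupeIterator(input)
        defer itr.Close()

        var out []BooleanPoint
        for {
            p, err := itr.Next()
            if err != nil {
                return nil, err
            } else if p == nil {
                return out, nil // each distinct point (aux fields included) appears once
            }
            out = append(out, *p)
        }
    }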
+type booleanDedupeIterator struct { + input BooleanIterator + m map[string]struct{} // lookup of points already sent +} + +type booleanIteratorMapper struct { + e *Emitter + buf []interface{} + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point BooleanPoint +} + +func newBooleanIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *booleanIteratorMapper { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &booleanIteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + driver: driver, + fields: fields, + point: BooleanPoint{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *booleanIteratorMapper) Next() (*BooleanPoint, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + itr.point.Time = t + itr.point.Name = name + itr.point.Tags = tags + + itr.e.readInto(t, name, tags, itr.buf) + if itr.driver != nil { + if v := itr.driver.Value(tags, itr.buf); v != nil { + if v, ok := v.(bool); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = false + itr.point.Nil = true + } + } else { + itr.point.Value = false + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(tags, itr.buf) + } + return &itr.point, nil +} + +func (itr *booleanIteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *booleanIteratorMapper) Close() error { + return itr.e.Close() +} + +type booleanFilterIterator struct { + input BooleanIterator + cond Expr + opt IteratorOptions + m map[string]interface{} +} + +func newBooleanFilterIterator(input BooleanIterator, cond Expr, opt IteratorOptions) BooleanIterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. + n := RewriteFunc(CloneExpr(cond), func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(Expr) + if cond == nil { + return input + } else if n, ok := cond.(*BooleanLiteral); ok && n.Val { + return input + } + + return &booleanFilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *booleanFilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanFilterIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFilterIterator) Next() (*BooleanPoint, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator. +func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator { + return &booleanDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. 
+func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// booleanReaderIterator represents an iterator that streams from a reader. +type booleanReaderIterator struct { + r io.Reader + dec *BooleanPointDecoder +} + +// newBooleanReaderIterator returns a new instance of booleanReaderIterator. +func newBooleanReaderIterator(r io.Reader, stats IteratorStats) *booleanReaderIterator { + dec := NewBooleanPointDecoder(r) + dec.stats = stats + + return &booleanReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *booleanReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &BooleanPoint{} + if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer + + // Frequency with which stats are emitted. + StatsInterval time.Duration +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{ + w: w, + + StatsInterval: DefaultStatsInterval, + } +} + +// EncodeIterator encodes and writes all of itr's points to the underlying writer. +func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +// encodeFloatIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewFloatPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. 
+ if err := penc.EncodeFloatPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeIntegerIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewIntegerPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeIntegerPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeStringIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewStringPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeStringPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encodeBooleanIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := NewBooleanPointEncoder(enc.w) + for { + // Emit stats periodically. + select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.EncodeBooleanPoint(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +// encode a stats object in the point stream. 
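(Editorial aside, not part of the vendored file: before the stats-encoding helper below, a hedged usage sketch of IteratorEncoder as a whole; encodeToBuffer is a hypothetical wrapper and assumes the usual bytes/time imports.)

    // Hypothetical wrapper: stream every point of itr, plus periodic stats, into a buffer.
    func encodeToBuffer(itr Iterator) (*bytes.Buffer, error) {
        var buf bytes.Buffer
        enc := NewIteratorEncoder(&buf)
        enc.StatsInterval = time.Second // default is DefaultStatsInterval (10s)
        if err := enc.EncodeIterator(itr); err != nil {
            return nil, err // note: unsupported iterator types panic rather than returning an error
        }
        return &buf, nil
    }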
+func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { + buf, err := proto.Marshal(&internal.Point{ + Name: proto.String(""), + Tags: proto.String(""), + Time: proto.Int64(0), + Nil: proto.Bool(false), + + Stats: encodeIteratorStats(&stats), + }) + if err != nil { + return err + } + + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl new file mode 100644 index 0000000..5cc894d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl @@ -0,0 +1,1818 @@ +package influxql + +import ( + "container/heap" + "encoding/binary" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. +const DefaultStatsInterval = 10 * time.Second + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}Iterator represents a stream of {{$k.name}} points. +type {{$k.Name}}Iterator interface { + Iterator + Next() (*{{$k.Name}}Point, error) +} + +// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator. +// Drop and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot +// be cast to a {{$k.Name}}Iterator. +func new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator { + a := make([]{{$k.Name}}Iterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case {{$k.Name}}Iterator: + a = append(a, itr) +{{if eq .Name "Float"}} + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) +{{end}} + default: + itr.Close() + } + } + return a +} + + +// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator. +type buf{{$k.Name}}Iterator struct { + itr {{$k.Name}}Iterator + buf *{{$k.Name}}Point +} + +// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator. +func newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator { + return &buf{{$k.Name}}Iterator{itr: itr} +} + +// Stats returns statistics from the input iterator. +func (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats() } + +// Close closes the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } + itr.unread(p) + return p, nil +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err + } + return p.Time, nil +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) { + buf := itr.buf + if buf != nil { + itr.buf = nil + return buf, nil + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. 
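(Editorial aside, not part of the vendored template: an illustration of the window semantics just described, using made-up timestamps and a hypothetical bufItr of the buffered iterator type; the implementation follows.)

    // Made-up walk-through of a window of [10, 20) on a hypothetical buffered iterator bufItr:
    //   next point at t=15 -> returned
    //   next point at t=25 -> unread back into the buffer; NextInWindow returns nil, nil
    p, err := bufItr.NextInWindow(10, 20)
    if p == nil && err == nil {
        // either the input is exhausted or the next point lies outside [10, 20)
    }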
+func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { + itr.unread(v) + return nil, nil + } + return v, nil +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v } + +// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators. +type {{$k.name}}MergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}MergeHeap + init bool + + // Current iterator and window. + curr *{{$k.name}}MergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator. +func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator { + itr := &{{$k.name}}MergeIterator{ + inputs: inputs, + heap: &{{$k.name}}MergeHeap{ + items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBuf{{$k.Name}}Iterator(input) + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + itr.curr = nil + itr.inputs = nil + itr.heap.items = nil + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { + // Initialize the heap. This needs to be done lazily on the first call to this iterator + // so that iterator initialization done through the Select() call returns quickly. + // Queries can only be interrupted after the Select() call completes so any operations + // done during iterator creation cannot be interrupted, which is why we do it here + // instead so an interrupt can happen while initializing the heap. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items)) + for _, item := range items { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil, nil + } + itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) + + // Read point and set current window. + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p, nil + } + + // Read the next point from the current iterator. 
+ p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if window := itr.window; window.name != p.Name { + inWindow = false + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { + inWindow = false + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { + inWindow = false + } else if !opt.Ascending && p.Time < window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p, nil + } +} + +// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type {{$k.name}}MergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}MergeHeapItem +} + +func (h *{{$k.name}}MergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}MergeHeap) Less(i, j int) bool { + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + + +func (h *{{$k.name}}MergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}MergeHeapItem)) +} + +func (h *{{$k.name}}MergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}MergeHeapItem struct { + itr *buf{{$k.Name}}Iterator +} + +// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type {{$k.name}}SortedMergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}SortedMergeHeap + init bool +} + +// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. +func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { + itr := &{{$k.name}}SortedMergeIterator{ + inputs: inputs, + heap: &{{$k.name}}SortedMergeHeap{ + items: make([]*{{$k.name}}SortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}SortedMergeHeapItem{itr: input}) + } + + return itr +} + +// Stats returns an aggregation of stats from the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Stats() IteratorStats { + var stats IteratorStats + for _, input := range itr.inputs { + stats.Add(input.Stats()) + } + return stats +} + +// Close closes the underlying iterators. 
+func (itr *{{$k.name}}SortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap.items = append(itr.heap.items, item) + } + heap.Init(itr.heap) + itr.init = true + } + + if len(itr.heap.items) == 0 { + return nil, nil + } + + // Read the next item from the heap. + item := heap.Pop(itr.heap).(*{{$k.name}}SortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } else if item.point == nil { + return nil, nil + } + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point, item.err = item.itr.Next(); item.point != nil { + heap.Push(itr.heap, item) + } + + return p, nil +} + +// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. +type {{$k.name}}SortedMergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}SortedMergeHeapItem +} + +func (h *{{$k.name}}SortedMergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}SortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}SortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() + } + return x.Time > y.Time +} + +func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}SortedMergeHeapItem)) +} + +func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}SortedMergeHeapItem struct { + point *{{$k.Name}}Point + err error + itr {{$k.Name}}Iterator +} + +// {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine. +type {{$k.name}}ParallelIterator struct { + input {{$k.Name}}Iterator + ch chan {{$k.name}}PointError + + once sync.Once + closing chan struct{} + wg sync.WaitGroup +} + +// new{{$k.Name}}ParallelIterator returns a new instance of {{$k.name}}ParallelIterator. +func new{{$k.Name}}ParallelIterator(input {{$k.Name}}Iterator) *{{$k.name}}ParallelIterator { + itr := &{{$k.name}}ParallelIterator{ + input: input, + ch: make(chan {{$k.name}}PointError, 256), + closing: make(chan struct{}), + } + itr.wg.Add(1) + go itr.monitor() + return itr +} + +// Stats returns stats from the underlying iterator. 
+func (itr *{{$k.name}}ParallelIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}ParallelIterator) Close() error { + itr.once.Do(func() { close(itr.closing) }) + itr.wg.Wait() + return itr.input.Close() +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ParallelIterator) Next() (*{{$k.Name}}Point, error) { + v, ok := <-itr.ch + if !ok { + return nil, io.EOF + } + return v.point, v.err +} + +// monitor runs in a separate goroutine and actively pulls the next point. +func (itr *{{$k.name}}ParallelIterator) monitor() { + defer close(itr.ch) + defer itr.wg.Done() + + for { + // Read next point. + p, err := itr.input.Next() + if p != nil { + p = p.Clone() + } + + select { + case <-itr.closing: + return + case itr.ch <- {{$k.name}}PointError{point: p, err: err}: + } + } +} + +type {{$k.name}}PointError struct { + point *{{$k.Name}}Point + err error +} + +// {{$k.name}}LimitIterator represents an iterator that limits points per group. +type {{$k.name}}LimitIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator. +func new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator { + return &{{$k.name}}LimitIterator{ + input: input, + opt: opt, + } +} + +// Stats returns stats from the underlying iterator. +func (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the underlying iterators. +func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { + for { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. 
+ if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + continue + } + + return p, nil + } +} + +type {{$k.name}}FillIterator struct { + input *buf{{$k.Name}}Iterator + prev {{$k.Name}}Point + startTime int64 + endTime int64 + auxFields []interface{} + init bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + offset int64 + } +} + +func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt IteratorOptions) *{{$k.name}}FillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = {{$k.Zero}} + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + endTime, _ = opt.Window(opt.EndTime) + } else { + startTime, _ = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + return &{{$k.name}}FillIterator{ + input: newBuf{{$k.Name}}Iterator(input), + prev: {{$k.Name}}Point{Nil: true}, + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } +} + +func (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil, nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + if itr.opt.Location != nil { + _, itr.window.offset = itr.opt.Zone(itr.window.time) + } + itr.prev = {{$k.Name}}Point{Nil: true} + break + } + + // Check if the point is our next expected point. 
+ if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { + if p != nil { + itr.input.unread(p) + } + + p = &{{$k.Name}}Point{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case LinearFill: + {{- if or (eq $k.Name "Float") (eq $k.Name "Integer")}} + if !itr.prev.Nil { + next, err := itr.input.peek() + if err != nil { + return nil, err + } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { + interval := int64(itr.opt.Interval.Duration) + start := itr.window.time / interval + p.Value = linear{{$k.Name}}(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) + } else { + p.Nil = true + } + } else { + p.Nil = true + } + {{else}} + fallthrough + {{- end}} + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castTo{{$k.Name}}(itr.opt.FillValue) + case PreviousFill: + if !itr.prev.Nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = *p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time += int64(itr.opt.Interval.Duration) + } else { + itr.window.time -= int64(itr.opt.Interval.Duration) + } + + // Check to see if we have passed over an offset change and adjust the time + // to account for this new offset. + if itr.opt.Location != nil { + if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { + diff := itr.window.offset - offset + if abs(diff) < int64(itr.opt.Interval.Duration) { + itr.window.time += diff + } + itr.window.offset = offset + } + } + return p, nil +} + +// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. +type {{$k.name}}IntervalIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions +} + +func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator { + return &{{$k.name}}IntervalIterator{input: input, opt: opt} +} + +func (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + p.Time, _ = itr.opt.Window(p.Time) + // If we see the minimum allowable time, set the time to zero so we don't + // break the default returned time for aggregate queries without times. + if p.Time == MinTime { + p.Time = 0 + } + return p, nil +} + +// {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator. +type {{$k.name}}InterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + count int +} + +func new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}InterruptIterator { + return &{{$k.name}}InterruptIterator{input: input, closing: closing} +} + +func (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) { + // Only check if the channel is closed every N points. 
This + // intentionally checks on both 0 and N so that if the iterator + // has been interrupted before the first point is emitted it will + // not emit any points. + if itr.count & 0xFF == 0xFF { + select { + case <-itr.closing: + return nil, itr.Close() + default: + // Reset iterator count to zero and fall through to emit the next point. + itr.count = 0 + } + } + + // Increment the counter for every point read. + itr.count++ + return itr.input.Next() +} + +// {{$k.name}}CloseInterruptIterator represents a {{$k.name}} implementation of CloseInterruptIterator. +type {{$k.name}}CloseInterruptIterator struct { + input {{$k.Name}}Iterator + closing <-chan struct{} + done chan struct{} + once sync.Once +} + +func new{{$k.Name}}CloseInterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}CloseInterruptIterator { + itr := &{{$k.name}}CloseInterruptIterator{ + input: input, + closing: closing, + done: make(chan struct{}), + } + go itr.monitor() + return itr +} + +func (itr *{{$k.name}}CloseInterruptIterator) monitor() { + select { + case <-itr.closing: + itr.Close() + case <-itr.done: + } +} + +func (itr *{{$k.name}}CloseInterruptIterator) Stats() IteratorStats { + return itr.input.Stats() +} + +func (itr *{{$k.name}}CloseInterruptIterator) Close() error { + itr.once.Do(func() { + close(itr.done) + itr.input.Close() + }) + return nil +} + +func (itr *{{$k.name}}CloseInterruptIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + // Check if the iterator was closed. + select { + case <-itr.done: + return nil, nil + default: + return nil, err + } + } + return p, nil +} + +// aux{{$k.Name}}Point represents a combination of a point and an error for the AuxIterator. +type aux{{$k.Name}}Point struct { + point *{{$k.Name}}Point + err error +} + +// {{$k.name}}AuxIterator represents a {{$k.name}} implementation of AuxIterator. +type {{$k.name}}AuxIterator struct { + input *buf{{$k.Name}}Iterator + output chan aux{{$k.Name}}Point + fields *auxIteratorFields + background bool +} + +func new{{$k.Name}}AuxIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}AuxIterator { + return &{{$k.name}}AuxIterator{ + input: newBuf{{$k.Name}}Iterator(input), + output: make(chan aux{{$k.Name}}Point, 1), + fields: newAuxIteratorFields(opt), + } +} + +func (itr *{{$k.name}}AuxIterator) Background() { + itr.background = true + itr.Start() + go DrainIterator(itr) +} + +func (itr *{{$k.name}}AuxIterator) Start() { go itr.stream() } +func (itr *{{$k.name}}AuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}AuxIterator) Close() error { return itr.input.Close() } +func (itr *{{$k.name}}AuxIterator) Next() (*{{$k.Name}}Point, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *{{$k.name}}AuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } + +func (itr *{{.name}}AuxIterator) stream() { + for { + // Read next point. + p, err := itr.input.Next() + if err != nil { + itr.output <- aux{{$k.Name}}Point{err: err} + itr.fields.sendError(err) + break + } else if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- aux{{$k.Name}}Point{point: p} + if ok := itr.fields.send(p); !ok && itr.background { + break + } + } + + close(itr.output) + itr.fields.close() +} + +// {{$k.name}}ChanIterator represents a new instance of {{$k.name}}ChanIterator. 
+type {{$k.name}}ChanIterator struct { + buf struct { + i int + filled bool + points [2]{{$k.Name}}Point + } + err error + cond *sync.Cond + done bool +} + +func (itr *{{$k.name}}ChanIterator) Stats() IteratorStats { return IteratorStats{} } + +func (itr *{{$k.name}}ChanIterator) Close() error { + itr.cond.L.Lock() + // Mark the channel iterator as done and signal all waiting goroutines to start again. + itr.done = true + itr.cond.Broadcast() + // Do not defer the unlock so we don't create an unnecessary allocation. + itr.cond.L.Unlock() + return nil +} + +func (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Wait for either the iterator to be done (so we don't have to set the value) + // or for the buffer to have been read and ready for another write. + for !itr.done && itr.buf.filled { + itr.cond.Wait() + } + + // Do not set the value and return false to signal that the iterator is closed. + // Do this after the above wait as the above for loop may have exited because + // the iterator was closed. + if itr.done { + return false + } + + switch v := value.(type) { + case {{$k.Type}}: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: v} +{{if eq $k.Name "Float"}} + case int64: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: float64(v)} +{{end}} + default: + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Nil: true} + } + itr.buf.filled = true + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() + return true +} + +func (itr *{{$k.name}}ChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *{{$k.name}}ChanIterator) Next() (*{{$k.Name}}Point, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } + + // Wait until either a value is available in the buffer or + // the iterator is closed. + for !itr.done && !itr.buf.filled { + itr.cond.Wait() + } + + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + return nil, nil + } + + // Always read from the buffer if it exists, even if the iterator + // is closed. This prevents the last value from being truncated by + // the parent iterator. + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false + itr.cond.Signal() + return p, nil +} + +{{range $v := $types}} + +// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. +type {{$k.name}}Reduce{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string + opt IteratorOptions + points []{{$v.Name}}Point + keepTags bool +} + +func new{{$k.Name}}Reduce{{$v.Name}}Iterator(input {{$k.Name}}Iterator, opt IteratorOptions, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)) *{{$k.name}}Reduce{{$v.Name}}Iterator { + return &{{$k.name}}Reduce{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + +// Stats returns stats from the input iterator. 
+func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. +type {{$k.name}}Reduce{{$v.Name}}Point struct { + Name string + Tags Tags + Aggregator {{$k.Name}}PointAggregator + Emitter {{$v.Name}}PointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + // Calculate next window. + var ( + startTime, endTime int64 + window struct { + name string + tags string + } + ) + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() + break + } + + // Create points by tags. + m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) + for { + // Read next point. + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { + break + } else if curr.Nil { + continue + } else if curr.Name != window.name { + itr.input.unread(curr) + break + } + + // Ensure this point is within the same final window. + if curr.Name != window.name { + itr.input.unread(curr) + break + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { + itr.input.unread(curr) + break + } + + // Retrieve the tags on this point for this level of the query. + // This may be different than the bucket dimensions. + tags := curr.Tags.Subset(itr.dims) + id := tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{$v.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + } + + // Reverse sort points by name & tag if our output is supposed to be ordered. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + if len(keys) > 1 && itr.opt.Ordered { + sort.Sort(reverseStringSlice(keys)) + } + + // Assume the points are already sorted until proven otherwise. + sortedByTime := true + // Emit the points for each name & tag combination. + a := make([]{{$v.Name}}Point, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points)-1; i >= 0; i-- { + points[i].Name = rp.Name + if !itr.keepTags { + points[i].Tags = rp.Tags + } + // Set the points time to the interval time if the reducer didn't provide one. 
+ if points[i].Time == ZeroTime { + points[i].Time = startTime + } else { + sortedByTime = false + } + a = append(a, points[i]) + } + } + + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse({{$v.name}}PointsByTime(a))) + } + + return a, nil +} + +// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. +type {{$k.name}}Stream{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string + opt IteratorOptions + m map[string]*{{$k.name}}Reduce{{$v.Name}}Point + points []{{$v.Name}}Point +} + +// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator. +func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator { + return &{{$k.name}}Stream{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + m: make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point), + } +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the next value for the stream iterator. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + var err error + itr.points, err = itr.reduce() + if len(itr.points) == 0 { + return nil, err + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p, nil +} + +// reduce creates and manages aggregators for every point from the input. +// After aggregating a point, it always tries to emit a value using the emitter. +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { + for { + // Read next point. + curr, err := itr.input.Next() + if curr == nil { + // Close all of the aggregators to flush any remaining points to emit. + var points []{{$v.Name}}Point + for _, rp := range itr.m { + if aggregator, ok := rp.Aggregator.(io.Closer); ok { + if err := aggregator.Close(); err != nil { + return nil, err + } + + pts := rp.Emitter.Emit() + if len(pts) == 0 { + continue + } + + for i := range pts { + pts[i].Name = rp.Name + pts[i].Tags = rp.Tags + } + points = append(points, pts...) + } + } + + // Eliminate the aggregators and emitters. + itr.m = nil + return points, nil + } else if err != nil { + return nil, err + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.dims) + + id := curr.Name + if len(tags.m) > 0 { + id += "\x00" + tags.ID() + } + + // Retrieve the aggregator for this name/tag combination or create one. + rp := itr.m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + itr.m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + + // Attempt to emit points from the aggregator. 
+ points := rp.Emitter.Emit() + if len(points) == 0 { + continue + } + + for i := range points { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + } + return points, nil + } +} + +// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator executes a function to modify an existing point +// for every output of the input iterator. +type {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator struct { + left *buf{{$k.Name}}Iterator + right *buf{{$k.Name}}Iterator + fn {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc + points []{{$k.Name}}Point // must be size 2 + storePrev bool +} + +func new{{$k.Name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator(left, right {{$k.Name}}Iterator, opt IteratorOptions, fn func(a, b {{$k.Type}}) {{$v.Type}}) *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator { + var points []{{$k.Name}}Point + switch opt.Fill { + case NullFill, PreviousFill: + points = []{{$k.Name}}Point{ {Nil: true}, {Nil: true} } + case NumberFill: + value := castTo{{$k.Name}}(opt.FillValue) + points = []{{$k.Name}}Point{ {Value: value}, {Value: value} } + } + return &{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator{ + left: newBuf{{$k.Name}}Iterator(left), + right: newBuf{{$k.Name}}Iterator(right), + points: points, + fn: fn, + storePrev: opt.Fill == PreviousFill, + } +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Stats() IteratorStats { + stats := itr.left.Stats() + stats.Add(itr.right.Stats()) + return stats +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Close() error { + itr.left.Close() + itr.right.Close() + return nil +} + +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Next() (*{{$v.Name}}Point, error) { + for { + a, b, err := itr.next() + if err != nil || (a == nil && b == nil) { + return nil, err + } + + // If any of these are nil and we are using fill(none), skip these points. + if (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil { + continue + } + + // If one of the two points is nil, we need to fill it with a fake nil + // point that has the same name, tags, and time as the other point. + // There should never be a time when both of these are nil. + if a == nil { + p := *b + a = &p + a.Value = {{$k.Nil}} + a.Nil = true + } else if b == nil { + p := *a + b = &p + b.Value = {{$k.Nil}} + b.Nil = true + } + + // If a value is nil, use the fill values if the fill value is non-nil. + if a.Nil && !itr.points[0].Nil { + a.Value = itr.points[0].Value + a.Nil = false + } + if b.Nil && !itr.points[1].Nil { + b.Value = itr.points[1].Value + b.Nil = false + } + + if itr.storePrev { + itr.points[0], itr.points[1] = *a, *b + } + +{{if eq $k.Name $v.Name}} + if a.Nil { + return a, nil + } else if b.Nil { + return b, nil + } + a.Value = itr.fn(a.Value, b.Value) + return a, nil +{{else}} + p := &{{$v.Name}}Point{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Nil: a.Nil || b.Nil, + Aggregated: a.Aggregated, + } + if !p.Nil { + p.Value = itr.fn(a.Value, b.Value) + } + return p, nil +{{end}} + } +} + +// next returns the next points within each iterator. If the iterators are +// uneven, it organizes them so only matching points are returned. +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) next() (a, b *{{$k.Name}}Point, err error) { + // Retrieve the next value for both the left and right. 
+ a, err = itr.left.Next() + if err != nil { + return nil, nil, err + } + b, err = itr.right.Next() + if err != nil { + return nil, nil, err + } + + // If we have a point from both, make sure that they match each other. + if a != nil && b != nil { + if a.Name > b.Name { + itr.left.unread(a) + return nil, b, nil + } else if a.Name < b.Name { + itr.right.unread(b) + return a, nil, nil + } + + if ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags { + itr.left.unread(a) + return nil, b, nil + } else if ltags < rtags { + itr.right.unread(b) + return a, nil, nil + } + + if a.Time > b.Time { + itr.left.unread(a) + return nil, b, nil + } else if a.Time < b.Time { + itr.right.unread(b) + return a, nil, nil + } + } + return a, b, nil +} + +// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc creates or modifies a point by combining two +// points. The point passed in may be modified and returned rather than +// allocating a new point if possible. One of the points may be nil, but at +// least one of the points will be non-nil. +type {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc func(a, b {{$k.Type}}) {{$v.Type}} +{{end}} + +// {{$k.name}}TransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}TransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}TransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}TransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}TransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}TransformIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + p = itr.fn(p) + } + return p, nil +} + +// {{$k.name}}TransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}TransformFunc func(p *{{$k.Name}}Point) *{{$k.Name}}Point + +// {{$k.name}}BoolTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}BoolTransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}BoolTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *{{$k.name}}BoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}BoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}BoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// {{$k.name}}BoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}BoolTransformFunc func(p *{{$k.Name}}Point) *BooleanPoint + +// {{$k.name}}DedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. 
+type {{$k.name}}DedupeIterator struct { + input {{$k.Name}}Iterator + m map[string]struct{} // lookup of points already sent +} + +type {{$k.name}}IteratorMapper struct { + e *Emitter + buf []interface{} + driver IteratorMap // which iterator to use for the primary value, can be nil + fields []IteratorMap // which iterator to use for an aux field + point {{$k.Name}}Point +} + +func new{{$k.Name}}IteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &{{$k.name}}IteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + driver: driver, + fields: fields, + point: {{$k.Name}}Point{ + Aux: make([]interface{}, len(fields)), + }, + } +} + +func (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + itr.point.Time = t + itr.point.Name = name + itr.point.Tags = tags + + itr.e.readInto(t, name, tags, itr.buf) + if itr.driver != nil { + if v := itr.driver.Value(tags, itr.buf); v != nil { + if v, ok := v.({{$k.Type}}); ok { + itr.point.Value = v + itr.point.Nil = false + } else { + itr.point.Value = {{$k.Nil}} + itr.point.Nil = true + } + } else { + itr.point.Value = {{$k.Nil}} + itr.point.Nil = true + } + } + for i, f := range itr.fields { + itr.point.Aux[i] = f.Value(tags, itr.buf) + } + return &itr.point, nil +} + +func (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *{{$k.name}}IteratorMapper) Close() error { + return itr.e.Close() +} + +type {{$k.name}}FilterIterator struct { + input {{$k.Name}}Iterator + cond Expr + opt IteratorOptions + m map[string]interface{} +} + +func new{{$k.Name}}FilterIterator(input {{$k.Name}}Iterator, cond Expr, opt IteratorOptions) {{$k.Name}}Iterator { + // Strip out time conditions from the WHERE clause. + // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. + n := RewriteFunc(CloneExpr(cond), func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + } + return n + }) + + cond, _ = n.(Expr) + if cond == nil { + return input + } else if n, ok := cond.(*BooleanLiteral); ok && n.Val { + return input + } + + return &{{$k.name}}FilterIterator{ + input: input, + cond: cond, + opt: opt, + m: make(map[string]interface{}), + } +} + +func (itr *{{$k.name}}FilterIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}FilterIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FilterIterator) Next() (*{{$k.Name}}Point, error) { + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } + + for i, ref := range itr.opt.Aux { + itr.m[ref.Val] = p.Aux[i] + } + for k, v := range p.Tags.KeyValues() { + itr.m[k] = v + } + + if !EvalBool(itr.cond, itr.m) { + continue + } + return p, nil + } +} + +// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator. +func new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator { + return &{{$k.name}}DedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Stats returns stats from the input iterator. 
+func (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) { + for { + // Read next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) + if err != nil { + return nil, err + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p, nil + } +} + +// {{$k.name}}ReaderIterator represents an iterator that streams from a reader. +type {{$k.name}}ReaderIterator struct { + r io.Reader + dec *{{$k.Name}}PointDecoder +} + +// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. +func new{{$k.Name}}ReaderIterator(r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator { + dec := New{{$k.Name}}PointDecoder(r) + dec.stats = stats + + return &{{$k.name}}ReaderIterator{ + r: r, + dec: dec, + } +} + +// Stats returns stats about points processed. +func (itr *{{$k.name}}ReaderIterator) Stats() IteratorStats { return itr.dec.stats } + +// Close closes the underlying reader, if applicable. +func (itr *{{$k.name}}ReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) { + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &{{$k.Name}}Point{} + if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { + return nil, nil + } else if err != nil { + return nil, err + } + return p, nil +} +{{end}} + + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer + + // Frequency with which stats are emitted. + StatsInterval time.Duration +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{ + w: w, + + StatsInterval: DefaultStatsInterval, + } +} + +// EncodeIterator encodes and writes all of itr's points to the underlying writer. +func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +{{range .}} +// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { + ticker := time.NewTicker(enc.StatsInterval) + defer ticker.Stop() + + // Emit initial stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + + // Continually stream points from the iterator into the encoder. + penc := New{{.Name}}PointEncoder(enc.w) + for { + // Emit stats periodically. 
+ select { + case <-ticker.C: + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + default: + } + + // Retrieve the next point from the iterator. + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { + break + } + + // Write the point to the point encoder. + if err := penc.Encode{{.Name}}Point(p); err != nil { + return err + } + } + + // Emit final stats. + if err := enc.encodeStats(itr.Stats()); err != nil { + return err + } + return nil +} + +{{end}} + +// encode a stats object in the point stream. +func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { + buf, err := proto.Marshal(&internal.Point{ + Name: proto.String(""), + Tags: proto.String(""), + Time: proto.Int64(0), + Nil: proto.Bool(false), + + Stats: encodeIteratorStats(&stats), + }) + if err != nil { + return err + } + + if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { + return err + } + if _, err := enc.w.Write(buf); err != nil { + return err + } + return nil +} + +{{end}} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.go new file mode 100644 index 0000000..cd14de8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.go @@ -0,0 +1,1402 @@ +package influxql + +import ( + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/influxdata/influxdb/models" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/influxql/internal" +) + +// ErrUnknownCall is returned when operating on an unknown function call. +var ErrUnknownCall = errors.New("unknown call") + +const ( + // MinTime is used as the minimum time value when computing an unbounded range. + // This time is one less than the MinNanoTime so that the first minimum + // time can be used as a sentinel value to signify that it is the default + // value rather than explicitly set by the user. + MinTime = models.MinNanoTime - 1 + + // MaxTime is used as the maximum time value when computing an unbounded range. + // This time is 2262-04-11 23:47:16.854775806 +0000 UTC + MaxTime = models.MaxNanoTime + + // secToNs is the number of nanoseconds in a second. + secToNs = int64(time.Second) +) + +// Iterator represents a generic interface for all Iterators. +// Most iterator operations are done on the typed sub-interfaces. +type Iterator interface { + Stats() IteratorStats + Close() error +} + +// Iterators represents a list of iterators. +type Iterators []Iterator + +// Stats returns the aggregation of all iterator stats. +func (a Iterators) Stats() IteratorStats { + var stats IteratorStats + for _, itr := range a { + stats.Add(itr.Stats()) + } + return stats +} + +// Close closes all iterators. +func (a Iterators) Close() error { + for _, itr := range a { + itr.Close() + } + return nil +} + +// filterNonNil returns a slice of iterators that removes all nil iterators. +func (a Iterators) filterNonNil() []Iterator { + other := make([]Iterator, 0, len(a)) + for _, itr := range a { + if itr == nil { + continue + } + other = append(other, itr) + } + return other +} + +// castType determines what type to cast the set of iterators to. +// An iterator type is chosen using this hierarchy: +// float > integer > string > boolean +func (a Iterators) castType() DataType { + if len(a) == 0 { + return Unknown + } + + typ := DataType(Boolean) + for _, input := range a { + switch input.(type) { + case FloatIterator: + // Once a float iterator is found, short circuit the end. 
+ return Float + case IntegerIterator: + if typ > Integer { + typ = Integer + } + case StringIterator: + if typ > String { + typ = String + } + case BooleanIterator: + // Boolean is the lowest type. + } + } + return typ +} + +// cast casts an array of iterators to a single type. +// Iterators that are not compatible or cannot be cast to the +// chosen iterator type are closed and dropped. +func (a Iterators) cast() interface{} { + typ := a.castType() + switch typ { + case Float: + return newFloatIterators(a) + case Integer: + return newIntegerIterators(a) + case String: + return newStringIterators(a) + case Boolean: + return newBooleanIterators(a) + } + return a +} + +// Merge combines all iterators into a single iterator. +// A sorted merge iterator or a merge iterator can be used based on opt. +func (a Iterators) Merge(opt IteratorOptions) (Iterator, error) { + // Check if this is a call expression. + call, ok := opt.Expr.(*Call) + + // Merge into a single iterator. + if !ok && opt.MergeSorted() { + itr := NewSortedMergeIterator(a, opt) + if itr != nil && opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + return itr, nil + } + + // We do not need an ordered output so use a merge iterator. + itr := NewMergeIterator(a, opt) + if itr == nil { + return nil, nil + } + + if opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, opt.InterruptCh) + } + + if !ok { + // This is not a call expression so do not use a call iterator. + return itr, nil + } + + // When merging the count() function, use sum() to sum the counted points. + if call.Name == "count" { + opt.Expr = &Call{ + Name: "sum", + Args: call.Args, + } + } + return NewCallIterator(itr, opt) +} + +// NewMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be merge iterators or only contain a single name/tag in +// sorted order. The iterator will output all points by window, name/tag, then +// time. This iterator is useful when you need all of the points for an +// interval. +func NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if n := len(inputs); n == 0 { + return nil + } else if n == 1 { + return inputs[0] + } + + // Aggregate functions can use a more relaxed sorting so that points + // within a window are grouped. This is much more efficient. + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerMergeIterator(inputs, opt) + case []StringIterator: + return newStringMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported merge iterator type: %T", inputs)) + } +} + +// NewParallelMergeIterator returns an iterator that breaks input iterators +// into groups and processes them in parallel. +func NewParallelMergeIterator(inputs []Iterator, opt IteratorOptions, parallelism int) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } else if len(inputs) == 1 { + return inputs[0] + } + + // Limit parallelism to the number of inputs. + if len(inputs) < parallelism { + parallelism = len(inputs) + } + + // Determine the number of inputs per output iterator. + n := len(inputs) / parallelism + + // Group iterators together. 
+ outputs := make([]Iterator, parallelism) + for i := range outputs { + var slice []Iterator + if i < len(outputs)-1 { + slice = inputs[i*n : (i+1)*n] + } else { + slice = inputs[i*n:] + } + + outputs[i] = newParallelIterator(NewMergeIterator(slice, opt)) + } + + // Merge all groups together. + return NewMergeIterator(outputs, opt) +} + +// NewSortedMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be sorted merge iterators or only contain a single +// name/tag in sorted order. The iterator will output all points by name/tag, +// then time. This iterator is useful when you need all points for a name/tag +// to be in order. +func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return nil + } else if len(inputs) == 1 { + return inputs[0] + } + + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatSortedMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerSortedMergeIterator(inputs, opt) + case []StringIterator: + return newStringSortedMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanSortedMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported sorted merge iterator type: %T", inputs)) + } +} + +// newParallelIterator returns an iterator that runs in a separate goroutine. +func newParallelIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch itr := input.(type) { + case FloatIterator: + return newFloatParallelIterator(itr) + case IntegerIterator: + return newIntegerParallelIterator(itr) + case StringIterator: + return newStringParallelIterator(itr) + case BooleanIterator: + return newBooleanParallelIterator(itr) + default: + panic(fmt.Sprintf("unsupported parallel iterator type: %T", itr)) + } +} + +// NewLimitIterator returns an iterator that limits the number of points per grouping. +func NewLimitIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatLimitIterator(input, opt) + case IntegerIterator: + return newIntegerLimitIterator(input, opt) + case StringIterator: + return newStringLimitIterator(input, opt) + case BooleanIterator: + return newBooleanLimitIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) + } +} + +// NewFilterIterator returns an iterator that filters the points based on the +// condition. This iterator is not nearly as efficient as filtering points +// within the query engine and is only used when filtering subqueries. +func NewFilterIterator(input Iterator, cond Expr, opt IteratorOptions) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatFilterIterator(input, cond, opt) + case IntegerIterator: + return newIntegerFilterIterator(input, cond, opt) + case StringIterator: + return newStringFilterIterator(input, cond, opt) + case BooleanIterator: + return newBooleanFilterIterator(input, cond, opt) + default: + panic(fmt.Sprintf("unsupported filter iterator type: %T", input)) + } +} + +// NewDedupeIterator returns an iterator that only outputs unique points. +// This iterator maintains a serialized copy of each row so it is inefficient +// to use on large datasets. It is intended for small datasets such as meta queries. 
+func NewDedupeIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatDedupeIterator(input) + case IntegerIterator: + return newIntegerDedupeIterator(input) + case StringIterator: + return newStringDedupeIterator(input) + case BooleanIterator: + return newBooleanDedupeIterator(input) + default: + panic(fmt.Sprintf("unsupported dedupe iterator type: %T", input)) + } +} + +// NewFillIterator returns an iterator that fills in missing points in an aggregate. +func NewFillIterator(input Iterator, expr Expr, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatFillIterator(input, expr, opt) + case IntegerIterator: + return newIntegerFillIterator(input, expr, opt) + case StringIterator: + return newStringFillIterator(input, expr, opt) + case BooleanIterator: + return newBooleanFillIterator(input, expr, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// NewIntervalIterator returns an iterator that sets the time on each point to the interval. +func NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatIntervalIterator(input, opt) + case IntegerIterator: + return newIntegerIntervalIterator(input, opt) + case StringIterator: + return newStringIntervalIterator(input, opt) + case BooleanIterator: + return newBooleanIntervalIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// NewInterruptIterator returns an iterator that will stop producing output +// when the passed-in channel is closed. +func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatInterruptIterator(input, closing) + case IntegerIterator: + return newIntegerInterruptIterator(input, closing) + case StringIterator: + return newStringInterruptIterator(input, closing) + case BooleanIterator: + return newBooleanInterruptIterator(input, closing) + default: + panic(fmt.Sprintf("unsupported interrupt iterator type: %T", input)) + } +} + +// NewCloseInterruptIterator returns an iterator that will invoke the Close() method on an +// iterator when the passed-in channel has been closed. +func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatCloseInterruptIterator(input, closing) + case IntegerIterator: + return newIntegerCloseInterruptIterator(input, closing) + case StringIterator: + return newStringCloseInterruptIterator(input, closing) + case BooleanIterator: + return newBooleanCloseInterruptIterator(input, closing) + default: + panic(fmt.Sprintf("unsupported close iterator iterator type: %T", input)) + } +} + +// AuxIterator represents an iterator that can split off separate auxiliary iterators. +type AuxIterator interface { + Iterator + + // Auxilary iterator + Iterator(name string, typ DataType) Iterator + + // Start starts writing to the created iterators. + Start() + + // Backgrounds the iterator so that, when start is called, it will + // continuously read from the iterator. + Background() +} + +// NewAuxIterator returns a new instance of AuxIterator. 
+func NewAuxIterator(input Iterator, opt IteratorOptions) AuxIterator { + switch input := input.(type) { + case FloatIterator: + return newFloatAuxIterator(input, opt) + case IntegerIterator: + return newIntegerAuxIterator(input, opt) + case StringIterator: + return newStringAuxIterator(input, opt) + case BooleanIterator: + return newBooleanAuxIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported aux iterator type: %T", input)) + } +} + +// auxIteratorField represents an auxilary field within an AuxIterator. +type auxIteratorField struct { + name string // field name + typ DataType // detected data type + itrs []Iterator // auxillary iterators + mu sync.Mutex + opt IteratorOptions +} + +func (f *auxIteratorField) append(itr Iterator) { + f.mu.Lock() + defer f.mu.Unlock() + f.itrs = append(f.itrs, itr) +} + +func (f *auxIteratorField) close() { + f.mu.Lock() + defer f.mu.Unlock() + for _, itr := range f.itrs { + itr.Close() + } +} + +type auxIteratorFields struct { + fields []*auxIteratorField + dimensions []string +} + +// newAuxIteratorFields returns a new instance of auxIteratorFields from a list of field names. +func newAuxIteratorFields(opt IteratorOptions) *auxIteratorFields { + fields := make([]*auxIteratorField, len(opt.Aux)) + for i, ref := range opt.Aux { + fields[i] = &auxIteratorField{name: ref.Val, typ: ref.Type, opt: opt} + } + return &auxIteratorFields{ + fields: fields, + dimensions: opt.GetDimensions(), + } +} + +func (a *auxIteratorFields) close() { + for _, f := range a.fields { + f.close() + } +} + +// iterator creates a new iterator for a named auxilary field. +func (a *auxIteratorFields) iterator(name string, typ DataType) Iterator { + for _, f := range a.fields { + // Skip field if it's name doesn't match. + // Exit if no points were received by the iterator. + if f.name != name || (typ != Unknown && f.typ != typ) { + continue + } + + // Create channel iterator by data type. + switch f.typ { + case Float: + itr := &floatChanIterator{cond: sync.NewCond(&sync.Mutex{})} + f.append(itr) + return itr + case Integer: + itr := &integerChanIterator{cond: sync.NewCond(&sync.Mutex{})} + f.append(itr) + return itr + case String, Tag: + itr := &stringChanIterator{cond: sync.NewCond(&sync.Mutex{})} + f.append(itr) + return itr + case Boolean: + itr := &booleanChanIterator{cond: sync.NewCond(&sync.Mutex{})} + f.append(itr) + return itr + default: + break + } + } + + return &nilFloatIterator{} +} + +// send sends a point to all field iterators. +func (a *auxIteratorFields) send(p Point) (ok bool) { + values := p.aux() + for i, f := range a.fields { + var v interface{} + if i < len(values) { + v = values[i] + } + + tags := p.tags() + tags = tags.Subset(a.dimensions) + + // Send new point for each aux iterator. + // Primitive pointers represent nil values. 
+ for _, itr := range f.itrs { + switch itr := itr.(type) { + case *floatChanIterator: + ok = itr.setBuf(p.name(), tags, p.time(), v) || ok + case *integerChanIterator: + ok = itr.setBuf(p.name(), tags, p.time(), v) || ok + case *stringChanIterator: + ok = itr.setBuf(p.name(), tags, p.time(), v) || ok + case *booleanChanIterator: + ok = itr.setBuf(p.name(), tags, p.time(), v) || ok + default: + panic(fmt.Sprintf("invalid aux itr type: %T", itr)) + } + } + } + return ok +} + +func (a *auxIteratorFields) sendError(err error) { + for _, f := range a.fields { + for _, itr := range f.itrs { + switch itr := itr.(type) { + case *floatChanIterator: + itr.setErr(err) + case *integerChanIterator: + itr.setErr(err) + case *stringChanIterator: + itr.setErr(err) + case *booleanChanIterator: + itr.setErr(err) + default: + panic(fmt.Sprintf("invalid aux itr type: %T", itr)) + } + } + } +} + +// DrainIterator reads and discards all points from itr. +func DrainIterator(itr Iterator) { + defer itr.Close() + switch itr := itr.(type) { + case FloatIterator: + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { + } + case IntegerIterator: + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { + } + case StringIterator: + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { + } + case BooleanIterator: + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { + } + default: + panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) + } +} + +// DrainIterators reads and discards all points from itrs. +func DrainIterators(itrs []Iterator) { + defer Iterators(itrs).Close() + for { + var hasData bool + + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case IntegerIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case StringIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + case BooleanIterator: + if p, _ := itr.Next(); p != nil { + hasData = true + } + default: + panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) + } + } + + // Exit once all iterators return a nil point. + if !hasData { + break + } + } +} + +// NewReaderIterator returns an iterator that streams from a reader. +func NewReaderIterator(r io.Reader, typ DataType, stats IteratorStats) Iterator { + switch typ { + case Float: + return newFloatReaderIterator(r, stats) + case Integer: + return newIntegerReaderIterator(r, stats) + case String: + return newStringReaderIterator(r, stats) + case Boolean: + return newBooleanReaderIterator(r, stats) + default: + return &nilFloatReaderIterator{r: r} + } +} + +// IteratorCreator is an interface to create Iterators. +type IteratorCreator interface { + // Creates a simple iterator for use in an InfluxQL query. + CreateIterator(source *Measurement, opt IteratorOptions) (Iterator, error) +} + +// FieldMapper returns the data type for the field inside of the measurement. +type FieldMapper interface { + FieldDimensions(m *Measurement) (fields map[string]DataType, dimensions map[string]struct{}, err error) + + TypeMapper +} + +// IteratorOptions is an object passed to CreateIterator to specify creation options. +type IteratorOptions struct { + // Expression to iterate for. + // This can be VarRef or a Call. + Expr Expr + + // Auxilary tags or values to also retrieve for the point. + Aux []VarRef + + // Data sources from which to receive data. This is only used for encoding + // measurements over RPC and is no longer used in the open source version. 
+ Sources []Source + + // Group by interval and tags. + Interval Interval + Dimensions []string // The final dimensions of the query (stays the same even in subqueries). + GroupBy map[string]struct{} // Dimensions to group points by in intermediate iterators. + Location *time.Location + + // Fill options. + Fill FillOption + FillValue interface{} + + // Condition to filter by. + Condition Expr + + // Time range for the iterator. + StartTime int64 + EndTime int64 + + // Sorted in time ascending order if true. + Ascending bool + + // Limits the number of points per series. + Limit, Offset int + + // Limits the number of series. + SLimit, SOffset int + + // Removes duplicate rows from raw queries. + Dedupe bool + + // Determines if this is a query for raw data or an aggregate/selector. + Ordered bool + + // Limits on the creation of iterators. + MaxSeriesN int + + // If this channel is set and is closed, the iterator should try to exit + // and close as soon as possible. + InterruptCh <-chan struct{} + + // Authorizer can limit access to data + Authorizer Authorizer +} + +// newIteratorOptionsStmt creates the iterator options from stmt. +func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt IteratorOptions, err error) { + + // Determine time range from the condition. + startTime, endTime, err := TimeRange(stmt.Condition, stmt.Location) + if err != nil { + return IteratorOptions{}, err + } + + if !startTime.IsZero() { + opt.StartTime = startTime.UnixNano() + } else { + if sopt != nil { + opt.StartTime = sopt.MinTime.UnixNano() + } else { + opt.StartTime = MinTime + } + } + if !endTime.IsZero() { + opt.EndTime = endTime.UnixNano() + } else { + if sopt != nil { + opt.EndTime = sopt.MaxTime.UnixNano() + } else { + opt.EndTime = MaxTime + } + } + opt.Location = stmt.Location + + // Determine group by interval. + interval, err := stmt.GroupByInterval() + if err != nil { + return opt, err + } + // Set duration to zero if a negative interval has been used. + if interval < 0 { + interval = 0 + } else if interval > 0 { + opt.Interval.Offset, err = stmt.GroupByOffset() + if err != nil { + return opt, err + } + } + opt.Interval.Duration = interval + + // Always request an ordered output for the top level iterators. + // The emitter will always emit points as ordered. + opt.Ordered = true + + // Determine dimensions. + opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions)) + for _, d := range stmt.Dimensions { + if d, ok := d.Expr.(*VarRef); ok { + opt.Dimensions = append(opt.Dimensions, d.Val) + opt.GroupBy[d.Val] = struct{}{} + } + } + + opt.Condition = stmt.Condition + opt.Ascending = stmt.TimeAscending() + opt.Dedupe = stmt.Dedupe + + opt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue + if opt.Fill == NullFill && stmt.Target != nil { + // Set the fill option to none if a target has been given. + // Null values will get ignored when being written to the target + // so fill(null) wouldn't write any null values to begin with. 
+ opt.Fill = NoFill + } + opt.Limit, opt.Offset = stmt.Limit, stmt.Offset + opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset + if sopt != nil { + opt.MaxSeriesN = sopt.MaxSeriesN + opt.InterruptCh = sopt.InterruptCh + opt.Authorizer = sopt.Authorizer + } + + return opt, nil +} + +func newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions) (IteratorOptions, error) { + subOpt, err := newIteratorOptionsStmt(stmt, nil) + if err != nil { + return IteratorOptions{}, err + } + + if subOpt.StartTime < opt.StartTime { + subOpt.StartTime = opt.StartTime + } + if subOpt.EndTime > opt.EndTime { + subOpt.EndTime = opt.EndTime + } + // Propagate the dimensions to the inner subquery. + subOpt.Dimensions = opt.Dimensions + for d := range opt.GroupBy { + subOpt.GroupBy[d] = struct{}{} + } + subOpt.InterruptCh = opt.InterruptCh + + // Propagate the SLIMIT and SOFFSET from the outer query. + subOpt.SLimit += opt.SLimit + subOpt.SOffset += opt.SOffset + + // If the inner query uses a null fill option and is not a raw query, + // switch it to none so we don't hit an unnecessary penalty from the + // fill iterator. Null values will end up getting stripped by an outer + // query anyway so there's no point in having them here. We still need + // all other types of fill iterators because they can affect the result + // of the outer query. We also do not do this for raw queries because + // there is no fill iterator for them and fill(none) doesn't work with + // raw queries. + if !stmt.IsRawQuery && subOpt.Fill == NullFill { + subOpt.Fill = NoFill + } + + // Inherit the ordering method from the outer query. + subOpt.Ordered = opt.Ordered + + // If there is no interval for this subquery, but the outer query has an + // interval, inherit the parent interval. + interval, err := stmt.GroupByInterval() + if err != nil { + return IteratorOptions{}, err + } else if interval == 0 { + subOpt.Interval = opt.Interval + } + return subOpt, nil +} + +// MergeSorted returns true if the options require a sorted merge. +func (opt IteratorOptions) MergeSorted() bool { + return opt.Ordered +} + +// SeekTime returns the time the iterator should start from. +// For ascending iterators this is the start time, for descending iterators it's the end time. +func (opt IteratorOptions) SeekTime() int64 { + if opt.Ascending { + return opt.StartTime + } + return opt.EndTime +} + +// Window returns the time window [start,end) that t falls within. +func (opt IteratorOptions) Window(t int64) (start, end int64) { + if opt.Interval.IsZero() { + return opt.StartTime, opt.EndTime + 1 + } + + // Subtract the offset to the time so we calculate the correct base interval. + t -= int64(opt.Interval.Offset) + + // Retrieve the zone offset for the start time. + var zone int64 + if opt.Location != nil { + _, zone = opt.Zone(t) + } + + // Truncate time by duration. + dt := (t + zone) % int64(opt.Interval.Duration) + if dt < 0 { + // Negative modulo rounds up instead of down, so offset + // with the duration. + dt += int64(opt.Interval.Duration) + } + + // Find the start time. + if MinTime+dt >= t { + start = MinTime + } else { + start = t - dt + } + + // Look for the start offset again because the first time may have been + // after the offset switch. Now that we are at midnight in UTC, we can + // lookup the zone offset again to get the real starting offset. + if opt.Location != nil { + _, startOffset := opt.Zone(start) + // Do not adjust the offset if the offset change is greater than or + // equal to the duration. 
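+ // For example, a one-hour DST change is only applied when the group-by
+ // interval is longer than an hour; shorter intervals keep their unadjusted boundaries.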
+ if o := zone - startOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { + start += o + } + } + start += int64(opt.Interval.Offset) + + // Find the end time. + if dt := int64(opt.Interval.Duration) - dt; MaxTime-dt <= t { + end = MaxTime + } else { + end = t + dt + } + + // Retrieve the zone offset for the end time. + if opt.Location != nil { + _, endOffset := opt.Zone(end) + // Adjust the end time if the offset is different from the start offset. + // Only apply the offset if it is smaller than the duration. + // This prevents going back in time and creating time windows + // that don't make any sense. + if o := zone - endOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { + // If the offset is greater than 0, that means we are adding time. + // Added time goes into the previous interval because the clocks + // move backwards. If the offset is less than 0, then we are skipping + // time. Skipped time comes after the switch so if we have a time + // interval that lands on the switch, it comes from the next + // interval and not the current one. For this reason, we need to know + // when the actual switch happens by seeing if the time switch is within + // the current interval. We calculate the zone offset with the offset + // and see if the value is the same. If it is, we apply the + // offset. + if o > 0 { + end += o + } else if _, z := opt.Zone(end + o); z == endOffset { + end += o + } + } + } + end += int64(opt.Interval.Offset) + return +} + +// DerivativeInterval returns the time interval for the derivative function. +func (opt IteratorOptions) DerivativeInterval() Interval { + // Use the interval on the derivative() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + // Otherwise use the group by interval, if specified. + if opt.Interval.Duration > 0 { + return Interval{Duration: opt.Interval.Duration} + } + + return Interval{Duration: time.Second} +} + +// ElapsedInterval returns the time interval for the elapsed function. +func (opt IteratorOptions) ElapsedInterval() Interval { + // Use the interval on the elapsed() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + return Interval{Duration: time.Nanosecond} +} + +// IntegralInterval returns the time interval for the integral function. +func (opt IteratorOptions) IntegralInterval() Interval { + // Use the interval on the integral() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + return Interval{Duration: time.Second} +} + +// GetDimensions retrieves the dimensions for this query. +func (opt IteratorOptions) GetDimensions() []string { + if len(opt.GroupBy) > 0 { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + return dimensions + } + return opt.Dimensions +} + +// Zone returns the zone information for the given time. The offset is in nanoseconds. +func (opt *IteratorOptions) Zone(ns int64) (string, int64) { + if opt.Location == nil { + return "", 0 + } + + t := time.Unix(0, ns).In(opt.Location) + name, offset := t.Zone() + return name, secToNs * int64(offset) +} + +// MarshalBinary encodes opt into a binary format. 
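+ // The options are converted to the package's internal protobuf message before
+ // being serialized; UnmarshalBinary performs the reverse conversion.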
+func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { + return proto.Marshal(encodeIteratorOptions(opt)) +} + +// UnmarshalBinary decodes from a binary format in to opt. +func (opt *IteratorOptions) UnmarshalBinary(buf []byte) error { + var pb internal.IteratorOptions + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + other, err := decodeIteratorOptions(&pb) + if err != nil { + return err + } + *opt = *other + + return nil +} + +func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { + pb := &internal.IteratorOptions{ + Interval: encodeInterval(opt.Interval), + Dimensions: opt.Dimensions, + Fill: proto.Int32(int32(opt.Fill)), + StartTime: proto.Int64(opt.StartTime), + EndTime: proto.Int64(opt.EndTime), + Ascending: proto.Bool(opt.Ascending), + Limit: proto.Int64(int64(opt.Limit)), + Offset: proto.Int64(int64(opt.Offset)), + SLimit: proto.Int64(int64(opt.SLimit)), + SOffset: proto.Int64(int64(opt.SOffset)), + Dedupe: proto.Bool(opt.Dedupe), + MaxSeriesN: proto.Int64(int64(opt.MaxSeriesN)), + Ordered: proto.Bool(opt.Ordered), + } + + // Set expression, if set. + if opt.Expr != nil { + pb.Expr = proto.String(opt.Expr.String()) + } + + // Set the location, if set. + if opt.Location != nil { + pb.Location = proto.String(opt.Location.String()) + } + + // Convert and encode aux fields as variable references. + if opt.Aux != nil { + pb.Fields = make([]*internal.VarRef, len(opt.Aux)) + pb.Aux = make([]string, len(opt.Aux)) + for i, ref := range opt.Aux { + pb.Fields[i] = encodeVarRef(ref) + pb.Aux[i] = ref.Val + } + } + + // Encode group by dimensions from a map. + if opt.GroupBy != nil { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + pb.GroupBy = dimensions + } + + // Convert and encode sources to measurements. + if opt.Sources != nil { + sources := make([]*internal.Measurement, len(opt.Sources)) + for i, source := range opt.Sources { + mm := source.(*Measurement) + sources[i] = encodeMeasurement(mm) + } + pb.Sources = sources + } + + // Fill value can only be a number. Set it if available. + if v, ok := opt.FillValue.(float64); ok { + pb.FillValue = proto.Float64(v) + } + + // Set condition, if set. + if opt.Condition != nil { + pb.Condition = proto.String(opt.Condition.String()) + } + + return pb +} + +func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) { + opt := &IteratorOptions{ + Interval: decodeInterval(pb.GetInterval()), + Dimensions: pb.GetDimensions(), + Fill: FillOption(pb.GetFill()), + StartTime: pb.GetStartTime(), + EndTime: pb.GetEndTime(), + Ascending: pb.GetAscending(), + Limit: int(pb.GetLimit()), + Offset: int(pb.GetOffset()), + SLimit: int(pb.GetSLimit()), + SOffset: int(pb.GetSOffset()), + Dedupe: pb.GetDedupe(), + MaxSeriesN: int(pb.GetMaxSeriesN()), + Ordered: pb.GetOrdered(), + } + + // Set expression, if set. + if pb.Expr != nil { + expr, err := ParseExpr(pb.GetExpr()) + if err != nil { + return nil, err + } + opt.Expr = expr + } + + if pb.Location != nil { + loc, err := time.LoadLocation(pb.GetLocation()) + if err != nil { + return nil, err + } + opt.Location = loc + } + + // Convert and decode variable references. 
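+ // Typed field references take precedence; the plain Aux names (which carry no
+ // type information) are only used when no typed references were encoded.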
+ if fields := pb.GetFields(); fields != nil { + opt.Aux = make([]VarRef, len(fields)) + for i, ref := range fields { + opt.Aux[i] = decodeVarRef(ref) + } + } else if aux := pb.GetAux(); aux != nil { + opt.Aux = make([]VarRef, len(aux)) + for i, name := range aux { + opt.Aux[i] = VarRef{Val: name} + } + } + + // Convert and decode sources to measurements. + if pb.Sources != nil { + sources := make([]Source, len(pb.GetSources())) + for i, source := range pb.GetSources() { + mm, err := decodeMeasurement(source) + if err != nil { + return nil, err + } + sources[i] = mm + } + opt.Sources = sources + } + + // Convert group by dimensions to a map. + if pb.GroupBy != nil { + dimensions := make(map[string]struct{}, len(pb.GroupBy)) + for _, dim := range pb.GetGroupBy() { + dimensions[dim] = struct{}{} + } + opt.GroupBy = dimensions + } + + // Set the fill value, if set. + if pb.FillValue != nil { + opt.FillValue = pb.GetFillValue() + } + + // Set condition, if set. + if pb.Condition != nil { + expr, err := ParseExpr(pb.GetCondition()) + if err != nil { + return nil, err + } + opt.Condition = expr + } + + return opt, nil +} + +// selectInfo represents an object that stores info about select fields. +type selectInfo struct { + calls map[*Call]struct{} + refs map[*VarRef]struct{} +} + +// newSelectInfo creates a object with call and var ref info from stmt. +func newSelectInfo(stmt *SelectStatement) *selectInfo { + info := &selectInfo{ + calls: make(map[*Call]struct{}), + refs: make(map[*VarRef]struct{}), + } + Walk(info, stmt.Fields) + return info +} + +func (v *selectInfo) Visit(n Node) Visitor { + switch n := n.(type) { + case *Call: + v.calls[n] = struct{}{} + return nil + case *VarRef: + v.refs[n] = struct{}{} + return nil + } + return v +} + +// FindSelector returns a selector from the selectInfo. This will only +// return a selector if the Call is a selector and it's the only function +// in the selectInfo. +func (v *selectInfo) FindSelector() *Call { + if len(v.calls) != 1 { + return nil + } + + for s := range v.calls { + if IsSelector(s) { + return s + } + } + return nil +} + +// Interval represents a repeating interval for a query. +type Interval struct { + Duration time.Duration + Offset time.Duration +} + +// IsZero returns true if the interval has no duration. 
+func (i Interval) IsZero() bool { return i.Duration == 0 } + +func encodeInterval(i Interval) *internal.Interval { + return &internal.Interval{ + Duration: proto.Int64(i.Duration.Nanoseconds()), + Offset: proto.Int64(i.Offset.Nanoseconds()), + } +} + +func decodeInterval(pb *internal.Interval) Interval { + return Interval{ + Duration: time.Duration(pb.GetDuration()), + Offset: time.Duration(pb.GetOffset()), + } +} + +func encodeVarRef(ref VarRef) *internal.VarRef { + return &internal.VarRef{ + Val: proto.String(ref.Val), + Type: proto.Int32(int32(ref.Type)), + } +} + +func decodeVarRef(pb *internal.VarRef) VarRef { + return VarRef{ + Val: pb.GetVal(), + Type: DataType(pb.GetType()), + } +} + +type nilFloatIterator struct{} + +func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } +func (*nilFloatIterator) Close() error { return nil } +func (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil } + +type nilFloatReaderIterator struct { + r io.Reader +} + +func (*nilFloatReaderIterator) Stats() IteratorStats { return IteratorStats{} } +func (itr *nilFloatReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + itr.r = nil + return r.Close() + } + return nil +} +func (*nilFloatReaderIterator) Next() (*FloatPoint, error) { return nil, nil } + +// integerFloatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerFloatTransformIterator struct { + input IntegerIterator + fn integerFloatTransformFunc +} + +// Stats returns stats from the input iterator. +func (itr *integerFloatTransformIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *integerFloatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerFloatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil + } + return nil, nil +} + +// integerFloatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerFloatTransformFunc func(p *IntegerPoint) *FloatPoint + +type integerFloatCastIterator struct { + input IntegerIterator +} + +func (itr *integerFloatCastIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerFloatCastIterator) Close() error { return itr.input.Close() } +func (itr *integerFloatCastIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + return &FloatPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Nil: p.Nil, + Value: float64(p.Value), + Aux: p.Aux, + }, nil +} + +// IteratorStats represents statistics about an iterator. +// Some statistics are available immediately upon iterator creation while +// some are derived as the iterator processes data. +type IteratorStats struct { + SeriesN int // series represented + PointN int // points returned +} + +// Add aggregates fields from s and other together. Overwrites s. 
+func (s *IteratorStats) Add(other IteratorStats) { + s.SeriesN += other.SeriesN + s.PointN += other.PointN +} + +func encodeIteratorStats(stats *IteratorStats) *internal.IteratorStats { + return &internal.IteratorStats{ + SeriesN: proto.Int64(int64(stats.SeriesN)), + PointN: proto.Int64(int64(stats.PointN)), + } +} + +func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats { + return IteratorStats{ + SeriesN: int(pb.GetSeriesN()), + PointN: int(pb.GetPointN()), + } +} + +// floatFastDedupeIterator outputs unique points where the point has a single aux field. +type floatFastDedupeIterator struct { + input FloatIterator + m map[fastDedupeKey]struct{} // lookup of points already sent +} + +// newFloatFastDedupeIterator returns a new instance of floatFastDedupeIterator. +func newFloatFastDedupeIterator(input FloatIterator) *floatFastDedupeIterator { + return &floatFastDedupeIterator{ + input: input, + m: make(map[fastDedupeKey]struct{}), + } +} + +// Stats returns stats from the input iterator. +func (itr *floatFastDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } + +// Close closes the iterator and all child iterators. +func (itr *floatFastDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatFastDedupeIterator) Next() (*FloatPoint, error) { + for { + // Read next point. + // Skip if there are not any aux fields. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } else if len(p.Aux) == 0 { + continue + } + + // If the point has already been output then move to the next point. + key := fastDedupeKey{name: p.Name} + key.values[0] = p.Aux[0] + if len(p.Aux) > 1 { + key.values[1] = p.Aux[1] + } + if _, ok := itr.m[key]; ok { + continue + } + + // Otherwise mark it as emitted and return point. 
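+ // Note that the key only covers the series name and the first two aux values,
+ // so points that differ solely in later aux fields are still dropped as duplicates.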
+ itr.m[key] = struct{}{} + return p, nil + } +} + +type fastDedupeKey struct { + name string + values [2]interface{} +} + +type reverseStringSlice []string + +func (p reverseStringSlice) Len() int { return len(p) } +func (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] } +func (p reverseStringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func abs(v int64) int64 { + if v < 0 { + return -v + } + return v +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go b/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go new file mode 100644 index 0000000..eb498b9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go @@ -0,0 +1,44 @@ +package influxql + +import "fmt" + +type IteratorMap interface { + Value(tags Tags, buf []interface{}) interface{} +} + +type FieldMap int + +func (i FieldMap) Value(tags Tags, buf []interface{}) interface{} { return buf[i] } + +type TagMap string + +func (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) } + +type NullMap struct{} + +func (NullMap) Value(tags Tags, buf []interface{}) interface{} { return nil } + +func NewIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator { + if driver != nil { + switch driver := driver.(type) { + case FieldMap: + switch itrs[int(driver)].(type) { + case FloatIterator: + return newFloatIteratorMapper(itrs, driver, fields, opt) + case IntegerIterator: + return newIntegerIteratorMapper(itrs, driver, fields, opt) + case StringIterator: + return newStringIteratorMapper(itrs, driver, fields, opt) + case BooleanIterator: + return newBooleanIteratorMapper(itrs, driver, fields, opt) + default: + panic(fmt.Sprintf("unable to map iterator type: %T", itrs[int(driver)])) + } + case TagMap: + return newStringIteratorMapper(itrs, driver, fields, opt) + default: + panic(fmt.Sprintf("unable to create iterator mapper with driveression type: %T", driver)) + } + } + return newFloatIteratorMapper(itrs, nil, fields, opt) +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go b/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go new file mode 100644 index 0000000..e602c2a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go @@ -0,0 +1,62 @@ +package influxql_test + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +func TestIteratorMapper(t *testing.T) { + val1itr := &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: 8}, + }} + + val2itr := &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: "b"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: "h"}, + }} + inputs := []influxql.Iterator{val1itr, val2itr} + + opt := influxql.IteratorOptions{ + Ascending: true, + Aux: []influxql.VarRef{ + {Val: "val1", Type: influxql.Float}, + {Val: "val2", Type: influxql.String}, + }, + } + itr := influxql.NewIteratorMapper(inputs, nil, []influxql.IteratorMap{ + 
influxql.FieldMap(0), + influxql.FieldMap(1), + influxql.TagMap("host"), + }, opt) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Aux: []interface{}{float64(1), "a", "A"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Aux: []interface{}{float64(3), "c", "A"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Aux: []interface{}{float64(2), "b", "B"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Aux: []interface{}{float64(8), "h", "B"}}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *StringIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go b/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go new file mode 100644 index 0000000..c99cc78 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go @@ -0,0 +1,1532 @@ +package influxql_test + +import ( + "bytes" + "fmt" + "math" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. +func TestMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + {Points: []influxql.FloatPoint{}}, + } + + itr := influxql.NewMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: 
ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. +func TestMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestMergeIterator_Nil(t *testing.T) { + itr := influxql.NewMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +func TestMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", 
Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. +func TestSortedMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + } + itr := influxql.NewSortedMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewSortedMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewSortedMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewSortedMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestSortedMergeIterator_Nil(t *testing.T) { + itr := influxql.NewSortedMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}) + if itr != nil { + t.Fatalf("unexpected iterator: %#v", itr) + } +} + +func TestSortedMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewSortedMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Dimensions: []string{"host"}, + Ascending: true, + }) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + 
{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Float(t *testing.T) { + input := &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.FloatPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Integer(t *testing.T) { + input := &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.IntegerPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. 
+func TestLimitIterator_String(t *testing.T) { + input := &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0, Value: "a"}, + {Name: "cpu", Time: 5, Value: "b"}, + {Name: "cpu", Time: 10, Value: "c"}, + {Name: "mem", Time: 5, Value: "d"}, + {Name: "mem", Time: 7, Value: "e"}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Time: 5, Value: "b"}}, + {&influxql.StringPoint{Name: "mem", Time: 7, Value: "e"}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Boolean(t *testing.T) { + input := &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true}, + {Name: "cpu", Time: 5, Value: false}, + {Name: "cpu", Time: 10, Value: true}, + {Name: "mem", Time: 5, Value: false}, + {Name: "mem", Time: 7, Value: true}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Time: 5, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Time: 7, Value: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure auxilary iterators can be created for auxilary fields. +func TestFloatAuxIterator(t *testing.T) { + itr := influxql.NewAuxIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, + {Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, + }}, + influxql.IteratorOptions{Aux: []influxql.VarRef{{Val: "f0", Type: influxql.Float}, {Val: "f1", Type: influxql.Float}}}, + ) + + itrs := []influxql.Iterator{ + itr, + itr.Iterator("f0", influxql.Unknown), + itr.Iterator("f1", influxql.Unknown), + itr.Iterator("f0", influxql.Unknown), + } + itr.Start() + + if a, err := Iterators(itrs).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, + &influxql.FloatPoint{Time: 0, Value: float64(100)}, + &influxql.FloatPoint{Time: 0, Value: float64(200)}, + &influxql.FloatPoint{Time: 0, Value: float64(100)}, + }, + { + &influxql.FloatPoint{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, + &influxql.FloatPoint{Time: 1, Value: float64(500)}, + &influxql.FloatPoint{Time: 1, Value: math.NaN()}, + &influxql.FloatPoint{Time: 1, Value: float64(500)}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure limit iterator returns a subset of points. 
+func TestLimitIterator(t *testing.T) { + itr := influxql.NewLimitIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 0}, + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }}, + influxql.IteratorOptions{ + Limit: 2, + Offset: 1, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 1}}, + {&influxql.FloatPoint{Time: 2, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestFillIterator_DST(t *testing.T) { + for _, tt := range []struct { + name string + start, end time.Time + points []time.Duration + opt influxql.IteratorOptions + }{ + { + name: "Start_GroupByDay_Ascending", + start: mustParseTime("2000-04-01T00:00:00-08:00"), + end: mustParseTime("2000-04-05T00:00:00-07:00"), + points: []time.Duration{ + 24 * time.Hour, + 47 * time.Hour, + 71 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupByDay_Descending", + start: mustParseTime("2000-04-01T00:00:00-08:00"), + end: mustParseTime("2000-04-05T00:00:00-07:00"), + points: []time.Duration{ + 71 * time.Hour, + 47 * time.Hour, + 24 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "Start_GroupByHour_Ascending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T05:00:00-07:00"), + points: []time.Duration{ + 1 * time.Hour, + 2 * time.Hour, + 3 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupByHour_Descending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T05:00:00-07:00"), + points: []time.Duration{ + 3 * time.Hour, + 2 * time.Hour, + 1 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "Start_GroupBy2Hour_Ascending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T07:00:00-07:00"), + points: []time.Duration{ + 2 * time.Hour, + 3 * time.Hour, + 5 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 2 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "Start_GroupBy2Hour_Descending", + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T07:00:00-07:00"), + points: []time.Duration{ + 5 * time.Hour, + 3 * time.Hour, + 2 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 2 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "End_GroupByDay_Ascending", + start: mustParseTime("2000-10-28T00:00:00-07:00"), + end: mustParseTime("2000-11-01T00:00:00-08:00"), + points: []time.Duration{ + 24 * time.Hour, + 49 * time.Hour, + 73 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "End_GroupByDay_Descending", + start: 
mustParseTime("2000-10-28T00:00:00-07:00"), + end: mustParseTime("2000-11-01T00:00:00-08:00"), + points: []time.Duration{ + 73 * time.Hour, + 49 * time.Hour, + 24 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 24 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + { + name: "End_GroupByHour_Ascending", + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-08:00"), + points: []time.Duration{ + 1 * time.Hour, + 2 * time.Hour, + 3 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: true, + }, + }, + { + name: "End_GroupByHour_Descending", + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-08:00"), + points: []time.Duration{ + 3 * time.Hour, + 2 * time.Hour, + 1 * time.Hour, + }, + opt: influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + }, + Location: LosAngeles, + Ascending: false, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + opt := tt.opt + opt.StartTime = tt.start.UnixNano() + opt.EndTime = tt.end.UnixNano() - 1 + + points := make([][]influxql.Point, 0, len(tt.points)+1) + if opt.Ascending { + points = append(points, []influxql.Point{ + &influxql.FloatPoint{ + Time: tt.start.UnixNano(), + }, + }) + } + for _, d := range tt.points { + points = append(points, []influxql.Point{ + &influxql.FloatPoint{ + Time: tt.start.Add(d).UnixNano(), + Nil: true, + }, + }) + } + if !opt.Ascending { + points = append(points, []influxql.Point{ + &influxql.FloatPoint{ + Time: tt.start.UnixNano(), + }, + }) + } + itr := influxql.NewFillIterator( + &FloatIterator{Points: []influxql.FloatPoint{{Time: tt.start.UnixNano(), Value: 0}}}, + nil, + opt, + ) + + if a, err := (Iterators{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, points) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + }) + } +} + +// Iterators is a test wrapper for iterators. +type Iterators []influxql.Iterator + +// Next returns the next value from each iterator. +// Returns nil if any iterator returns a nil. +func (itrs Iterators) Next() ([]influxql.Point, error) { + a := make([]influxql.Point, len(itrs)) + for i, itr := range itrs { + switch itr := itr.(type) { + case influxql.FloatIterator: + fp, err := itr.Next() + if fp == nil || err != nil { + return nil, err + } + a[i] = fp + case influxql.IntegerIterator: + ip, err := itr.Next() + if ip == nil || err != nil { + return nil, err + } + a[i] = ip + case influxql.StringIterator: + sp, err := itr.Next() + if sp == nil || err != nil { + return nil, err + } + a[i] = sp + case influxql.BooleanIterator: + bp, err := itr.Next() + if bp == nil || err != nil { + return nil, err + } + a[i] = bp + default: + panic(fmt.Sprintf("iterator type not supported: %T", itr)) + } + } + return a, nil +} + +// ReadAll reads all points from all iterators. +func (itrs Iterators) ReadAll() ([][]influxql.Point, error) { + var a [][]influxql.Point + + // Read from every iterator until a nil is encountered. + for { + points, err := itrs.Next() + if err != nil { + return nil, err + } else if points == nil { + break + } + a = append(a, influxql.Points(points).Clone()) + } + + // Close all iterators. 
+ influxql.Iterators(itrs).Close() + + return a, nil +} + +func TestIteratorOptions_Window_Interval(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + }, + } + + start, end := opt.Window(4) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 10 { + t.Errorf("expected end to be 10, got %d", end) + } +} + +func TestIteratorOptions_Window_Offset(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 8, + }, + } + + start, end := opt.Window(14) + if start != 8 { + t.Errorf("expected start to be 8, got %d", start) + } + if end != 18 { + t.Errorf("expected end to be 18, got %d", end) + } +} + +func TestIteratorOptions_Window_Default(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 0, + EndTime: 60, + } + + start, end := opt.Window(34) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 61 { + t.Errorf("expected end to be 61, got %d", end) + } +} + +func TestIteratorOptions_Window_Location(t *testing.T) { + for _, tt := range []struct { + now time.Time + start, end time.Time + interval time.Duration + }{ + { + now: mustParseTime("2000-04-02T12:14:15-07:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-03T00:00:00-07:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:17:12-08:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-03T00:00:00-07:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:14:15-08:00"), + start: mustParseTime("2000-04-02T00:00:00-08:00"), + end: mustParseTime("2000-04-02T03:00:00-07:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T03:17:12-07:00"), + start: mustParseTime("2000-04-02T03:00:00-07:00"), + end: mustParseTime("2000-04-02T04:00:00-07:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T01:14:15-08:00"), + start: mustParseTime("2000-04-02T01:00:00-08:00"), + end: mustParseTime("2000-04-02T03:00:00-07:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-04-02T03:17:12-07:00"), + start: mustParseTime("2000-04-02T03:00:00-07:00"), + end: mustParseTime("2000-04-02T04:00:00-07:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T12:14:15-08:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-30T00:00:00-08:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:17:12-07:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-30T00:00:00-08:00"), + interval: 24 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:14:15-07:00"), + start: mustParseTime("2000-10-29T00:00:00-07:00"), + end: mustParseTime("2000-10-29T02:00:00-08:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T03:17:12-08:00"), + start: mustParseTime("2000-10-29T02:00:00-08:00"), + end: mustParseTime("2000-10-29T04:00:00-08:00"), + interval: 2 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T01:14:15-07:00"), + start: mustParseTime("2000-10-29T01:00:00-07:00"), + end: mustParseTime("2000-10-29T01:00:00-08:00"), + interval: 1 * time.Hour, + }, + { + now: mustParseTime("2000-10-29T02:17:12-07:00"), + start: mustParseTime("2000-10-29T02:00:00-07:00"), + end: mustParseTime("2000-10-29T03:00:00-07:00"), + interval: 1 * time.Hour, + }, + } { + t.Run(fmt.Sprintf("%s/%s", tt.now, tt.interval), 
func(t *testing.T) { + opt := influxql.IteratorOptions{ + Location: LosAngeles, + Interval: influxql.Interval{ + Duration: tt.interval, + }, + } + start, end := opt.Window(tt.now.UnixNano()) + if have, want := time.Unix(0, start).In(LosAngeles), tt.start; !have.Equal(want) { + t.Errorf("unexpected start time: %s != %s", have, want) + } + if have, want := time.Unix(0, end).In(LosAngeles), tt.end; !have.Equal(want) { + t.Errorf("unexpected end time: %s != %s", have, want) + } + }) + } +} + +func TestIteratorOptions_Window_MinTime(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Interval: influxql.Interval{ + Duration: time.Hour, + }, + } + expected := time.Unix(0, influxql.MinTime).Add(time.Hour).Truncate(time.Hour) + + start, end := opt.Window(influxql.MinTime) + if start != influxql.MinTime { + t.Errorf("expected start to be %d, got %d", influxql.MinTime, start) + } + if have, want := end, expected.UnixNano(); have != want { + t.Errorf("expected end to be %d, got %d", want, have) + } +} + +func TestIteratorOptions_Window_MaxTime(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Interval: influxql.Interval{ + Duration: time.Hour, + }, + } + expected := time.Unix(0, influxql.MaxTime).Truncate(time.Hour) + + start, end := opt.Window(influxql.MaxTime) + if have, want := start, expected.UnixNano(); have != want { + t.Errorf("expected start to be %d, got %d", want, have) + } + if end != influxql.MaxTime { + t.Errorf("expected end to be %d, got %d", influxql.MaxTime, end) + } +} + +func TestIteratorOptions_SeekTime_Ascending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: true, + } + + time := opt.SeekTime() + if time != 30 { + t.Errorf("expected time to be 30, got %d", time) + } +} + +func TestIteratorOptions_SeekTime_Descending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: false, + } + + time := opt.SeekTime() + if time != 60 { + t.Errorf("expected time to be 60, got %d", time) + } +} + +func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 10} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_Call(t *testing.T) { + opt := influxql.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 2 * time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + 
if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_GroupBy(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: time.Nanosecond} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_ElapsedInterval_Call(t *testing.T) { + opt := influxql.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 2 * time.Second} + actual := opt.ElapsedInterval() + if actual != expected { + t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_IntegralInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Second} + actual := opt.IntegralInterval() + if actual != expected { + t.Errorf("expected default integral interval to be %v, got %v", expected, actual) + } +} + +// Ensure iterator options can be marshaled to and from a binary format. +func TestIteratorOptions_MarshalBinary(t *testing.T) { + opt := &influxql.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + Aux: []influxql.VarRef{{Val: "a"}, {Val: "b"}, {Val: "c"}}, + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + Offset: 20 * time.Minute, + }, + Dimensions: []string{"region", "host"}, + GroupBy: map[string]struct{}{ + "region": {}, + "host": {}, + "cluster": {}, + }, + Fill: influxql.NumberFill, + FillValue: float64(100), + Condition: MustParseExpr(`foo = 'bar'`), + StartTime: 1000, + EndTime: 2000, + Ascending: true, + Limit: 100, + Offset: 200, + SLimit: 300, + SOffset: 400, + Dedupe: true, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(&other, opt) { + t.Fatalf("unexpected options: %s", spew.Sdump(other)) + } +} + +// Ensure iterator can be encoded and decoded over a byte stream. +func TestIterator_EncodeDecode(t *testing.T) { + var buf bytes.Buffer + + // Create an iterator with several points & stats. + itr := &FloatIterator{ + Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}, + }, + stats: influxql.IteratorStats{ + SeriesN: 2, + PointN: 0, + }, + } + + // Encode to the buffer. + enc := influxql.NewIteratorEncoder(&buf) + enc.StatsInterval = 100 * time.Millisecond + if err := enc.EncodeIterator(itr); err != nil { + t.Fatal(err) + } + + // Decode from the buffer. + dec := influxql.NewReaderIterator(&buf, influxql.Float, itr.Stats()) + + // Initial stats should exist immediately. + fdec := dec.(influxql.FloatIterator) + if stats := fdec.Stats(); !reflect.DeepEqual(stats, influxql.IteratorStats{SeriesN: 2, PointN: 0}) { + t.Fatalf("unexpected stats(initial): %#v", stats) + } + + // Read both points. 
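+	// (The final Next call below checks that the decoder signals end-of-stream
+	// with a nil point and a nil error.)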
+ if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(0): %#v", err) + } else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}) { + t.Fatalf("unexpected point(0); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(1): %#v", err) + } else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}) { + t.Fatalf("unexpected point(1); %#v", p) + } + if p, err := fdec.Next(); err != nil { + t.Fatalf("unexpected error(eof): %#v", err) + } else if p != nil { + t.Fatalf("unexpected point(eof); %#v", p) + } +} + +// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator. +type IteratorCreator struct { + CreateIteratorFn func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) +} + +func (ic *IteratorCreator) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(m, opt) +} + +func (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(m) +} + +func (ic *IteratorCreator) MapType(m *influxql.Measurement, field string) influxql.DataType { + f, d, err := ic.FieldDimensions(m) + if err != nil { + return influxql.Unknown + } + + if typ, ok := f[field]; ok { + return typ + } + if _, ok := d[field]; ok { + return influxql.Tag + } + return influxql.Unknown +} + +// Test implementation of influxql.FloatIterator +type FloatIterator struct { + Points []influxql.FloatPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *FloatIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() (*influxql.FloatPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func FloatIterators(inputs []*FloatIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.IntegerIterator +type IntegerIterator struct { + Points []influxql.IntegerPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *IntegerIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *IntegerIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. 
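+// It returns nil once the points are exhausted or the iterator has been closed.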
+func (itr *IntegerIterator) Next() (*influxql.IntegerPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func IntegerIterators(inputs []*IntegerIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.StringIterator +type StringIterator struct { + Points []influxql.StringPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *StringIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *StringIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *StringIterator) Next() (*influxql.StringPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func StringIterators(inputs []*StringIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.BooleanIterator +type BooleanIterator struct { + Points []influxql.BooleanPoint + Closed bool + stats influxql.IteratorStats +} + +func (itr *BooleanIterator) Stats() influxql.IteratorStats { return itr.stats } +func (itr *BooleanIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *BooleanIterator) Next() (*influxql.BooleanPoint, error) { + if len(itr.Points) == 0 || itr.Closed { + return nil, nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v, nil +} + +func BooleanIterators(inputs []*BooleanIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/linear.go b/vendor/github.com/influxdata/influxdb/influxql/linear.go new file mode 100644 index 0000000..910063c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/linear.go @@ -0,0 +1,21 @@ +package influxql + +// linearFloat computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue) +// and returns the value of the point on the line with time windowTime +// y = mx + b +func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 { + m := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line + x := float64(windowTime - previousTime) // how far into the interval we are + b := previousValue + return m*x + b +} + +// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue) +// and returns the value of the point on the line with time windowTime +// y = mx + b +func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 { + m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line + x := float64(windowTime - previousTime) // how far into the interval we are + b := float64(previousValue) + return int64(m*x + b) +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/monitor.go 
b/vendor/github.com/influxdata/influxdb/influxql/monitor.go new file mode 100644 index 0000000..3ea4618 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/monitor.go @@ -0,0 +1,23 @@ +package influxql + +import "time" + +// PointLimitMonitor is a query monitor that exits when the number of points +// emitted exceeds a threshold. +func PointLimitMonitor(itrs Iterators, interval time.Duration, limit int) QueryMonitorFunc { + return func(closing <-chan struct{}) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + stats := itrs.Stats() + if stats.PointN >= limit { + return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit) + } + case <-closing: + return nil + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go b/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go new file mode 100644 index 0000000..f2e628d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go @@ -0,0 +1,239 @@ +// Package neldermead is an implementation of the Nelder-Mead optimization method. +// Based on work by Michael F. Hutt: http://www.mikehutt.com/neldermead.html +package neldermead + +import "math" + +const ( + defaultMaxIterations = 1000 + // reflection coefficient + defaultAlpha = 1.0 + // contraction coefficient + defaultBeta = 0.5 + // expansion coefficient + defaultGamma = 2.0 +) + +// Optimizer represents the parameters to the Nelder-Mead simplex method. +type Optimizer struct { + // Maximum number of iterations. + MaxIterations int + // Reflection coefficient. + Alpha, + // Contraction coefficient. + Beta, + // Expansion coefficient. + Gamma float64 +} + +// New returns a new instance of Optimizer with all values set to the defaults. +func New() *Optimizer { + return &Optimizer{ + MaxIterations: defaultMaxIterations, + Alpha: defaultAlpha, + Beta: defaultBeta, + Gamma: defaultGamma, + } +} + +// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings. 
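+// objfunc is the function to minimize, start is the initial guess, epsilon is the
+// convergence tolerance on the spread of objective values over the simplex vertices,
+// and scale sets the size of the initial simplex. It returns the minimum found and
+// the coordinates at which it was found. A minimal usage sketch (hypothetical
+// one-dimensional objective, mirroring the call pattern in the package tests):
+//
+//	opt := neldermead.New()
+//	minVal, params := opt.Optimize(func(x []float64) float64 { return (x[0] - 3) * (x[0] - 3) }, []float64{0}, 1e-6, 1)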
+func (o *Optimizer) Optimize( + objfunc func([]float64) float64, + start []float64, + epsilon, + scale float64, +) (float64, []float64) { + n := len(start) + + //holds vertices of simplex + v := make([][]float64, n+1) + for i := range v { + v[i] = make([]float64, n) + } + + //value of function at each vertex + f := make([]float64, n+1) + + //reflection - coordinates + vr := make([]float64, n) + + //expansion - coordinates + ve := make([]float64, n) + + //contraction - coordinates + vc := make([]float64, n) + + //centroid - coordinates + vm := make([]float64, n) + + // create the initial simplex + // assume one of the vertices is 0,0 + + pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2)) + qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2)) + + for i := 0; i < n; i++ { + v[0][i] = start[i] + } + + for i := 1; i <= n; i++ { + for j := 0; j < n; j++ { + if i-1 == j { + v[i][j] = pn + start[j] + } else { + v[i][j] = qn + start[j] + } + } + } + + // find the initial function values + for j := 0; j <= n; j++ { + f[j] = objfunc(v[j]) + } + + // begin the main loop of the minimization + for itr := 1; itr <= o.MaxIterations; itr++ { + + // find the indexes of the largest and smallest values + vg := 0 + vs := 0 + for i := 0; i <= n; i++ { + if f[i] > f[vg] { + vg = i + } + if f[i] < f[vs] { + vs = i + } + } + // find the index of the second largest value + vh := vs + for i := 0; i <= n; i++ { + if f[i] > f[vh] && f[i] < f[vg] { + vh = i + } + } + + // calculate the centroid + for i := 0; i <= n-1; i++ { + cent := 0.0 + for m := 0; m <= n; m++ { + if m != vg { + cent += v[m][i] + } + } + vm[i] = cent / float64(n) + } + + // reflect vg to new vertex vr + for i := 0; i <= n-1; i++ { + vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i]) + } + + // value of function at reflection point + fr := objfunc(vr) + + if fr < f[vh] && fr >= f[vs] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + + // investigate a step further in this direction + if fr < f[vs] { + for i := 0; i <= n-1; i++ { + ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i]) + } + + // value of function at expansion point + fe := objfunc(ve) + + // by making fe < fr as opposed to fe < f[vs], + // Rosenbrocks function takes 63 iterations as opposed + // to 64 when using double variables. + + if fe < fr { + for i := 0; i <= n-1; i++ { + v[vg][i] = ve[i] + } + f[vg] = fe + } else { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + } + + // check to see if a contraction is necessary + if fr >= f[vh] { + if fr < f[vg] && fr >= f[vh] { + // perform outside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] + o.Beta*(vr[i]-vm[i]) + } + } else { + // perform inside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i]) + } + } + + // value of function at contraction point + fc := objfunc(vc) + + if fc < f[vg] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vc[i] + } + f[vg] = fc + } else { + // at this point the contraction is not successful, + // we must halve the distance from vs to all the + // vertices of the simplex and then continue. 
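+				// (This is the standard Nelder-Mead shrink step: every vertex
+				// except the current best vertex vs is pulled halfway toward vs.)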
+ + for row := 0; row <= n; row++ { + if row != vs { + for i := 0; i <= n-1; i++ { + v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0 + } + } + } + f[vg] = objfunc(v[vg]) + f[vh] = objfunc(v[vh]) + } + } + + // test for convergence + fsum := 0.0 + for i := 0; i <= n; i++ { + fsum += f[i] + } + favg := fsum / float64(n+1) + s := 0.0 + for i := 0; i <= n; i++ { + s += math.Pow((f[i]-favg), 2.0) / float64(n) + } + s = math.Sqrt(s) + if s < epsilon { + break + } + } + + // find the index of the smallest value + vs := 0 + for i := 0; i <= n; i++ { + if f[i] < f[vs] { + vs = i + } + } + + parameters := make([]float64, n) + for i := 0; i < n; i++ { + parameters[i] = v[vs][i] + } + + min := objfunc(v[vs]) + + return min, parameters +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go b/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go new file mode 100644 index 0000000..12d2681 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go @@ -0,0 +1,64 @@ +package neldermead_test + +import ( + "math" + "testing" + + "github.com/influxdata/influxdb/influxql/neldermead" +) + +func round(num float64, precision float64) float64 { + rnum := num * math.Pow(10, precision) + var tnum float64 + if rnum < 0 { + tnum = math.Floor(rnum - 0.5) + } else { + tnum = math.Floor(rnum + 0.5) + } + rnum = tnum / math.Pow(10, precision) + return rnum +} + +func almostEqual(a, b, e float64) bool { + return math.Abs(a-b) < e +} + +func Test_Optimize(t *testing.T) { + + constraints := func(x []float64) { + for i := range x { + x[i] = round(x[i], 5) + } + } + // 100*(b-a^2)^2 + (1-a)^2 + // + // Obvious global minimum at (a,b) = (1,1) + // + // Useful visualization: + // https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2) + f := func(x []float64) float64 { + constraints(x) + // a = x[0] + // b = x[1] + return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0]) + } + + start := []float64{-1.2, 1.0} + + opt := neldermead.New() + epsilon := 1e-5 + min, parameters := opt.Optimize(f, start, epsilon, 1) + + if !almostEqual(min, 0, epsilon) { + t.Errorf("unexpected min: got %f exp 0", min) + } + + if !almostEqual(parameters[0], 1, 1e-2) { + t.Errorf("unexpected parameters[0]: got %f exp 1", parameters[0]) + } + + if !almostEqual(parameters[1], 1, 1e-2) { + t.Errorf("unexpected parameters[1]: got %f exp 1", parameters[1]) + } + +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/parser.go b/vendor/github.com/influxdata/influxdb/influxql/parser.go new file mode 100644 index 0000000..ac64377 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/parser.go @@ -0,0 +1,3014 @@ +package influxql + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +const ( + // DateFormat represents the format for date literals. + DateFormat = "2006-01-02" + + // DateTimeFormat represents the format for date time literals. + DateTimeFormat = "2006-01-02 15:04:05.999999" +) + +// Parser represents an InfluxQL parser. +type Parser struct { + s *bufScanner + params map[string]interface{} +} + +// NewParser returns a new instance of Parser. +func NewParser(r io.Reader) *Parser { + return &Parser{s: newBufScanner(r)} +} + +// SetParams sets the parameters that will be used for any bound parameter substitutions. 
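+// (Hypothetical example: a map entry "host" -> "server01" would fill a $host
+// placeholder in the query text, assuming the usual $-prefixed bound-parameter syntax.)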
+func (p *Parser) SetParams(params map[string]interface{}) { + p.params = params +} + +// ParseQuery parses a query string and returns its AST representation. +func ParseQuery(s string) (*Query, error) { return NewParser(strings.NewReader(s)).ParseQuery() } + +// ParseStatement parses a statement string and returns its AST representation. +func ParseStatement(s string) (Statement, error) { + return NewParser(strings.NewReader(s)).ParseStatement() +} + +// MustParseStatement parses a statement string and returns its AST. Panic on error. +func MustParseStatement(s string) Statement { + stmt, err := ParseStatement(s) + if err != nil { + panic(err.Error()) + } + return stmt +} + +// ParseExpr parses an expression string and returns its AST representation. +func ParseExpr(s string) (Expr, error) { return NewParser(strings.NewReader(s)).ParseExpr() } + +// MustParseExpr parses an expression string and returns its AST. Panic on error. +func MustParseExpr(s string) Expr { + expr, err := ParseExpr(s) + if err != nil { + panic(err.Error()) + } + return expr +} + +// ParseQuery parses an InfluxQL string and returns a Query AST object. +func (p *Parser) ParseQuery() (*Query, error) { + var statements Statements + semi := true + + for { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok == EOF { + return &Query{Statements: statements}, nil + } else if tok == SEMICOLON { + semi = true + } else { + if !semi { + return nil, newParseError(tokstr(tok, lit), []string{";"}, pos) + } + p.unscan() + s, err := p.ParseStatement() + if err != nil { + return nil, err + } + statements = append(statements, s) + semi = false + } + } +} + +// ParseStatement parses an InfluxQL string and returns a Statement AST object. +func (p *Parser) ParseStatement() (Statement, error) { + // Inspect the first token. + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case SELECT: + return p.parseSelectStatement(targetNotRequired) + case DELETE: + return p.parseDeleteStatement() + case SHOW: + return p.parseShowStatement() + case CREATE: + return p.parseCreateStatement() + case DROP: + return p.parseDropStatement() + case GRANT: + return p.parseGrantStatement() + case REVOKE: + return p.parseRevokeStatement() + case ALTER: + return p.parseAlterStatement() + case SET: + return p.parseSetPasswordUserStatement() + case KILL: + return p.parseKillQueryStatement() + default: + return nil, newParseError(tokstr(tok, lit), []string{"SELECT", "DELETE", "SHOW", "CREATE", "DROP", "GRANT", "REVOKE", "ALTER", "SET", "KILL"}, pos) + } +} + +// parseShowStatement parses a string and returns a list statement. +// This function assumes the SHOW token has already been consumed. 
+func (p *Parser) parseShowStatement() (Statement, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case CONTINUOUS: + return p.parseShowContinuousQueriesStatement() + case GRANTS: + return p.parseGrantsForUserStatement() + case DATABASES: + return p.parseShowDatabasesStatement() + case FIELD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == KEYS { + return p.parseShowFieldKeysStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"KEYS"}, pos) + case MEASUREMENTS: + return p.parseShowMeasurementsStatement() + case QUERIES: + return p.parseShowQueriesStatement() + case RETENTION: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == POLICIES { + return p.parseShowRetentionPoliciesStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"POLICIES"}, pos) + case SERIES: + return p.parseShowSeriesStatement() + case SHARD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == GROUPS { + return p.parseShowShardGroupsStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"GROUPS"}, pos) + case SHARDS: + return p.parseShowShardsStatement() + case STATS: + return p.parseShowStatsStatement() + case DIAGNOSTICS: + return p.parseShowDiagnosticsStatement() + case TAG: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == KEYS { + return p.parseShowTagKeysStatement() + } else if tok == VALUES { + return p.parseShowTagValuesStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"KEYS", "VALUES"}, pos) + case USERS: + return p.parseShowUsersStatement() + case SUBSCRIPTIONS: + return p.parseShowSubscriptionsStatement() + } + + showQueryKeywords := []string{ + "CONTINUOUS", + "DATABASES", + "FIELD", + "GRANTS", + "MEASUREMENTS", + "QUERIES", + "RETENTION", + "SERIES", + "TAG", + "USERS", + "STATS", + "DIAGNOSTICS", + "SHARD", + "SHARDS", + "SUBSCRIPTIONS", + } + sort.Strings(showQueryKeywords) + + return nil, newParseError(tokstr(tok, lit), showQueryKeywords, pos) +} + +// parseCreateStatement parses a string and returns a create statement. +// This function assumes the CREATE token has already been consumed. +func (p *Parser) parseCreateStatement() (Statement, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == CONTINUOUS { + return p.parseCreateContinuousQueryStatement() + } else if tok == DATABASE { + return p.parseCreateDatabaseStatement() + } else if tok == USER { + return p.parseCreateUserStatement() + } else if tok == RETENTION { + tok, pos, lit = p.scanIgnoreWhitespace() + if tok != POLICY { + return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos) + } + return p.parseCreateRetentionPolicyStatement() + } else if tok == SUBSCRIPTION { + return p.parseCreateSubscriptionStatement() + } + + return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASE", "USER", "RETENTION", "SUBSCRIPTION"}, pos) +} + +// parseDropStatement parses a string and returns a drop statement. +// This function assumes the DROP token has already been consumed. 
+func (p *Parser) parseDropStatement() (Statement, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case CONTINUOUS: + return p.parseDropContinuousQueryStatement() + case DATABASE: + return p.parseDropDatabaseStatement() + case MEASUREMENT: + return p.parseDropMeasurementStatement() + case RETENTION: + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != POLICY { + return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos) + } + return p.parseDropRetentionPolicyStatement() + case SERIES: + return p.parseDropSeriesStatement() + case SHARD: + return p.parseDropShardStatement() + case SUBSCRIPTION: + return p.parseDropSubscriptionStatement() + case USER: + return p.parseDropUserStatement() + default: + return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "MEASUREMENT", "RETENTION", "SERIES", "SHARD", "SUBSCRIPTION", "USER"}, pos) + } +} + +// parseAlterStatement parses a string and returns an alter statement. +// This function assumes the ALTER token has already been consumed. +func (p *Parser) parseAlterStatement() (Statement, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == RETENTION { + if tok, pos, lit = p.scanIgnoreWhitespace(); tok != POLICY { + return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos) + } + return p.parseAlterRetentionPolicyStatement() + } + + return nil, newParseError(tokstr(tok, lit), []string{"RETENTION"}, pos) +} + +// parseSetPasswordUserStatement parses a string and returns a set statement. +// This function assumes the SET token has already been consumed. +func (p *Parser) parseSetPasswordUserStatement() (*SetPasswordUserStatement, error) { + stmt := &SetPasswordUserStatement{} + + // Consume the required PASSWORD FOR tokens. + if err := p.parseTokens([]Token{PASSWORD, FOR}); err != nil { + return nil, err + } + + // Parse username + ident, err := p.parseIdent() + + if err != nil { + return nil, err + } + stmt.Name = ident + + // Consume the required = token. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != EQ { + return nil, newParseError(tokstr(tok, lit), []string{"="}, pos) + } + + // Parse new user's password + if ident, err = p.parseString(); err != nil { + return nil, err + } + stmt.Password = ident + + return stmt, nil +} + +// parseKillQueryStatement parses a string and returns a kill statement. +// This function assumes the KILL token has already been consumed. +func (p *Parser) parseKillQueryStatement() (*KillQueryStatement, error) { + if err := p.parseTokens([]Token{QUERY}); err != nil { + return nil, err + } + + qid, err := p.parseUInt64() + if err != nil { + return nil, err + } + + var host string + if tok, _, _ := p.scanIgnoreWhitespace(); tok == ON { + host, err = p.parseIdent() + if err != nil { + return nil, err + } + } else { + p.unscan() + } + return &KillQueryStatement{QueryID: qid, Host: host}, nil +} + +// parseCreateSubscriptionStatement parses a string and returns a CreateSubscriptionStatement. +// This function assumes the "CREATE SUBSCRIPTION" tokens have already been consumed. +func (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) { + stmt := &CreateSubscriptionStatement{} + + // Read the id of the subscription to create. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Name = ident + + // Expect an "ON" keyword. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + + // Read the name of the database. 
+ if ident, err = p.parseIdent(); err != nil { + return nil, err + } + stmt.Database = ident + + if tok, pos, lit := p.scan(); tok != DOT { + return nil, newParseError(tokstr(tok, lit), []string{"."}, pos) + } + + // Read the name of the retention policy. + if ident, err = p.parseIdent(); err != nil { + return nil, err + } + stmt.RetentionPolicy = ident + + // Expect a "DESTINATIONS" keyword. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DESTINATIONS { + return nil, newParseError(tokstr(tok, lit), []string{"DESTINATIONS"}, pos) + } + + // Expect one of "ANY ALL" keywords. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ALL || tok == ANY { + stmt.Mode = tokens[tok] + } else { + return nil, newParseError(tokstr(tok, lit), []string{"ALL", "ANY"}, pos) + } + + // Read list of destinations. + var destinations []string + if destinations, err = p.parseStringList(); err != nil { + return nil, err + } + stmt.Destinations = destinations + + return stmt, nil +} + +// parseCreateRetentionPolicyStatement parses a string and returns a create retention policy statement. +// This function assumes the CREATE RETENTION POLICY tokens have already been consumed. +func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicyStatement, error) { + stmt := &CreateRetentionPolicyStatement{} + + // Parse the retention policy name. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Name = ident + + // Consume the required ON token. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + + // Parse the database name. + ident, err = p.parseIdent() + if err != nil { + return nil, err + } + stmt.Database = ident + + // Parse required DURATION token. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + + // Parse duration value + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.Duration = d + + // Parse required REPLICATION token. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != REPLICATION { + return nil, newParseError(tokstr(tok, lit), []string{"REPLICATION"}, pos) + } + + // Parse replication value. + n, err := p.parseInt(1, math.MaxInt32) + if err != nil { + return nil, err + } + stmt.Replication = n + + // Parse optional SHARD token. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == SHARD { + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + + // Check to see if they used the INF keyword + tok, pos, _ := p.scanIgnoreWhitespace() + if tok == INF { + return nil, &ParseError{ + Message: "invalid duration INF for shard duration", + Pos: pos, + } + } + p.unscan() + + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.ShardGroupDuration = d + } else { + p.unscan() + } + + // Parse optional DEFAULT token. + if tok, _, _ := p.scanIgnoreWhitespace(); tok == DEFAULT { + stmt.Default = true + } else { + p.unscan() + } + + return stmt, nil +} + +// parseAlterRetentionPolicyStatement parses a string and returns an alter retention policy statement. +// This function assumes the ALTER RETENTION POLICY tokens have already been consumed. +func (p *Parser) parseAlterRetentionPolicyStatement() (*AlterRetentionPolicyStatement, error) { + stmt := &AlterRetentionPolicyStatement{} + + // Parse the retention policy name. 
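+	// The DEFAULT keyword is accepted here and treated as the literal policy name "default".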
+ tok, pos, lit := p.scanIgnoreWhitespace() + if tok == DEFAULT { + stmt.Name = "default" + } else if tok == IDENT { + stmt.Name = lit + } else { + return nil, newParseError(tokstr(tok, lit), []string{"identifier"}, pos) + } + + // Consume the required ON token. + if tok, pos, lit = p.scanIgnoreWhitespace(); tok != ON { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + + // Parse the database name. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Database = ident + + // Loop through option tokens (DURATION, REPLICATION, SHARD DURATION, DEFAULT, etc.). + found := make(map[Token]struct{}) +Loop: + for { + tok, pos, lit := p.scanIgnoreWhitespace() + if _, ok := found[tok]; ok { + return nil, &ParseError{ + Message: fmt.Sprintf("found duplicate %s option", tok), + Pos: pos, + } + } + + switch tok { + case DURATION: + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.Duration = &d + case REPLICATION: + n, err := p.parseInt(1, math.MaxInt32) + if err != nil { + return nil, err + } + stmt.Replication = &n + case SHARD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == DURATION { + // Check to see if they used the INF keyword + tok, pos, _ := p.scanIgnoreWhitespace() + if tok == INF { + return nil, &ParseError{ + Message: "invalid duration INF for shard duration", + Pos: pos, + } + } + p.unscan() + + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.ShardGroupDuration = &d + } else { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + case DEFAULT: + stmt.Default = true + default: + if len(found) == 0 { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "REPLICATION", "SHARD", "DEFAULT"}, pos) + } + p.unscan() + break Loop + } + found[tok] = struct{}{} + } + + return stmt, nil +} + +// parseInt parses a string representing a base 10 integer and returns the number. +// It returns an error if the parsed number is outside the range [min, max]. +func (p *Parser) parseInt(min, max int) (int, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) + } + + // Convert string to int. + n, err := strconv.Atoi(lit) + if err != nil { + return 0, &ParseError{Message: err.Error(), Pos: pos} + } else if min > n || n > max { + return 0, &ParseError{ + Message: fmt.Sprintf("invalid value %d: must be %d <= n <= %d", n, min, max), + Pos: pos, + } + } + + return n, nil +} + +// parseUInt64 parses a string and returns a 64-bit unsigned integer literal. +func (p *Parser) parseUInt64() (uint64, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != INTEGER { + return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) + } + + // Convert string to unsigned 64-bit integer + n, err := strconv.ParseUint(lit, 10, 64) + if err != nil { + return 0, &ParseError{Message: err.Error(), Pos: pos} + } + + return uint64(n), nil +} + +// parseDuration parses a string and returns a duration literal. +// This function assumes the DURATION token has already been consumed. 
+func (p *Parser) parseDuration() (time.Duration, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != DURATIONVAL && tok != INF { + return 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos) + } + + if tok == INF { + return 0, nil + } + + d, err := ParseDuration(lit) + if err != nil { + return 0, &ParseError{Message: err.Error(), Pos: pos} + } + + return d, nil +} + +// parseIdent parses an identifier. +func (p *Parser) parseIdent() (string, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != IDENT { + return "", newParseError(tokstr(tok, lit), []string{"identifier"}, pos) + } + return lit, nil +} + +// parseIdentList parses a comma delimited list of identifiers. +func (p *Parser) parseIdentList() ([]string, error) { + // Parse first (required) identifier. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + idents := []string{ident} + + // Parse remaining (optional) identifiers. + for { + if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA { + p.unscan() + return idents, nil + } + + if ident, err = p.parseIdent(); err != nil { + return nil, err + } + + idents = append(idents, ident) + } +} + +// parseSegmentedIdents parses a segmented identifiers. +// e.g., "db"."rp".measurement or "db"..measurement +func (p *Parser) parseSegmentedIdents() ([]string, error) { + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + idents := []string{ident} + + // Parse remaining (optional) identifiers. + for { + if tok, _, _ := p.scan(); tok != DOT { + // No more segments so we're done. + p.unscan() + break + } + + if ch := p.peekRune(); ch == '/' { + // Next segment is a regex so we're done. + break + } else if ch == ':' { + // Next segment is context-specific so let caller handle it. + break + } else if ch == '.' { + // Add an empty identifier. + idents = append(idents, "") + continue + } + + // Parse the next identifier. + if ident, err = p.parseIdent(); err != nil { + return nil, err + } + + idents = append(idents, ident) + } + + if len(idents) > 3 { + msg := fmt.Sprintf("too many segments in %s", QuoteIdent(idents...)) + return nil, &ParseError{Message: msg} + } + + return idents, nil +} + +// parseString parses a string. +func (p *Parser) parseString() (string, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != STRING { + return "", newParseError(tokstr(tok, lit), []string{"string"}, pos) + } + return lit, nil +} + +// parseStringList parses a list of strings separated by commas. +func (p *Parser) parseStringList() ([]string, error) { + // Parse first (required) string. + str, err := p.parseString() + if err != nil { + return nil, err + } + strs := []string{str} + + // Parse remaining (optional) strings. + for { + if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA { + p.unscan() + return strs, nil + } + + if str, err = p.parseString(); err != nil { + return nil, err + } + + strs = append(strs, str) + } +} + +// parseRevokeStatement parses a string and returns a revoke statement. +// This function assumes the REVOKE token has already been consumed. +func (p *Parser) parseRevokeStatement() (Statement, error) { + // Parse the privilege to be revoked. + priv, err := p.parsePrivilege() + if err != nil { + return nil, err + } + + // Check for ON or FROM clauses. 
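+	// ON introduces a database-scoped privilege revocation; FROM is only valid
+	// with ALL PRIVILEGES and revokes a user's admin status.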
+ tok, pos, lit := p.scanIgnoreWhitespace() + if tok == ON { + stmt, err := p.parseRevokeOnStatement() + if err != nil { + return nil, err + } + stmt.Privilege = priv + return stmt, nil + } else if tok == FROM { + // Admin privilege is only revoked on ALL PRIVILEGES. + if priv != AllPrivileges { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + return p.parseRevokeAdminStatement() + } + + // Only ON or FROM clauses are allowed after privilege. + if priv == AllPrivileges { + return nil, newParseError(tokstr(tok, lit), []string{"ON", "FROM"}, pos) + } + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) +} + +// parseRevokeOnStatement parses a string and returns a revoke statement. +// This function assumes the [PRIVILEGE] ON tokens have already been consumed. +func (p *Parser) parseRevokeOnStatement() (*RevokeStatement, error) { + stmt := &RevokeStatement{} + + // Parse the name of the database. + lit, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.On = lit + + // Parse FROM clause. + tok, pos, lit := p.scanIgnoreWhitespace() + + // Check for required FROM token. + if tok != FROM { + return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) + } + + // Parse the name of the user. + lit, err = p.parseIdent() + if err != nil { + return nil, err + } + stmt.User = lit + + return stmt, nil +} + +// parseRevokeAdminStatement parses a string and returns a revoke admin statement. +// This function assumes the ALL [PRVILEGES] FROM token has already been consumed. +func (p *Parser) parseRevokeAdminStatement() (*RevokeAdminStatement, error) { + // Admin privilege is always false when revoke admin clause is called. + stmt := &RevokeAdminStatement{} + + // Parse the name of the user. + lit, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.User = lit + + return stmt, nil +} + +// parseGrantStatement parses a string and returns a grant statement. +// This function assumes the GRANT token has already been consumed. +func (p *Parser) parseGrantStatement() (Statement, error) { + // Parse the privilege to be granted. + priv, err := p.parsePrivilege() + if err != nil { + return nil, err + } + + // Check for ON or TO clauses. + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == ON { + stmt, err := p.parseGrantOnStatement() + if err != nil { + return nil, err + } + stmt.Privilege = priv + return stmt, nil + } else if tok == TO { + // Admin privilege is only granted on ALL PRIVILEGES. + if priv != AllPrivileges { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + return p.parseGrantAdminStatement() + } + + // Only ON or TO clauses are allowed after privilege. + if priv == AllPrivileges { + return nil, newParseError(tokstr(tok, lit), []string{"ON", "TO"}, pos) + } + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) +} + +// parseGrantOnStatement parses a string and returns a grant statement. +// This function assumes the [PRIVILEGE] ON tokens have already been consumed. +func (p *Parser) parseGrantOnStatement() (*GrantStatement, error) { + stmt := &GrantStatement{} + + // Parse the name of the database. + lit, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.On = lit + + // Parse TO clause. + tok, pos, lit := p.scanIgnoreWhitespace() + + // Check for required TO token. + if tok != TO { + return nil, newParseError(tokstr(tok, lit), []string{"TO"}, pos) + } + + // Parse the name of the user. 
+ lit, err = p.parseIdent() + if err != nil { + return nil, err + } + stmt.User = lit + + return stmt, nil +} + +// parseGrantAdminStatement parses a string and returns a grant admin statement. +// This function assumes the ALL [PRVILEGES] TO tokens have already been consumed. +func (p *Parser) parseGrantAdminStatement() (*GrantAdminStatement, error) { + // Admin privilege is always true when grant admin clause is called. + stmt := &GrantAdminStatement{} + + // Parse the name of the user. + lit, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.User = lit + + return stmt, nil +} + +// parsePrivilege parses a string and returns a Privilege. +func (p *Parser) parsePrivilege() (Privilege, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case READ: + return ReadPrivilege, nil + case WRITE: + return WritePrivilege, nil + case ALL: + // Consume optional PRIVILEGES token + tok, pos, lit = p.scanIgnoreWhitespace() + if tok != PRIVILEGES { + p.unscan() + } + return AllPrivileges, nil + } + return 0, newParseError(tokstr(tok, lit), []string{"READ", "WRITE", "ALL [PRIVILEGES]"}, pos) +} + +// parseSelectStatement parses a select string and returns a Statement AST object. +// This function assumes the SELECT token has already been consumed. +func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, error) { + stmt := &SelectStatement{} + var err error + + // Parse fields: "FIELD+". + if stmt.Fields, err = p.parseFields(); err != nil { + return nil, err + } + + // Parse target: "INTO" + if stmt.Target, err = p.parseTarget(tr); err != nil { + return nil, err + } + + // Parse source: "FROM". + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != FROM { + return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) + } + if stmt.Sources, err = p.parseSources(true); err != nil { + return nil, err + } + + // Parse condition: "WHERE EXPR". + if stmt.Condition, err = p.parseCondition(); err != nil { + return nil, err + } + + // Parse dimensions: "GROUP BY DIMENSION+". + if stmt.Dimensions, err = p.parseDimensions(); err != nil { + return nil, err + } + + // Parse fill options: "fill(