add vendoring with go dep

Adrian Todorov 2017-10-25 20:52:40 +00:00
parent 704f4d20d1
commit a59409f16b
1627 changed files with 489673 additions and 0 deletions
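For context, a vendor tree like this is normally produced by the dep workflow rather than by hand. A sketch of the likely commands (the commit does not record the exact invocation):

```bash
# One-time setup: analyze the project's imports, write Gopkg.toml and
# Gopkg.lock, and populate vendor/ with pinned dependency sources.
dep init

# Re-run after changing Gopkg.toml or imports to bring Gopkg.lock
# and vendor/ back in sync with the manifest.
dep ensure
```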

Gopkg.lock (generated, new file)

@@ -0,0 +1,33 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/influxdata/influxdb"
packages = ["client/v2","models","pkg/escape"]
revision = "a7c3d3c0f7b74f71919047adbc4933460967a576"
version = "v1.3.6"
[[projects]]
name = "github.com/vmware/govmomi"
packages = [".","property","session","vim25","vim25/debug","vim25/methods","vim25/mo","vim25/progress","vim25/soap","vim25/types","vim25/xml"]
revision = "b63044e5f833781eb7b305bc035392480ee06a82"
version = "v0.15.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context"]
revision = "4b14673ba32bee7f5ac0f990a48f033919fd418b"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "0b7718e6a338978c4ea1efca3255565c667967ddd6ff68999a1d1cea5112209e"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file)

@@ -0,0 +1,38 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/davecgh/go-spew"
version = "1.1.0"
[[constraint]]
name = "github.com/influxdata/influxdb"
version = "1.3.6"
[[constraint]]
name = "github.com/vmware/govmomi"
version = "0.15.0"
[[constraint]]
branch = "master"
name = "golang.org/x/net"

vendor/github.com/davecgh/go-spew/.gitignore (generated, vendored, new file)

@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/davecgh/go-spew/.travis.yml (generated, vendored, new file)

@@ -0,0 +1,14 @@
language: go
go:
- 1.5.4
- 1.6.3
- 1.7
install:
- go get -v golang.org/x/tools/cmd/cover
script:
- go test -v -tags=safe ./spew
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci

vendor/github.com/davecgh/go-spew/LICENSE (generated, vendored, new file)

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/README.md (generated, vendored, new file)

@@ -0,0 +1,205 @@
go-spew
=======
[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.
If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
## Documentation
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
## Installation
```bash
$ go get -u github.com/davecgh/go-spew/spew
```
## Quick Start
Add this import line to the file you're working in:
```Go
import "github.com/davecgh/go-spew/spew"
```
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):
```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```
## Debugging a Web Application Example
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
```Go
package main
import (
"fmt"
"html"
"net/http"
"github.com/davecgh/go-spew/spew"
)
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
}
func main() {
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
```
## Sample Dump Output
```
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) {
(string) "one": (bool) true
}
}
([]uint8) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
```
## Sample Formatter Output
Double pointer to a uint8:
```
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```
Pointer to circular struct with a uint8 field and a pointer to itself:
```
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
```
## Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
```
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables. This option
relies on access to the unsafe package, so it will not have any effect when
running in environments without access to the unsafe package such as Google
App Engine or with the "safe" build tag specified.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of capacities
for arrays, slices, maps and channels. This is useful when diffing data
structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are supported,
with other types sorted according to the reflect.Value.String() output
which guarantees display stability. Natural map order is used by
default.
* SpewKeys
SpewKeys specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only considered
if SortKeys is true.
```
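As a concrete illustration of these options (a sketch added for this write-up, not part of the upstream README), a locally scoped ConfigState can make output deterministic without touching the global spew.Config:

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Tab indentation plus sorted keys gives stable, diffable output;
	// disabling pointer addresses and capacities keeps runs comparable.
	cfg := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true,
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	cfg.Dump(map[string]int{"b": 2, "a": 1})
}
```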
## Unsafe Package Dependency
This package relies on the unsafe package to perform some of the more advanced
features, however it also supports a "limited" mode which allows it to work in
environments where the unsafe package is not available. By default, it will
operate in this mode on Google App Engine and when compiled with GopherJS. The
"safe" build tag may also be specified to force the package to build without
using the unsafe package.
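For example (invocation shown for illustration; it mirrors the `-tags=safe` test run in the .travis.yml above), the safe mode is selected at build or test time:

```bash
# Build with the pure-reflect fallback (bypasssafe.go) instead of the
# unsafe-based fast path (bypass.go).
go build -tags safe ./...
go test -tags safe ./spew
```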
## License
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.

vendor/github.com/davecgh/go-spew/cov_report.sh (generated, vendored, new file)

@@ -0,0 +1,22 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
(cd spew && gocov test -tags testcgo | gocov report)
else
(cd spew && gocov test | gocov report)
fi

vendor/github.com/davecgh/go-spew/spew/bypass.go (generated, vendored, new file)

@@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, is not being compiled by
// GopherJS, and "-tags safe" is not added to the go build command line. The
// "disableunsafe" tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe

package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored, new file)

@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe

package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored, new file)

@@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valuesSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

vendor/github.com/davecgh/go-spew/spew/common_test.go (generated, vendored, new file)

@@ -0,0 +1,298 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// custom type to test Stringer interface on non-pointer receiver.
type stringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with non-pointer receivers.
func (s stringer) String() string {
return "stringer " + string(s)
}
// custom type to test Stringer interface on pointer receiver.
type pstringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection.
type xref1 struct {
ps2 *xref2
}
type xref2 struct {
ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
// reference for testing detection.
type indirCir1 struct {
ps2 *indirCir2
}
type indirCir2 struct {
ps3 *indirCir3
}
type indirCir3 struct {
ps1 *indirCir1
}
// embed is used to test embedded structures.
type embed struct {
a string
}
// embedwrap is used to test embedded structures.
type embedwrap struct {
*embed
e *embed
}
// panicer is used to intentionally cause a panic for testing that spew
// properly handles them.
type panicer int
func (p panicer) String() string {
panic("test panic")
}
// customError is used to test custom error interface invocation.
type customError int
func (e customError) Error() string {
return fmt.Sprintf("error: %d", int(e))
}
// stringizeWants converts a slice of wanted test output into a format suitable
// for a test error message.
func stringizeWants(wants []string) string {
s := ""
for i, want := range wants {
if i > 0 {
s += fmt.Sprintf("want%d: %s", i+1, want)
} else {
s += "want: " + want
}
}
return s
}
// testFailed returns whether or not a test failed by checking if the result
// of the test is in the slice of wanted strings.
func testFailed(result string, wants []string) bool {
for _, want := range wants {
if result == want {
return false
}
}
return true
}
type sortableStruct struct {
x int
}
func (ss sortableStruct) String() string {
return fmt.Sprintf("ss.%d", ss.x)
}
type unsortableStruct struct {
x int
}
type sortTestCase struct {
input []reflect.Value
expected []reflect.Value
}
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
getInterfaces := func(values []reflect.Value) []interface{} {
interfaces := []interface{}{}
for _, v := range values {
interfaces = append(interfaces, v.Interface())
}
return interfaces
}
for _, test := range tests {
spew.SortValues(test.input, cs)
// reflect.DeepEqual cannot really make sense of reflect.Value,
// probably because of all the pointer tricks. For instance,
// v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}
// instead.
input := getInterfaces(test.input)
expected := getInterfaces(test.expected)
if !reflect.DeepEqual(input, expected) {
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
}
}
}
// TestSortValues ensures the sort functionality for reflect.Value based sorting
// works as intended.
func TestSortValues(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
embedA := v(embed{"a"})
embedB := v(embed{"b"})
embedC := v(embed{"c"})
tests := []sortTestCase{
// No values.
{
[]reflect.Value{},
[]reflect.Value{},
},
// Bools.
{
[]reflect.Value{v(false), v(true), v(false)},
[]reflect.Value{v(false), v(false), v(true)},
},
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Uints.
{
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
},
// Floats.
{
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// Array
{
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
},
// Uintptrs.
{
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
},
// SortableStructs.
{
// Note: not sorted - DisableMethods is set.
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
// Invalid.
{
[]reflect.Value{embedB, embedA, embedC},
[]reflect.Value{embedB, embedA, embedC},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
// based sorting works as intended when using string methods.
func TestSortValuesWithMethods(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
// based sorting works as intended when using spew to stringify keys.
func TestSortValuesWithSpew(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
helpTestSortValues(tests, &cs, t)
}

vendor/github.com/davecgh/go-spew/spew/config.go (generated, vendored, new file)

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Sprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}

vendor/github.com/davecgh/go-spew/spew/doc.go (generated, vendored, new file)

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
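For instance, in this illustrative sketch (the exact wording of the
inline panic annotation may differ), a Stringer that panics is still
dumped safely:
type boom int
func (b boom) String() string { panic("unreachable") }
spew.Dump(boom(1)) // panic details are printed inline in the dump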
*/
package spew

509
vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
View File

@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
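// For example, a cgo-generated type name such as
// "testdata._Ctype_char" matches this pattern.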
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
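// For example: given i := 5, p := &i, and pp := &p, dumping pp walks
// pp -> p -> i, appending &p and then &i to pointerChain, and finishes
// with ve holding the reflect.Value for i and indirects == 2.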
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound:
d.w.Write(nilAngleBytes)
case cycleFound:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
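// For example, a []byte{0xde, 0xad, 0xbe, 0xef} value takes the
// vt.Kind() == reflect.Uint8 path below and is hexdumped directly,
// while cgo char arrays are first converted element by element into
// buf before being hexdumped.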
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || (!d.cs.DisableCapacities && valueCap != 0) {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled.
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
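For example, the exported global can be adjusted before dumping; an
illustrative snippet, with myVar standing in for any value:
spew.Config.Indent = "\t"
spew.Config.MaxDepth = 2
spew.Dump(myVar)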
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}

1042
vendor/github.com/davecgh/go-spew/spew/dump_test.go generated vendored Normal file

File diff suppressed because it is too large

99
vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This means the cgo tests are only added (and hence run) when
specifically requested. This configuration is used because spew itself
// does not require cgo to run even though it does handle certain cgo types
// specially. Rather than forcing all clients to require cgo and an external
// C compiler just to run the tests, this scheme makes them optional.
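// For example, these tests can be run with a command along the lines
// of: go test -tags testcgo ./spew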
// +build cgo,testcgo
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew/testdata"
)
func addCgoDumpTests() {
// C char pointer.
v := testdata.GetCgoCharPointer()
nv := testdata.GetCgoNullCharPointer()
pv := &v
vcAddr := fmt.Sprintf("%p", v)
vAddr := fmt.Sprintf("%p", pv)
pvAddr := fmt.Sprintf("%p", &pv)
vt := "*testdata._Ctype_char"
vs := "116"
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(nv, "("+vt+")(<nil>)\n")
// C char array.
v2, v2l, v2c := testdata.GetCgoCharArray()
v2Len := fmt.Sprintf("%d", v2l)
v2Cap := fmt.Sprintf("%d", v2c)
v2t := "[6]testdata._Ctype_char"
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
"{\n 00000000 74 65 73 74 32 00 " +
" |test2.|\n}"
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
// C unsigned char array.
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
v3Len := fmt.Sprintf("%d", v3l)
v3Cap := fmt.Sprintf("%d", v3c)
v3t := "[6]testdata._Ctype_unsignedchar"
v3t2 := "[6]testdata._Ctype_uchar"
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
"{\n 00000000 74 65 73 74 33 00 " +
" |test3.|\n}"
addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
// C signed char array.
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
v4Len := fmt.Sprintf("%d", v4l)
v4Cap := fmt.Sprintf("%d", v4c)
v4t := "[6]testdata._Ctype_schar"
v4t2 := "testdata._Ctype_schar"
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
") 0\n}"
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
// C uint8_t array.
v5, v5l, v5c := testdata.GetCgoUint8tArray()
v5Len := fmt.Sprintf("%d", v5l)
v5Cap := fmt.Sprintf("%d", v5c)
v5t := "[6]testdata._Ctype_uint8_t"
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
"{\n 00000000 74 65 73 74 35 00 " +
" |test5.|\n}"
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
// C typedefed unsigned char array.
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
v6Len := fmt.Sprintf("%d", v6l)
v6Cap := fmt.Sprintf("%d", v6c)
v6t := "[6]testdata._Ctype_custom_uchar_t"
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
"{\n 00000000 74 65 73 74 36 00 " +
" |test6.|\n}"
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
}

View File

@ -0,0 +1,26 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either cgo is not supported or "-tags testcgo" is not added to the go
// test command line. This file intentionally does not setup any cgo tests in
// this scenario.
// +build !cgo !testcgo
package spew_test
func addCgoDumpTests() {
// Don't add any tests for cgo since this file is only compiled when
// there should not be any cgo tests.
}

226
vendor/github.com/davecgh/go-spew/spew/example_test.go generated vendored Normal file
View File

@ -0,0 +1,226 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
// This example demonstrates how to use Dump to dump variables to stdout.
func ExampleDump() {
// The following package level declarations are assumed for this example:
/*
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
*/
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
f := Flag(5)
b := []byte{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32,
}
// Dump!
spew.Dump(s1, f, b)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Flag) Unknown flag (5)
// ([]uint8) (len=34 cap=34) {
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
// 00000020 31 32 |12|
// }
//
}
// This example demonstrates how to use Printf to display a variable with a
// format string and inline formatting.
func ExamplePrintf() {
// Create a double pointer to a uint8.
ui8 := uint8(5)
pui8 := &ui8
ppui8 := &pui8
// Create a circular data type.
type circular struct {
ui8 uint8
c *circular
}
c := circular{ui8: 1}
c.c = &c
// Print!
spew.Printf("ppui8: %v\n", ppui8)
spew.Printf("circular: %v\n", c)
// Output:
// ppui8: <**>5
// circular: {1 <*>{1 <*><shown>}}
}
// This example demonstrates how to use a ConfigState.
func ExampleConfigState() {
// Modify the indent level of the ConfigState only. The global
// configuration is not modified.
scs := spew.ConfigState{Indent: "\t"}
// Output using the ConfigState instance.
v := map[string]int{"one": 1}
scs.Printf("v: %v\n", v)
scs.Dump(v)
// Output:
// v: map[one:1]
// (map[string]int) (len=1) {
// (string) (len=3) "one": (int) 1
// }
}
// This example demonstrates how to use ConfigState.Dump to dump variables to
// stdout
func ExampleConfigState_Dump() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances with different indentation.
scs := spew.ConfigState{Indent: "\t"}
scs2 := spew.ConfigState{Indent: " "}
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
// Dump using the ConfigState instances.
scs.Dump(s1)
scs2.Dump(s1)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
//
}
// This example demonstrates how to use ConfigState.Printf to display a variable
// with a format string and inline formatting.
func ExampleConfigState_Printf() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances and modify the method handling of the
// first ConfigState only.
scs := spew.NewDefaultConfig()
scs2 := spew.NewDefaultConfig()
scs.DisableMethods = true
// Alternatively
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
// scs2 := spew.ConfigState{Indent: " "}
// This is of type Flag which implements a Stringer and has raw value 1.
f := flagTwo
// Dump using the ConfigState instances.
scs.Printf("f: %v\n", f)
scs2.Printf("f: %v\n", f)
// Output:
// f: 1
// f: flagTwo
}

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
View File

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
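// For example, a %6.2f verb with no flags that reaches this path is
// rebuilt as the string "%6.2f" and handed back to the fmt package.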
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
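Should a direct call be useful, it composes with the standard fmt
package, as in this illustrative snippet:
fmt.Printf("%+v\n", spew.NewFormatter(myVar))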
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}

1558
vendor/github.com/davecgh/go-spew/spew/format_test.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
)
// dummyFmtState implements a fake fmt.State to use for testing invalid
// reflect.Value handling. This is necessary because the fmt package catches
// invalid values before invoking the formatter on them.
type dummyFmtState struct {
bytes.Buffer
}
func (dfs *dummyFmtState) Flag(f int) bool {
return f == int('+')
}
func (dfs *dummyFmtState) Precision() (int, bool) {
return 0, false
}
func (dfs *dummyFmtState) Width() (int, bool) {
return 0, false
}
// TestInvalidReflectValue ensures the dump and formatter code handles an
// invalid reflect value properly. This needs access to internal state since it
// should never happen in real code and therefore can't be tested via the public
// API.
func TestInvalidReflectValue(t *testing.T) {
i := 1
// Dump invalid reflect value.
v := new(reflect.Value)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(*v)
s := buf.String()
want := "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter invalid reflect value.
buf2 := new(dummyFmtState)
f := formatState{value: *v, cs: &Config, fs: buf2}
f.format(*v)
s = buf2.String()
want = "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
}
}
// SortValues makes the internal sortValues function available to the test
// package.
func SortValues(values []reflect.Value, cs *ConfigState) {
sortValues(values, cs)
}

View File

@ -0,0 +1,102 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, is not compiled by
// GopherJS, and "-tags safe" is not added to the go build command line. The
// "disableunsafe"
// tag is deprecated and thus should not be used.
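// For example, building or testing with a command along the lines of
// "go test -tags safe ./spew" excludes this file and exercises the
// unsafe-free code paths instead.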
// +build !js,!appengine,!safe,!disableunsafe
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
"unsafe"
)
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
// the maximum kind value which does not exist. This is needed to test the
// fallback code which punts to the standard fmt library for new types that
// might get added to the language.
func changeKind(v *reflect.Value, readOnly bool) {
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
if readOnly {
*rvf |= flagRO
} else {
*rvf &= ^uintptr(flagRO)
}
}
// TestAddedReflectValue tests functionality of the dump and formatter code which
// falls back to the standard fmt library for new types that might get added to
// the language.
func TestAddedReflectValue(t *testing.T) {
i := 1
// Dump using a reflect.Value that is exported.
v := reflect.ValueOf(int8(5))
changeKind(&v, false)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(v)
s := buf.String()
want := "(int8) 5"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Dump using a reflect.Value that is not exported.
changeKind(&v, true)
buf.Reset()
d.dump(v)
s = buf.String()
want = "(int8) <int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is exported.
changeKind(&v, false)
buf2 := new(dummyFmtState)
f := formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "5"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is not exported.
changeKind(&v, true)
buf2.Reset()
f = formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "<int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
}

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
View File

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}

320
vendor/github.com/davecgh/go-spew/spew/spew_test.go generated vendored Normal file
View File

@ -0,0 +1,320 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/davecgh/go-spew/spew"
)
// spewFunc is used to identify which public function of the spew package or
// ConfigState a test applies to.
type spewFunc int
const (
fCSFdump spewFunc = iota
fCSFprint
fCSFprintf
fCSFprintln
fCSPrint
fCSPrintln
fCSSdump
fCSSprint
fCSSprintf
fCSSprintln
fCSErrorf
fCSNewFormatter
fErrorf
fFprint
fFprintln
fPrint
fPrintln
fSdump
fSprint
fSprintf
fSprintln
)
// Map of spewFunc values to names for pretty printing.
var spewFuncStrings = map[spewFunc]string{
fCSFdump: "ConfigState.Fdump",
fCSFprint: "ConfigState.Fprint",
fCSFprintf: "ConfigState.Fprintf",
fCSFprintln: "ConfigState.Fprintln",
fCSSdump: "ConfigState.Sdump",
fCSPrint: "ConfigState.Print",
fCSPrintln: "ConfigState.Println",
fCSSprint: "ConfigState.Sprint",
fCSSprintf: "ConfigState.Sprintf",
fCSSprintln: "ConfigState.Sprintln",
fCSErrorf: "ConfigState.Errorf",
fCSNewFormatter: "ConfigState.NewFormatter",
fErrorf: "spew.Errorf",
fFprint: "spew.Fprint",
fFprintln: "spew.Fprintln",
fPrint: "spew.Print",
fPrintln: "spew.Println",
fSdump: "spew.Sdump",
fSprint: "spew.Sprint",
fSprintf: "spew.Sprintf",
fSprintln: "spew.Sprintln",
}
func (f spewFunc) String() string {
if s, ok := spewFuncStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
}
// spewTest is used to describe a test to be performed against the public
// functions of the spew package or ConfigState.
type spewTest struct {
cs *spew.ConfigState
f spewFunc
format string
in interface{}
want string
}
// spewTests houses the tests to be performed against the public functions of
// the spew package and ConfigState.
//
// These tests are only intended to ensure the public functions are exercised
// and are intentionally not exhaustive of types. The exhaustive type
// tests are handled in the dump and format tests.
var spewTests []spewTest
// redirStdout is a helper function to return the standard output from f as a
// byte slice.
func redirStdout(f func()) ([]byte, error) {
tempFile, err := ioutil.TempFile("", "ss-test")
if err != nil {
return nil, err
}
fileName := tempFile.Name()
defer os.Remove(fileName) // Ignore error
origStdout := os.Stdout
os.Stdout = tempFile
f()
os.Stdout = origStdout
tempFile.Close()
return ioutil.ReadFile(fileName)
}
func initSpewTests() {
// Config states with various settings.
scsDefault := spew.NewDefaultConfig()
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
scsNoCap := &spew.ConfigState{DisableCapacities: true}
// Variables for tests on types which implement Stringer interface with and
// without a pointer receiver.
ts := stringer("test")
tps := pstringer("test")
type ptrTester struct {
s *struct{}
}
tptr := &ptrTester{s: &struct{}{}}
// depthTester is used to test max depth handling for structs, array, slices
// and maps.
type depthTester struct {
ic indirCir1
arr [1]string
slice []string
m map[string]int
}
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
map[string]int{"one": 1}}
// Variable for tests on types which implement error interface.
te := customError(10)
spewTests = []spewTest{
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
{scsDefault, fCSFprint, "", int16(32767), "32767"},
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
{scsDefault, fFprint, "", float32(3.14), "3.14"},
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
{scsDefault, fPrint, "", true, "true"},
{scsDefault, fPrintln, "", false, "false\n"},
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
{scsNoMethods, fCSFprint, "", ts, "test"},
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
{scsNoMethods, fCSFprint, "", tps, "test"},
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
{scsNoPmethods, fCSFprint, "", tps, "test"},
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
"(len=4) (stringer test) \"test\"\n"},
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
"(error: 10) 10\n"},
{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
}
}
// TestSpew executes all of the tests described by spewTests.
func TestSpew(t *testing.T) {
initSpewTests()
t.Logf("Running %d tests", len(spewTests))
for i, test := range spewTests {
buf := new(bytes.Buffer)
switch test.f {
case fCSFdump:
test.cs.Fdump(buf, test.in)
case fCSFprint:
test.cs.Fprint(buf, test.in)
case fCSFprintf:
test.cs.Fprintf(buf, test.format, test.in)
case fCSFprintln:
test.cs.Fprintln(buf, test.in)
case fCSPrint:
b, err := redirStdout(func() { test.cs.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSPrintln:
b, err := redirStdout(func() { test.cs.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSSdump:
str := test.cs.Sdump(test.in)
buf.WriteString(str)
case fCSSprint:
str := test.cs.Sprint(test.in)
buf.WriteString(str)
case fCSSprintf:
str := test.cs.Sprintf(test.format, test.in)
buf.WriteString(str)
case fCSSprintln:
str := test.cs.Sprintln(test.in)
buf.WriteString(str)
case fCSErrorf:
err := test.cs.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fCSNewFormatter:
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
case fErrorf:
err := spew.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fFprint:
spew.Fprint(buf, test.in)
case fFprintln:
spew.Fprintln(buf, test.in)
case fPrint:
b, err := redirStdout(func() { spew.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fPrintln:
b, err := redirStdout(func() { spew.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fSdump:
str := spew.Sdump(test.in)
buf.WriteString(str)
case fSprint:
str := spew.Sprint(test.in)
buf.WriteString(str)
case fSprintf:
str := spew.Sprintf(test.format, test.in)
buf.WriteString(str)
case fSprintln:
str := spew.Sprintln(test.in)
buf.WriteString(str)
default:
t.Errorf("%v #%d unrecognized function", test.f, i)
continue
}
s := buf.String()
if test.want != s {
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
continue
}
}
}

View File

@ -0,0 +1,82 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This code should really only be in the dumpcgo_test.go file,
// but unfortunately Go will not allow cgo in test files, so this is a
// workaround to allow cgo types to be tested. This configuration is used
// because spew itself does not require cgo to run even though it does handle
// certain cgo types specially. Rather than forcing all clients to require cgo
// and an external C compiler just to run the tests, this scheme makes them
// optional.
// +build cgo,testcgo
package testdata
/*
#include <stdint.h>
typedef unsigned char custom_uchar_t;
char *ncp = 0;
char *cp = "test";
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
*/
import "C"
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
// used for tests.
func GetCgoNullCharPointer() interface{} {
return C.ncp
}
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
// tests.
func GetCgoCharPointer() interface{} {
return C.cp
}
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
// This is only used for tests.
func GetCgoCharArray() (interface{}, int, int) {
return C.ca, len(C.ca), cap(C.ca)
}
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
// array's len and cap. This is only used for tests.
func GetCgoUnsignedCharArray() (interface{}, int, int) {
return C.uca, len(C.uca), cap(C.uca)
}
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
// and cap. This is only used for tests.
func GetCgoSignedCharArray() (interface{}, int, int) {
return C.sca, len(C.sca), cap(C.sca)
}
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
// cap. This is only used for tests.
func GetCgoUint8tArray() (interface{}, int, int) {
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
}
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
// cgo and the array's len and cap. This is only used for tests.
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
return C.tuca, len(C.tuca), cap(C.tuca)
}

61
vendor/github.com/davecgh/go-spew/test_coverage.txt generated vendored Normal file

@ -0,0 +1,61 @@
github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)

1
vendor/github.com/influxdata/influxdb/.dockerignore generated vendored Normal file

@ -0,0 +1 @@
build


@ -0,0 +1,56 @@
### Directions
_GitHub Issues are reserved for actionable bug reports and feature requests._
_General questions should be sent to the [InfluxDB Community Site](https://community.influxdata.com)._
_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._
_If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below._
_Erase the other section and everything on and above this line._
### Bug report
__System info:__ [Include InfluxDB version, operating system name, and other relevant details]
__Steps to reproduce:__
1. [First Step]
2. [Second Step]
3. [and so on...]
__Expected behavior:__ [What you expected to happen]
__Actual behavior:__ [What actually happened]
__Additional info:__ [Include gist of relevant config, logs, etc.]
Also, if this is an issue of performance, locking, etc., the following commands are useful to create debug information for the team.
```
curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true"
curl -o vars.txt "http://localhost:8086/debug/vars"
iostat -xd 1 30 > iostat.txt
```
**Please note:** It will take at least 30 seconds for the first cURL command above to return a response.
This is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect.
Ideally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time.
If you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether.
Please run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue.
*Please note, the quickest way to fix a bug is to open a Pull Request.*
### Feature Request
Opening a feature request kicks off a discussion.
Requests may be closed if we're not actively planning to work on them.
__Proposal:__ [Description of the feature]
__Current behavior:__ [What currently happens]
__Desired behavior:__ [What you would like to happen]
__Use case:__ [Why is this important (helps with prioritizing requests)]


@ -0,0 +1,13 @@
###### Required for all non-trivial PRs
- [ ] Rebased/mergable
- [ ] Tests pass
- [ ] CHANGELOG.md updated
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
###### Required only if applicable
_You can erase any checkboxes below this note if they are not applicable to your Pull Request._
- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated
- [ ] Provide example syntax
- [ ] Update man page when modifying a command
- [ ] Config changes: update sample config (`etc/config.sample.toml`), server `NewDemoConfig` method, and `Diagnostics` methods reporting config settings, if necessary
- [ ] [InfluxData Documentation](https://github.com/influxdata/docs.influxdata.com): issue filed or pull request submitted \<link to issue or pull request\>

76
vendor/github.com/influxdata/influxdb/.gitignore generated vendored Normal file

@ -0,0 +1,76 @@
# Keep editor-specific, non-project specific ignore rules in global .gitignore:
# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore
*~
src/
config.json
/bin/
/query/a.out*
# ignore generated files.
cmd/influxd/version.go
# executables
*.test
influx_tsm
**/influx_tsm
!**/influx_tsm/
influx_stress
**/influx_stress
!**/influx_stress/
influxd
**/influxd
!**/influxd/
influx
**/influx
!**/influx/
influxdb
**/influxdb
!**/influxdb/
influx_inspect
**/influx_inspect
!**/influx_inspect/
/benchmark-tool
/main
/benchmark-storage
godef
gosym
gocode
inspect-raft
# dependencies
out_rpm/
packages/
# autoconf
autom4te.cache/
config.log
config.status
# log file
influxdb.log
benchmark.log
# config file
config.toml
# test data files
integration/migration_data/
# man outputs
man/*.xml
man/*.1
man/*.1.gz
# test outputs
/test-results.xml

23
vendor/github.com/influxdata/influxdb/.hooks/pre-commit generated vendored Executable file

@ -0,0 +1,23 @@
#!/usr/bin/env bash
fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l`
if [ $fmtcount -gt 0 ]; then
echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing"
exit 1
fi
vetcount=`go tool vet ./ 2>&1 | wc -l`
if [ $vetcount -gt 0 ]; then
echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing"
exit 1
fi
# Ensure FIXME lines are removed before commit.
fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g')
if [ "$fixme_lines" != "" ]; then
echo "Please remove the following lines:"
echo -e "$fixme_lines"
exit 1
fi
exit 0

6
vendor/github.com/influxdata/influxdb/.mention-bot generated vendored Normal file

@ -0,0 +1,6 @@
{
"maxReviewers": 3,
"fileBlacklist": ["CHANGELOG.md"],
"userBlacklist": ["pauldix", "toddboom", "aviau", "mark-rushakoff"],
"requiredOrgs": ["influxdata"]
}

2724
vendor/github.com/influxdata/influxdb/CHANGELOG.md generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,82 @@
_This document is currently in draft form._
# Background
The goal of this guide is to capture some Dos and Don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However, there are still some practices not enforceable by any tool. This guide lists some specific practices to follow when writing code for the database.
*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices.
# The Guidelines
## Try not to use third-party libraries
A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
## Always include a default case in a 'switch' statement
The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertion switch. So always include a `default` case unless you have an explicit reason not to.
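For illustration, here is a minimal sketch of a type switch where the `default` case turns a silently ignored type into a visible error (the shape types are invented for this example, not taken from the InfluxDB codebase):
```
package main

import "fmt"

type circle struct{ r float64 }
type square struct{ side float64 }

func area(v interface{}) (float64, error) {
	switch s := v.(type) {
	case circle:
		return 3.14159 * s.r * s.r, nil
	case square:
		return s.side * s.side, nil
	default:
		// Without this default case, an unhandled type would be
		// silently ignored; reporting it surfaces the bug early.
		return 0, fmt.Errorf("unsupported type %T", s)
	}
}

func main() {
	fmt.Println(area(circle{r: 2}))
	fmt.Println(area("not a shape")) // hits the default case
}
```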
## When -- and when not -- to set a channel to 'nil'
## Use defer with anonymous functions to handle complex locking
Consider a block of code like the following.
```
mu.Lock()
if foo == "quit" {
mu.Unlock()
return
} else if foo == "continue" {
if bar == "quit" {
mu.Unlock()
return
}
bar = "still going"
} else {
qux = "here at last"
mu.Unlock()
return
}
foo = "more to do"
bar = "still more to do"
mu.Unlock()
qux = "finished now"
return
```
While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. One way to address this is to use an anonymous function like so:
```
more := func() bool {
mu.Lock()
defer mu.Unlock()
if foo == "quit" {
return false
} else if foo == "continue" {
if bar == "quit" {
return false
}
bar = "still going"
} else {
qux = "here at last"
return false
}
foo = "more to do"
bar = "still more to do"
return true
}()
if more {
qux = "finished"
}
return
```
This allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` will still run even in the event of a panic, ensuring the locks will be released even in that case.
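To make that last point concrete, here is a minimal standalone sketch (not InfluxDB code) showing that the deferred `Unlock` runs even when the function panics, so the mutex can be re-acquired afterwards:
```
package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

func risky() {
	mu.Lock()
	defer mu.Unlock() // runs even during a panic
	panic("something went wrong")
}

func main() {
	func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Println("recovered:", r)
			}
		}()
		risky()
	}()
	// The deferred Unlock released the mutex during the panic,
	// so this does not deadlock.
	mu.Lock()
	fmt.Println("lock re-acquired")
	mu.Unlock()
}
```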
## When to call 'panic()'
# Useful links
- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go)
- [Go in production](http://peter.bourgon.org/go-in-production/)
- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/)
- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables`

280
vendor/github.com/influxdata/influxdb/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,280 @@
Contributing to InfluxDB
========================
Bug reports
---------------
Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following.
* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04.
* The version of InfluxDB you are running
* Whether you installed it using a pre-built package, or built it from source.
* A small test case, if applicable, that demonstrates the issue.
Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.**
If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)
Test cases should be in the form of `curl` commands. For example:
```bash
# create database
curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb"
# create retention policy
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT"
# write data
curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61"
# Delete a Measurement
curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu'
# Query the Measurement
# Bug: expected it to return no data, but data comes back.
curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu'
```
**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed.
Feature requests
---------------
We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB.
Contributing to the source code
---------------
InfluxDB follows standard Go project structure. This means that all your Go development is done in `$GOPATH/src`. `GOPATH` can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below.
You should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md), to understand better how to write code for InfluxDB.
Submitting a pull request
------------
To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged.
There will usually be some back and forth as we finalize the change, but once that completes it may be merged.
To assist in review for the PR, please add the following to your pull request comment:
```md
- [ ] CHANGELOG.md updated
- [ ] Rebased/mergable
- [ ] Tests pass
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
```
Signing the CLA
---------------
If you are going to be contributing back to InfluxDB please take a
second to sign our CLA, which can be found
[on our website](https://influxdata.com/community/cla/).
Installing Go
-------------
InfluxDB requires Go 1.8.3.
At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
After installing gvm you can install and set the default go version by
running the following:
gvm install go1.8.3
gvm use go1.8.3 --default
Installing GDM
-------------
InfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies. Install it by running the following:
go get github.com/sparrc/gdm
Revision Control Systems
-------------
Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software on your system.
Currently the project only depends on `git` and `mercurial`.
* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
Getting the source
------
Set up the project structure and fetch the repo like so:
```bash
mkdir $HOME/gocodez
export GOPATH=$HOME/gocodez
go get github.com/influxdata/influxdb
```
You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh startup file so it is set for every shell, instead of having to run it manually every time.
Cloning a fork
-------------
If you wish to work with a fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork:
```bash
export GOPATH=$HOME/gocodez
mkdir -p $GOPATH/src/github.com/influxdata
cd $GOPATH/src/github.com/influxdata
git clone git@github.com:<username>/influxdb
```
Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly.
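The reason is that Go resolves import paths relative to `$GOPATH/src`. For example, a file like the following (a minimal sketch; `models` is one of InfluxDB's real packages) compiles only if the repository sits at `$GOPATH/src/github.com/influxdata/influxdb`, no matter which remote it was cloned from:
```go
package main

import (
	"fmt"

	// Resolved as $GOPATH/src/github.com/influxdata/influxdb/models,
	// which is why the directory layout above must be preserved.
	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"host": "server01"})
	fmt.Println(tags)
}
```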
Build and Test
-----
Make sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands:
```bash
cd $GOPATH/src/github.com/influxdata/influxdb
gdm restore
```
To then build and install the binaries, run the following command.
```bash
go clean ./...
go install ./...
```
The binaries will be located in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`.
To set the version and commit flags during the build pass the following to the **install** command:
```bash
-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT"
```
where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.
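These flags work because the linker's `-X` option overwrites package-level string variables at link time. A minimal sketch of the receiving side (illustrative; not the exact InfluxDB source) looks like this:
```go
package main

import "fmt"

// Populated at build time via, e.g.:
//   go install -ldflags="-X main.version=1.3.6 -X main.branch=master -X main.commit=abc123" ./...
// If the flags are omitted, these default to empty strings.
var (
	version string
	branch  string
	commit  string
)

func main() {
	fmt.Printf("InfluxDB v%s (branch %s, commit %s)\n", version, branch, commit)
}
```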
If you want to build packages, see `build.py` usage information:
```bash
python build.py --help
# Or to build a package for your current system
python build.py --package
```
To run the tests, execute the following command:
```bash
cd $GOPATH/src/github.com/influxdata/influxdb
go test -v ./...
# run tests that match some pattern
go test -run=TestDatabase . -v
# run tests and show coverage
go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover
```
To install go cover, run the following command:
```
go get golang.org/x/tools/cmd/cover
```
Generated Google Protobuf code
-----------------
Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.
First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 2.6.1 or later for your OS.
Then install the go plugins:
```bash
go get github.com/gogo/protobuf/proto
go get github.com/gogo/protobuf/protoc-gen-gogo
go get github.com/gogo/protobuf/gogoproto
```
Finally, run `go generate` after updating any `*.proto` file:
```bash
go generate ./...
```
**Troubleshooting**
If generating the protobuf code is failing for you, check each of the following:
* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed.
* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`.
Generated Go Templates
----------------------
The query engine requires optimized data structures for each data type so
instead of writing each implementation several times we use templates. _Do not
change code that ends in a `.gen.go` extension!_ Instead you must edit the
`.gen.go.tmpl` file that was used to generate it.
Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility
to generate the code:
```sh
$ go get github.com/benbjohnson/tmpl
```
Then you can regenerate all templates in the project:
```sh
$ go generate ./...
```
[tmpl]: https://github.com/benbjohnson/tmpl
Pre-commit checks
-------------
We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:
```bash
cd $GOPATH/src/github.com/influxdata/influxdb
cp .hooks/pre-commit .git/hooks/
```
If the commit is rejected because it's not formatted, you can run the following to format the code:
```
go fmt ./...
go vet ./...
```
To install go vet, run the following command:
```
go get golang.org/x/tools/cmd/vet
```
NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above.
For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).
Profiling
-----
When troubleshooting CPU or memory problems, the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. For example:
```sh
# start influx with profiling
./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof
# run queries, writes, whatever you're testing
# Quit out of influxd; influxdcpu.prof and influxdmem.prof will then be written.
# open up pprof to examine the profiling data.
go tool pprof ./influxd influxdcpu.prof
# once inside run "web", opens up browser with the CPU graph
# can also run "web <function name>" to zoom in. Or "list <function name>" to see specific lines
```
Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.
If you are profiling benchmarks built with the `testing` package, you may wish
to use the [`github.com/pkg/profile`](https://github.com/pkg/profile) package to limit
the code being profiled:
```go
func BenchmarkSomething(b *testing.B) {
// do something intensive like fill database with data...
defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop()
// do something that you want to profile...
}
```
Continuous Integration testing
-----
InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.


@ -0,0 +1,35 @@
FROM ioft/i386-ubuntu:14.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python-software-properties \
software-properties-common \
wget \
git \
mercurial \
make \
ruby \
ruby-dev \
rpm \
zip \
python \
python-boto
RUN gem install fpm
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.8.3
ENV GO_ARCH 386
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
ENV PATH /usr/local/go/bin:$PATH
ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p $PROJECT_DIR
WORKDIR $PROJECT_DIR
VOLUME $PROJECT_DIR
ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ]


@ -0,0 +1,38 @@
FROM ubuntu:trusty
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python-software-properties \
software-properties-common \
wget \
git \
mercurial \
make \
ruby \
ruby-dev \
rpm \
zip \
python \
python-boto \
asciidoc \
xmlto \
docbook-xsl
RUN gem install fpm
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.8.3
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
ENV PATH /usr/local/go/bin:$PATH
ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p $PROJECT_DIR
WORKDIR $PROJECT_DIR
VOLUME $PROJECT_DIR
ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ]


@ -0,0 +1,43 @@
FROM ubuntu:trusty
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
python-software-properties \
software-properties-common \
wget \
git \
mercurial \
make \
ruby \
ruby-dev \
rpm \
zip \
python \
python-boto
RUN gem install fpm
# Setup env
ENV GOPATH /root/go
ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p $PROJECT_DIR
VOLUME $PROJECT_DIR
# Install go
ENV GO_VERSION 1.8.3
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
# Clone Go tip for compilation
ENV GOROOT_BOOTSTRAP /usr/local/go
RUN git clone https://go.googlesource.com/go
ENV PATH /go/bin:$PATH
# Add script for compiling go
ENV GO_CHECKOUT master
ADD ./gobuild.sh /gobuild.sh
ENTRYPOINT [ "/gobuild.sh" ]


@ -0,0 +1,12 @@
FROM 32bit/ubuntu:14.04
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git
RUN add-apt-repository ppa:evarlast/golang1.4
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go
ENV GOPATH=/root/go
RUN mkdir -p /root/go/src/github.com/influxdata/influxdb
RUN mkdir -p /tmp/artifacts
VOLUME /root/go/src/github.com/influxdata/influxdb
VOLUME /tmp/artifacts

21
vendor/github.com/influxdata/influxdb/Godeps generated vendored Normal file

@ -0,0 +1,21 @@
collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e
github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca
github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda
github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb
github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef
github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486
github.com/gogo/protobuf 30433562cfbf487fe1df7cd26c7bab168d2f14d0
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/google/go-cmp 18107e6c56edb2d51f965f7d68e59404f0daee54
github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967
github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815
github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447
github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac
github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d
github.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce
github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6
github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577
golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd

20
vendor/github.com/influxdata/influxdb/LICENSE generated vendored Normal file

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013-2016 Errplane Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,25 @@
# List
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)

39
vendor/github.com/influxdata/influxdb/Makefile generated vendored Normal file

@ -0,0 +1,39 @@
PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique)
default:
metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck
deadcode:
@deadcode $(PACKAGES) 2>&1
cyclo:
@gocyclo -over 10 $(PACKAGES)
aligncheck:
@aligncheck $(PACKAGES)
defercheck:
@defercheck $(PACKAGES)
structcheck:
@structcheck $(PACKAGES)
lint:
@for pkg in $(PACKAGES); do golint $$pkg; done
errcheck:
@for pkg in $(PACKAGES); do \
errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg; \
done
tools:
go get github.com/remyoudompheng/go-misc/deadcode
go get github.com/alecthomas/gocyclo
go get github.com/opennota/check/...
go get github.com/golang/lint/golint
go get github.com/kisielk/errcheck
go get github.com/sparrc/gdm
.PHONY: default metalint deadcode cyclo aligncheck defercheck structcheck lint errcheck tools

180
vendor/github.com/influxdata/influxdb/QUERIES.md generated vendored Normal file

@ -0,0 +1,180 @@
The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes.
# Databases & retention policies
```sql
-- create a database
CREATE DATABASE <name>
-- create a retention policy
CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]
-- alter retention policy
ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+
-- drop a database
DROP DATABASE <name>
-- drop a retention policy
DROP RETENTION POLICY <rp-name> ON <db-name>
```
where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer.
If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
# Users and permissions
```sql
-- create user
CREATE USER <name> WITH PASSWORD '<password>'
-- grant privilege on a database
GRANT <privilege> ON <db> TO <user>
-- grant cluster admin privileges
GRANT ALL [PRIVILEGES] TO <user>
-- revoke privilege
REVOKE <privilege> ON <db> FROM <user>
-- revoke all privileges for a DB
REVOKE ALL [PRIVILEGES] ON <db> FROM <user>
-- revoke all privileges including cluster admin
REVOKE ALL [PRIVILEGES] FROM <user>
-- combine db creation with privilege assignment (user must already exist)
CREATE DATABASE <name> GRANT <privilege> TO <user>
CREATE DATABASE <name> REVOKE <privilege> FROM <user>
-- delete a user
DROP USER <name>
```
where `<privilege> := READ | WRITE | ALL`.
Authentication must be enabled in the influxdb.conf file for user permissions to be in effect.
By default, newly created users have no privileges to any databases.
Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.
# Select
```sql
SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)
SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region
```
## Group By
# Delete
# Series
## Destroy
```sql
DROP MEASUREMENT <name>
DROP MEASUREMENT cpu WHERE region = 'uswest'
```
## Show
Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery.
```sql
-- show all databases
SHOW DATABASES
-- show measurement names
SHOW MEASUREMENTS
SHOW MEASUREMENTS LIMIT 15
SHOW MEASUREMENTS LIMIT 10 OFFSET 40
SHOW MEASUREMENTS WHERE service = 'redis'
-- LIMIT and OFFSET can be applied to any of the SHOW type queries
-- show all series across all measurements/tagsets
SHOW SERIES
-- show all series for any measurements where tag key region = tag value 'uswest'
SHOW SERIES WHERE region = 'uswest'
SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10
-- returns rows 100 - 109 of the result. SHOW SERIES returns series split into
-- measurements, and each series counts as a row, so you could see only a
-- single measurement returned, but 10 series within it.
SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100
-- show all retention policies on a database
SHOW RETENTION POLICIES ON mydb
-- show all tag keys across all measurements
SHOW TAG KEYS
-- show all the tag keys for a given measurement
SHOW TAG KEYS FROM cpu
SHOW TAG KEYS FROM temperature, wind_speed
-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required
SHOW TAG VALUES WITH TAG KEY = 'region'
SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host'
-- and you can do stuff against fields
SHOW FIELD KEYS FROM cpu
-- but you can't do this
SHOW FIELD VALUES
-- we don't index field values, so this query should be invalid.
-- show all users
SHOW USERS
```
Note that `FROM` and `WHERE` are optional clauses in most of the show series queries.
And the show series output looks like this:
```json
[
{
"name": "cpu",
"columns": ["id", "region", "host"],
"values": [
1, "uswest", "servera",
2, "uswest", "serverb"
]
},
{
"name": "reponse_time",
"columns": ["id", "application", "host"],
"values": [
3, "myRailsApp", "servera"
]
}
]
```
# Continuous Queries
Continuous queries are going to be inspired by MySQL `TRIGGER` syntax:
http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html
Instead of having automatically-assigned ids, named continuous queries allow for some level of duplication prevention,
particularly in the case where creation is scripted.
## Create
CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ...
## Destroy
DROP CONTINUOUS QUERY <name>
## List
SHOW CONTINUOUS QUERIES

71
vendor/github.com/influxdata/influxdb/README.md generated vendored Normal file

@ -0,0 +1,71 @@
# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/)
## An Open-Source Time Series Database
InfluxDB is an open source **time series database** with
**no external dependencies**. It's useful for recording metrics and
events, and for performing analytics.
## Features
* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running.
* Data can be tagged, allowing very flexible querying.
* SQL-like query language.
* Simple to install and manage, and fast to get data in and out.
* It aims to answer queries in real-time. That means every data point is
indexed as it comes in and is immediately available in queries that
should return in < 100ms.
## Installation
We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using:
* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package.
* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later.
* `$GOPATH/bin/influxd` if you have built InfluxDB from source.
## Getting Started
### Create your first database
```
curl -XPOST 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
```
### Insert some data
```
curl -XPOST 'http://localhost:8086/write?db=mydb' \
-d 'cpu,host=server01,region=uswest load=42 1434055562000000000'
curl -XPOST 'http://localhost:8086/write?db=mydb' \
-d 'cpu,host=server02,region=uswest load=78 1434055562000000000'
curl -XPOST 'http://localhost:8086/write?db=mydb' \
-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000'
```
### Query for the data
```
curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d"
```
### Analyze the data
```
curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'"
```
## Documentation
* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/).
* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes.
* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/).
## Contributing
If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests.
## Looking for Support?
InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed.

9
vendor/github.com/influxdata/influxdb/TODO.md generated vendored Normal file

@ -0,0 +1,9 @@
# TODO
## v2
TODO list for v2. Here is a list of things we want to add to v1, but can't because they would be a breaking change.
- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key.
- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp.
- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries.

37
vendor/github.com/influxdata/influxdb/appveyor.yml generated vendored Normal file

@ -0,0 +1,37 @@
version: 0.{build}
pull_requests:
do_not_increment_build_number: true
branches:
only:
- master
os: Windows Server 2012 R2
# Custom clone folder (variables are not expanded here).
clone_folder: c:\gopath\src\github.com\influxdata\influxdb
# Environment variables
environment:
GOROOT: C:\go17
GOPATH: C:\gopath
# Scripts that run after cloning repository
install:
- set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH%
- rmdir c:\go /s /q
- echo %PATH%
- echo %GOPATH%
- cd C:\gopath\src\github.com\influxdata\influxdb
- go version
- go env
- go get github.com/sparrc/gdm
- cd C:\gopath\src\github.com\influxdata\influxdb
- gdm restore
# To run your custom scripts instead of automatic MSBuild
build_script:
- go get -t -v ./...
- go test -race -v ./...
# To disable deployment
deploy: off

991
vendor/github.com/influxdata/influxdb/build.py generated vendored Executable file

@ -0,0 +1,991 @@
#!/usr/bin/python2.7 -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### InfluxDB Variables
################
# Packaging variables
PACKAGE_NAME = "influxdb"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/influxdb"
DATA_DIR = "/var/lib/influxdb"
SCRIPT_DIR = "/usr/lib/influxdb/scripts"
CONFIG_DIR = "/etc/influxdb"
LOGROTATE_DIR = "/etc/logrotate.d"
MAN_DIR = "/usr/share/man"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/influxdb.service"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTINST_SCRIPT = "scripts/post-install.sh"
POSTUNINST_SCRIPT = "scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "scripts/logrotate"
DEFAULT_CONFIG = "etc/config.sample.toml"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts"
CONFIGURATION_FILES = [
CONFIG_DIR + '/influxdb.conf',
LOGROTATE_DIR + '/influxdb',
]
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/influxdb"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Distributed time-series database."
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet ./"
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--license {} \
--maintainer {} \
--directories {} \
--directories {} \
--directories {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
POSTINST_SCRIPT,
PREINST_SCRIPT,
POSTUNINST_SCRIPT,
PACKAGE_LICENSE,
MAINTAINER,
LOG_DIR,
DATA_DIR,
MAN_DIR,
DESCRIPTION)
for f in CONFIGURATION_FILES:
fpm_common_args += " --config-files {}".format(f)
targets = {
'influx' : './cmd/influx',
'influxd' : './cmd/influxd',
'influx_stress' : './cmd/influx_stress',
'influx_inspect' : './cmd/influx_inspect',
'influx_tsm' : './cmd/influx_tsm',
}
supported_builds = {
'darwin': [ "amd64" ],
'windows': [ "amd64" ],
'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
supported_packages = {
"darwin": [ "tar" ],
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
}
################
#### InfluxDB Functions
################
def print_banner():
logging.info("""
___ __ _ ___ ___
|_ _|_ _ / _| |_ ___ _| \\| _ )
| || ' \\| _| | || \\ \\ / |) | _ \\
|___|_||_|_| |_|\\_,_/_\\_\\___/|___/
Build Script
""")
def create_package_fs(build_root):
"""Create a filesystem structure to mimic the package filesystem.
"""
logging.debug("Creating package filesystem at location: {}".format(build_root))
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
dirs = [ INSTALL_ROOT_DIR[1:],
LOG_DIR[1:],
DATA_DIR[1:],
SCRIPT_DIR[1:],
CONFIG_DIR[1:],
LOGROTATE_DIR[1:],
MAN_DIR[1:] ]
for d in dirs:
os.makedirs(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root, config_only=False, windows=False):
"""Copy the necessary scripts and configuration files to the package
filesystem.
"""
if config_only:
logging.debug("Copying configuration to build directory.")
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf"))
os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644)
else:
logging.debug("Copying scripts and sample configuration to build directory.")
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644)
def package_man_files(build_root):
"""Copy and gzip man pages to the package filesystem."""
logging.debug("Installing man pages.")
run("make -C man/ clean install DESTDIR={}/usr".format(build_root))
for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])):
for f in files:
run("gzip -9n {}".format(os.path.join(path, f)))
def go_get(branch, update=False, no_uncommitted=False):
"""Retrieve build dependencies or restore pinned dependencies.
"""
if local_changes() and no_uncommitted:
logging.error("There are uncommitted changes in the current directory.")
return False
if not check_path_for("gdm"):
logging.info("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
logging.info("Retrieving dependencies with `gdm`...")
sys.stdout.flush()
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH")))
return True
def run_tests(race, parallel, timeout, no_vet, junit=False):
"""Run the Go test suite on binary output.
"""
logging.info("Starting tests...")
if race:
logging.info("Race is enabled.")
if parallel is not None:
logging.info("Using parallel: {}".format(parallel))
if timeout is not None:
logging.info("Using timeout: {}".format(timeout))
out = run("go fmt ./...")
if len(out) > 0:
logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
logging.error("{}".format(out))
return False
if not no_vet:
logging.info("Running 'go vet'...")
out = run(go_vet_command)
if len(out) > 0:
logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
logging.error("{}".format(out))
return False
else:
logging.info("Skipping 'go vet' call...")
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
if junit:
logging.info("Retrieving go-junit-report...")
run("go get github.com/jstemmer/go-junit-report")
# Retrieve the output from this command.
logging.info("Running tests...")
logging.debug("{}".format(test_command))
proc = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, unused_err = proc.communicate()
output = output.decode('utf-8').strip()
# Process the output through go-junit-report.
with open('test-results.xml', 'w') as f:
logging.debug("{}".format("go-junit-report"))
junit_proc = subprocess.Popen(["go-junit-report"], stdin=subprocess.PIPE, stdout=f, stderr=subprocess.PIPE)
unused_output, err = junit_proc.communicate(output.encode('ascii', 'ignore'))
if junit_proc.returncode != 0:
logging.error("Command '{}' failed with error: {}".format("go-junit-report", err))
sys.exit(1)
if proc.returncode != 0:
logging.error("Command '{}' failed with error: {}".format(test_command, output.encode('ascii', 'ignore')))
sys.exit(1)
else:
logging.info("Running tests...")
output = run(test_command)
logging.debug("Test output:\n{}".format(out.encode('ascii', 'ignore')))
return True
################
#### All InfluxDB-specific content above this line
################
def run(command, allow_failure=False, shell=False):
"""Run shell command (convenience wrapper around subprocess).
"""
out = None
logging.debug("{}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode('utf-8').strip()
# logging.debug("Command output: {}".format(out))
except subprocess.CalledProcessError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e.output))
return None
else:
logging.error("Command '{}' failed with error: {}".format(command, e.output))
sys.exit(1)
except OSError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e))
return out
else:
logging.error("Command '{}' failed with error: {}".format(command, e))
sys.exit(1)
else:
return out
def create_temp_dir(prefix = None):
""" Create temporary directory with optional prefix.
"""
if prefix is None:
return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
else:
return tempfile.mkdtemp(prefix=prefix)
def increment_minor_version(version):
"""Return the version with the minor version incremented and patch
version set to zero.
"""
ver_list = version.split('.')
if len(ver_list) != 3:
logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
return version
ver_list[1] = str(int(ver_list[1]) + 1)
ver_list[2] = str(0)
inc_version = '.'.join(ver_list)
logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
return inc_version
def get_current_version_tag():
"""Retrieve the raw git version tag.
"""
version = run("git describe --always --tags --abbrev=0")
return version
def get_current_version():
"""Parse version information from git tag output.
"""
version_tag = get_current_version_tag()
# Remove leading 'v'
if version_tag[0] == 'v':
version_tag = version_tag[1:]
# Replace any '-'/'_' with '~'
if '-' in version_tag:
version_tag = version_tag.replace("-","~")
if '_' in version_tag:
version_tag = version_tag.replace("_","~")
return version_tag
def get_current_commit(short=False):
"""Retrieve the current git commit.
"""
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
"""Retrieve the current git branch.
"""
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def local_changes():
"""Return True if there are local un-committed changes.
"""
output = run("git diff-files --ignore-submodules --").strip()
if len(output) > 0:
return True
return False
def get_system_arch():
"""Retrieve current system architecture.
"""
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
elif arch == "386":
arch = "i386"
elif arch == "aarch64":
arch = "arm64"
elif 'arm' in arch:
# Prevent uname from reporting full ARM arch (eg 'armv7l')
arch = "arm"
return arch
def get_system_platform():
"""Retrieve current system platform.
"""
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
"""Retrieve version information for Go.
"""
out = run("go version")
matches = re.search(r'go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
"""Check the user's path for the provided binary.
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
if is_exe(full_path):
return full_path
def check_environ(build_dir = None):
"""Check environment for common Go variables.
"""
logging.info("Checking environment...")
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
cwd = os.getcwd()
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
return True
def check_prereqs():
"""Check user path for required dependencies.
"""
logging.info("Checking for dependencies...")
for req in prereqs:
if not check_path_for(req):
logging.error("Could not find dependency: {}".format(req))
return False
return True
def upload_packages(packages, bucket_name=None, overwrite=False):
"""Upload provided package output to AWS S3.
"""
logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
try:
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
logging.getLogger("boto").setLevel(logging.WARNING)
except ImportError:
logging.warn("Cannot upload packages without 'boto' Python library!")
return False
logging.info("Connecting to AWS S3...")
# Up the number of attempts to 10 from default of 1
boto.config.add_section("Boto")
boto.config.set("Boto", "metadata_service_num_attempts", "10")
c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
if bucket_name is None:
bucket_name = DEFAULT_BUCKET
bucket = c.get_bucket(bucket_name.split('/')[0])
for p in packages:
if '/' in bucket_name:
# Allow for nested paths within the bucket name (ex:
# bucket/folder). Assuming forward-slashes as path
# delimiter.
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
os.path.basename(p))
else:
name = os.path.basename(p)
logging.debug("Using key: {}".format(name))
if bucket.get_key(name) is None or overwrite:
logging.info("Uploading file {}".format(name))
k = Key(bucket)
k.key = name
if overwrite:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
else:
logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
return True
def go_list(vendor=False, relative=False):
"""
Return a list of packages.
If vendor is False, vendor packages are not included.
If relative is True, the package prefix defined by PACKAGE_URL is stripped.
"""
p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
packages = out.split('\n')
if packages[-1] == '':
packages = packages[:-1]
if not vendor:
non_vendor = []
for p in packages:
if '/vendor/' not in p:
non_vendor.append(p)
packages = non_vendor
if relative:
relative_pkgs = []
for p in packages:
r = p.replace(PACKAGE_URL, '.')
if r != '.':
relative_pkgs.append(r)
packages = relative_pkgs
return packages
def build(version=None,
platform=None,
arch=None,
nightly=False,
race=False,
clean=False,
outdir=".",
tags=[],
static=False):
"""Build each target for the specified architecture and platform.
"""
logging.info("Starting build for {}/{}...".format(platform, arch))
logging.info("Using Go version: {}".format(get_go_version()))
logging.info("Using git branch: {}".format(get_current_branch()))
logging.info("Using git commit: {}".format(get_current_commit()))
if static:
logging.info("Using statically-compiled output.")
if race:
logging.info("Race is enabled.")
if len(tags) > 0:
logging.info("Using build tags: {}".format(','.join(tags)))
logging.info("Sending build output to: {}".format(outdir))
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/' and outdir != ".":
logging.info("Cleaning build directory '{}' before building.".format(outdir))
shutil.rmtree(outdir)
os.makedirs(outdir)
logging.info("Using version '{}' for build.".format(version))
for target, path in targets.items():
logging.info("Building target: {}".format(target))
build_command = ""
# Handle static binary output
if static is True or "static_" in arch:
if "static_" in arch:
static = True
arch = arch.replace("static_", "")
build_command += "CGO_ENABLED=0 "
# Handle variations in architecture output
if arch == "i386" or arch == "i686":
arch = "386"
elif "arm" in arch:
arch = "arm"
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if "arm" in arch:
if arch == "armel":
build_command += "GOARM=5 "
elif arch == "armhf" or arch == "arm":
build_command += "GOARM=6 "
elif arch == "arm64":
# TODO(rossmcdonald) - Verify this is the correct setting for arm64
build_command += "GOARM=7 "
else:
logging.error("Invalid ARM architecture specified: {}".format(arch))
logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
return False
if platform == 'windows':
target = target + '.exe'
build_command += "go build -o {} ".format(os.path.join(outdir, target))
if race:
build_command += "-race "
if len(tags) > 0:
build_command += "-tags {} ".format(','.join(tags))
if "1.4" in get_go_version():
if static:
build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
# Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
if static:
build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
if static:
build_command += "-a -installsuffix cgo "
build_command += path
start_time = datetime.utcnow()
run(build_command, shell=True)
end_time = datetime.utcnow()
logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
return True
def generate_md5_from_file(path):
"""Generate MD5 signature based on the contents of the file at path.
"""
m = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
m.update(chunk)
return m.hexdigest()
def generate_sig_from_file(path):
"""Generate a detached GPG signature from the file at path.
"""
logging.debug("Generating GPG signature for file: {}".format(path))
gpg_path = check_path_for('gpg')
if gpg_path is None:
logging.warn("gpg binary not found on path! Skipping signature creation.")
return False
if os.environ.get("GNUPG_HOME") is not None:
run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
else:
run('gpg --armor --detach-sign --yes {}'.format(path))
return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
"""Package the output of the build process.
"""
outfiles = []
tmp_build_dir = create_temp_dir()
logging.debug("Packaging for build output: {}".format(build_output))
logging.info("Using temporary directory: {}".format(tmp_build_dir))
try:
for platform in build_output:
# Create top-level folder displaying which platform (linux, etc)
os.makedirs(os.path.join(tmp_build_dir, platform))
for arch in build_output[platform]:
logging.info("Creating packages for {}/{}".format(platform, arch))
# Create second-level directory displaying the architecture (amd64, etc)
current_location = build_output[platform][arch]
# Create directory tree to mimic file system of package
build_root = os.path.join(tmp_build_dir,
platform,
arch,
'{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
os.makedirs(build_root)
# Copy packaging scripts to build directory
if platform == "windows":
# For windows and static builds, just copy
# binaries to root of package (no other scripts or
# directories)
package_scripts(build_root, config_only=True, windows=True)
elif static or "static_" in arch:
package_scripts(build_root, config_only=True)
else:
create_package_fs(build_root)
package_scripts(build_root)
if platform != "windows":
package_man_files(build_root)
for binary in targets:
# Copy newly-built binaries to packaging directory
if platform == 'windows':
binary = binary + '.exe'
if platform == 'windows' or static or "static_" in arch:
# Where the binary should go in the package filesystem
to = os.path.join(build_root, binary)
# Where the binary currently is located
fr = os.path.join(current_location, binary)
else:
# Where the binary currently is located
fr = os.path.join(current_location, binary)
# Where the binary should go in the package filesystem
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
shutil.copy(fr, to)
for package_type in supported_packages[platform]:
# Package the directory structure for each package type for the platform
logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
name = pkg_name
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
package_iteration = iteration
if "static_" in arch:
# Remove the "static_" from the displayed arch on the package
package_arch = arch.replace("static_", "")
else:
package_arch = arch
if not release and not nightly:
# For non-release builds, just use the commit hash as the version
package_version = "{}~{}".format(version,
get_current_commit(short=True))
package_iteration = "0"
package_build_root = build_root
current_location = build_output[platform][arch]
if package_type in ['zip', 'tar']:
# For tars and zips, start the packaging one folder above
# the build root (to include the package name)
package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
if nightly:
if static or "static_" in arch:
name = '{}-static-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
name = '{}-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
if static or "static_" in arch:
name = '{}-{}-static_{}_{}'.format(name,
package_version,
platform,
package_arch)
else:
name = '{}-{}_{}_{}'.format(name,
package_version,
platform,
package_arch)
current_location = os.path.join(os.getcwd(), current_location)
if package_type == 'tar':
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
run(tar_command, shell=True)
run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".tar.gz")
outfiles.append(outfile)
elif package_type == 'zip':
zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
run(zip_command, shell=True)
run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".zip")
outfiles.append(outfile)
elif package_type not in ['zip', 'tar'] and (static or "static_" in arch):
logging.info("Skipping package type '{}' for static builds.".format(package_type))
else:
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
fpm_common_args,
name,
package_arch,
package_type,
package_version,
package_iteration,
package_build_root,
current_location)
if package_type == "rpm":
fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT)
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
logging.warn("Could not determine output from packaging output!")
else:
if nightly:
# Strip nightly version from package name
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
os.rename(outfile, new_outfile)
outfile = new_outfile
else:
if package_type == 'rpm':
# rpm converts any dashes to underscores
package_version = package_version.replace("-", "_")
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
os.rename(outfile, new_outfile)
outfile = new_outfile
outfiles.append(os.path.join(os.getcwd(), outfile))
logging.debug("Produced package files: {}".format(outfiles))
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def main(args):
global PACKAGE_NAME
if args.release and args.nightly:
logging.error("Cannot be both a nightly and a release.")
return 1
if args.nightly:
args.version = increment_minor_version(args.version)
args.version = "{}~n{}".format(args.version,
datetime.utcnow().strftime("%Y%m%d%H%M"))
args.iteration = 0
# Pre-build checks
check_environ()
if not check_prereqs():
return 1
if args.build_tags is None:
args.build_tags = []
else:
args.build_tags = args.build_tags.split(',')
orig_commit = get_current_commit(short=True)
orig_branch = get_current_branch()
if args.platform not in supported_builds and args.platform != 'all':
logging.error("Invalid build platform: {}".format(target_platform))
return 1
build_output = {}
if args.branch != orig_branch and args.commit != orig_commit:
logging.error("Can only specify one branch or commit to build from.")
return 1
elif args.branch != orig_branch:
logging.info("Moving to git branch: {}".format(args.branch))
run("git checkout {}".format(args.branch))
elif args.commit != orig_commit:
logging.info("Moving to git commit: {}".format(args.commit))
run("git checkout {}".format(args.commit))
if not args.no_get:
if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
return 1
if args.test:
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report):
return 1
platforms = []
single_build = True
if args.platform == 'all':
platforms = supported_builds.keys()
single_build = False
else:
platforms = [args.platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if args.arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [args.arch]
for arch in archs:
od = args.outdir
if not single_build:
od = os.path.join(args.outdir, platform, arch)
if not build(version=args.version,
platform=platform,
arch=arch,
nightly=args.nightly,
race=args.race,
clean=args.clean,
outdir=od,
tags=args.build_tags,
static=args.static):
return 1
build_output.get(platform).update( { arch : od } )
# Build packages
if args.package:
if not check_path_for("fpm"):
logging.error("FPM ruby gem required for packaging. Stopping.")
return 1
packages = package(build_output,
args.name,
args.version,
nightly=args.nightly,
iteration=args.iteration,
static=args.static,
release=args.release)
if args.sign:
logging.debug("Generating GPG signatures for packages: {}".format(packages))
sigs = [] # retain signatures so they can be uploaded with packages
for p in packages:
if generate_sig_from_file(p):
sigs.append(p + '.asc')
else:
logging.error("Creation of signature for package [{}] failed!".format(p))
return 1
packages += sigs
if args.upload:
logging.debug("Files staged for upload: {}".format(packages))
if args.nightly:
args.upload_overwrite = True
if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
return 1
logging.info("Packages created:")
for p in packages:
logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
generate_md5_from_file(p)))
if orig_branch != get_current_branch():
logging.info("Moving back to original git branch: {}".format(orig_branch))
run("git checkout {}".format(orig_branch))
return 0
if __name__ == '__main__':
LOG_LEVEL = logging.INFO
if '--debug' in sys.argv[1:]:
LOG_LEVEL = logging.DEBUG
log_format = '[%(levelname)s] %(funcName)s: %(message)s'
logging.basicConfig(level=LOG_LEVEL,
format=log_format)
parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
parser.add_argument('--verbose','-v','--debug',
action='store_true',
help='Use debug output')
parser.add_argument('--outdir', '-o',
metavar='<output directory>',
default='./build/',
type=os.path.abspath,
help='Output directory')
parser.add_argument('--name', '-n',
metavar='<name>',
default=PACKAGE_NAME,
type=str,
help='Name to use for package name (when package is specified)')
parser.add_argument('--arch',
metavar='<amd64|i386|armhf|arm64|armel|all>',
type=str,
default=get_system_arch(),
help='Target architecture for build output')
parser.add_argument('--platform',
metavar='<linux|darwin|windows|all>',
type=str,
default=get_system_platform(),
help='Target platform for build output')
parser.add_argument('--branch',
metavar='<branch>',
type=str,
default=get_current_branch(),
help='Build from a specific branch')
parser.add_argument('--commit',
metavar='<commit>',
type=str,
default=get_current_commit(short=True),
help='Build from a specific commit')
parser.add_argument('--version',
metavar='<version>',
type=str,
default=get_current_version(),
help='Version information to apply to build output (ex: 0.12.0)')
parser.add_argument('--iteration',
metavar='<package iteration>',
type=str,
default="1",
help='Package iteration to apply to build output (defaults to 1)')
parser.add_argument('--stats',
action='store_true',
help='Emit build metrics (requires InfluxDB Python client)')
parser.add_argument('--stats-server',
metavar='<hostname:port>',
type=str,
help='Send build stats to InfluxDB using provided hostname and port')
parser.add_argument('--stats-db',
metavar='<database name>',
type=str,
help='Send build stats to InfluxDB using provided database name')
parser.add_argument('--nightly',
action='store_true',
help='Mark build output as nightly build (will increment the minor version)')
parser.add_argument('--update',
action='store_true',
help='Update build dependencies prior to building')
parser.add_argument('--package',
action='store_true',
help='Package binary output')
parser.add_argument('--release',
action='store_true',
help='Mark build output as release')
parser.add_argument('--clean',
action='store_true',
help='Clean output directory before building')
parser.add_argument('--no-get',
action='store_true',
help='Do not retrieve pinned dependencies when building')
parser.add_argument('--no-uncommitted',
action='store_true',
help='Fail if uncommitted changes exist in the working directory')
parser.add_argument('--upload',
action='store_true',
help='Upload output packages to AWS S3')
parser.add_argument('--upload-overwrite','-w',
action='store_true',
help='Overwrite existing packages when uploading to AWS S3')
parser.add_argument('--bucket',
metavar='<S3 bucket name>',
type=str,
default=DEFAULT_BUCKET,
help='Destination bucket for uploads')
parser.add_argument('--build-tags',
metavar='<tags>',
help='Optional build tags to use for compilation')
parser.add_argument('--static',
action='store_true',
help='Create statically-compiled binary output')
parser.add_argument('--sign',
action='store_true',
help='Create GPG detached signatures for packages (when package is specified)')
parser.add_argument('--test',
action='store_true',
help='Run tests (does not produce build output)')
parser.add_argument('--junit-report',
action='store_true',
help='Output tests in the JUnit XML format')
parser.add_argument('--no-vet',
action='store_true',
help='Do not run "go vet" when running tests')
parser.add_argument('--race',
action='store_true',
help='Enable race flag for build output')
parser.add_argument('--parallel',
metavar='<num threads>',
type=int,
help='Number of tests to run simultaneously')
parser.add_argument('--timeout',
metavar='<timeout>',
type=str,
help='Timeout for tests before failing')
args = parser.parse_args()
print_banner()
sys.exit(main(args))
22
vendor/github.com/influxdata/influxdb/build.sh generated vendored Executable file
View File

@ -0,0 +1,22 @@
#!/bin/bash
# Run the build utility via Docker
set -e
# Make sure our working dir is the dir of the script
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
cd "$DIR"
# Build new docker image
docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR
echo "Running build.py"
# Run docker
docker run --rm \
-e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
-e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
-v "$HOME/.aws.conf":/root/.aws.conf \
-v "$DIR":/root/go/src/github.com/influxdata/influxdb \
influxdb-builder \
"$@"
39
vendor/github.com/influxdata/influxdb/circle-test.sh generated vendored Executable file
View File

@ -0,0 +1,39 @@
#!/bin/bash
#
# This is the InfluxDB test script for CircleCI; it is a light wrapper around ./test.sh.
# Exit if any command fails
set -e
# Get the dir of this script and make it our working directory.
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
cd "$DIR"
export OUTPUT_DIR="$CIRCLE_ARTIFACTS"
# Don't delete the container since CircleCI doesn't have permission to do so.
export DOCKER_RM="false"
# Get number of test environments.
count=$(./test.sh count)
# Check that we aren't wasting CircleCI nodes.
if [ $CIRCLE_NODE_INDEX -gt $((count - 1)) ]
then
echo "More CircleCI nodes allocated than tests environments to run!"
exit 0
fi
# Map CircleCI nodes to test environments.
tests=$(seq 0 $((count - 1)))
for i in $tests
do
mine=$(( $i % $CIRCLE_NODE_TOTAL ))
if [ $mine -eq $CIRCLE_NODE_INDEX ]
then
echo "Running test env index: $i"
./test.sh $i
fi
done
# Copy the JUnit test XML to the test reports folder.
mkdir -p $CIRCLE_TEST_REPORTS/reports
cp test-results.xml $CIRCLE_TEST_REPORTS/reports/test-results.xml
43
vendor/github.com/influxdata/influxdb/circle.yml generated vendored Normal file
View File

@ -0,0 +1,43 @@
machine:
services:
- docker
environment:
GODIST: "go1.8.3.linux-amd64.tar.gz"
post:
- mkdir -p download
- test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST
- sudo rm -rf /usr/local/go
- sudo tar -C /usr/local -xzf download/$GODIST
dependencies:
cache_directories:
- "~/docker"
- ~/download
override:
- ./test.sh save:
# building the docker images can take a long time, hence caching
timeout: 1800
test:
override:
- bash circle-test.sh:
parallel: true
# Race tests using 960s timeout
timeout: 960
deployment:
release:
tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/
commands:
- >
docker run
-e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
-e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
-v $(pwd):/root/go/src/github.com/influxdata/influxdb
influxdb_build_ubuntu64
--release
--package
--platform all
--arch all
--upload
--bucket dl.influxdata.com/influxdb/releases
298
vendor/github.com/influxdata/influxdb/client/README.md generated vendored Normal file
View File

@ -0,0 +1,298 @@
# InfluxDB Client
[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
## Description
**NOTE:** The Go client library now has a "v2" version, with the old version
being deprecated. The new version can be imported at
`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
A Go client library written and maintained by the **InfluxDB** team.
This package provides convenience functions to read and write time series data.
It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
## Getting Started
### Connecting To Your Database
Connecting to an **InfluxDB** database is straightforward. You will need a host
name, a port and the cluster user credentials if applicable. The default port is
8086. You can customize these settings to your specific installation via the
**InfluxDB** configuration file.
Though not necessary for experimentation, you may want to create a new user
and authenticate the connection to your database.
For more information please check out the
[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
For the impatient, you can create a new admin user _bubba_ by firing off the
[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
```shell
influx
> create user bubba with password 'bumblebeetuna'
> grant all privileges to bubba
```
And now, for good measure, set the credentials in your shell environment.
In the example below we will use $INFLUX_USER and $INFLUX_PWD.
Now with the administrivia out of the way, let's connect to our database.
NOTE: If you've opted out of creating a user, you can omit Username and Password in
the configuration below.
```go
package main
import (
"log"
"time"
"github.com/influxdata/influxdb/client/v2"
)
const (
MyDB = "square_holes"
username = "bubba"
password = "bumblebeetuna"
)
func main() {
// Create a new HTTPClient
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
Username: username,
Password: password,
})
if err != nil {
log.Fatal(err)
}
// Create a new point batch
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: MyDB,
Precision: "s",
})
if err != nil {
log.Fatal(err)
}
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
log.Fatal(err)
}
bp.AddPoint(pt)
// Write the batch
if err := c.Write(bp); err != nil {
log.Fatal(err)
}
}
```
### Inserting Data
Time series data aka *points* are written to the database using batch inserts.
The mechanism is to create one or more points and then create a batch aka
*batch points* and write these to a given database and series. A series is a
combination of a measurement (time/values) and a set of tags.
In this sample we will create a batch of 1,000 points. Each point has a time and
a single value as well as 2 tags indicating a shape and color. We write these points
to a database called _square_holes_ using a measurement named _shapes_.
NOTE: You can specify a RetentionPolicy as part of the batch points. If not
provided, InfluxDB will use the database's _default_ retention policy (a short
snippet showing this follows the example below).
```go
func writePoints(clnt client.Client) {
sampleSize := 1000
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: "systemstats",
Precision: "us",
})
if err != nil {
log.Fatal(err)
}
rand.Seed(time.Now().UnixNano())
for i := 0; i < sampleSize; i++ {
regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
tags := map[string]string{
"cpu": "cpu-total",
"host": fmt.Sprintf("host%d", rand.Intn(1000)),
"region": regions[rand.Intn(len(regions))],
}
idle := rand.Float64() * 100.0
fields := map[string]interface{}{
"idle": idle,
"busy": 100.0 - idle,
}
pt, err := client.NewPoint(
"cpu_usage",
tags,
fields,
time.Now(),
)
if err != nil {
log.Fatal(err)
}
bp.AddPoint(pt)
}
if err := clnt.Write(bp); err != nil {
log.Fatal(err)
}
}
```
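As noted above, the retention policy is just one more field on the batch-points
config. A minimal sketch (the policy name "two_weeks" is hypothetical):
```go
// Same batch as before, but pinned to a specific retention policy.
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: "systemstats",
RetentionPolicy: "two_weeks", // hypothetical policy name
Precision: "us",
})
if err != nil {
log.Fatal(err)
}
```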
### Querying Data
One nice advantage of using **InfluxDB** is the ability to query your data using familiar
SQL constructs. In this example we can create a convenience function to query the database
as follows:
```go
// queryDB convenience function to query the database
func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
q := client.Query{
Command: cmd,
Database: MyDB,
}
if response, err := clnt.Query(q); err == nil {
if response.Error() != nil {
return res, response.Error()
}
res = response.Results
} else {
return res, err
}
return res, nil
}
```
#### Creating a Database
```go
_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
if err != nil {
log.Fatal(err)
}
```
#### Count Records
```go
q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
res, err := queryDB(clnt, q)
if err != nil {
log.Fatal(err)
}
count := res[0].Series[0].Values[0][1]
log.Printf("Found a total of %v records\n", count)
```
#### Find the last 10 _shapes_ records
```go
q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
res, err = queryDB(clnt, q)
if err != nil {
log.Fatal(err)
}
for i, row := range res[0].Series[0].Values {
t, err := time.Parse(time.RFC3339, row[0].(string))
if err != nil {
log.Fatal(err)
}
val := row[1].(string)
log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
}
```
### Using the UDP Client
The **InfluxDB** client also supports writing over UDP.
```go
func WriteUDP() {
// Make client
c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
if err != nil {
panic(err.Error())
}
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
panic(err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
```
### Point Splitting
The UDP client now supports splitting single points that exceed the configured
payload size. The logic for processing each point is listed here, starting with
an empty payload.
1. If adding the point to the current (non-empty) payload would exceed the
configured size, send the current payload. Otherwise, add it to the current
payload.
1. If the point is smaller than the configured size, add it to the payload.
1. If the point has no timestamp, just try to send the entire point as a single
UDP payload, and process the next point.
1. Since the point has a timestamp, re-use the existing measurement name,
tagset, and timestamp and create multiple new points by splitting up the
fields. The per-point length will be kept close to the configured size,
staying under it if possible. This does mean that one large field, maybe a
long string, could be sent as a larger-than-configured payload.
The above logic attempts to respect configured payload sizes, but not sacrifice
any data integrity. Points without a timestamp can't be split, as that may
cause fields to have differing timestamps when processed by the server.
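A minimal sketch of opting in to a smaller maximum payload, assuming the v2
client's `UDPConfig` (the 512-byte value below is an arbitrary example):
```go
// Points larger than PayloadSize are split according to the rules above.
c, err := client.NewUDPClient(client.UDPConfig{
Addr: "localhost:8089",
PayloadSize: 512, // arbitrary example value
})
if err != nil {
panic(err.Error())
}
defer c.Close()
```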
## Go Docs
Please refer to
[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
for documentation.
## See Also
You can also examine how the client library is used by the
[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
113
vendor/github.com/influxdata/influxdb/client/example_test.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
package client_test
import (
"fmt"
"log"
"math/rand"
"net/url"
"os"
"strconv"
"time"
"github.com/influxdata/influxdb/client"
)
func ExampleNewClient() {
host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
if err != nil {
log.Fatal(err)
}
// NOTE: this assumes you've setup a user and have setup shell env variables,
// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.
conf := client.Config{
URL: *host,
Username: os.Getenv("INFLUX_USER"),
Password: os.Getenv("INFLUX_PWD"),
}
con, err := client.NewClient(conf)
if err != nil {
log.Fatal(err)
}
log.Println("Connection", con)
}
func ExampleClient_Ping() {
host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
if err != nil {
log.Fatal(err)
}
con, err := client.NewClient(client.Config{URL: *host})
if err != nil {
log.Fatal(err)
}
dur, ver, err := con.Ping()
if err != nil {
log.Fatal(err)
}
log.Printf("Happy as a hippo! %v, %s", dur, ver)
}
func ExampleClient_Query() {
host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
if err != nil {
log.Fatal(err)
}
con, err := client.NewClient(client.Config{URL: *host})
if err != nil {
log.Fatal(err)
}
q := client.Query{
Command: "select count(value) from shapes",
Database: "square_holes",
}
if response, err := con.Query(q); err == nil && response.Error() == nil {
log.Println(response.Results)
}
}
func ExampleClient_Write() {
host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
if err != nil {
log.Fatal(err)
}
con, err := client.NewClient(client.Config{URL: *host})
if err != nil {
log.Fatal(err)
}
var (
shapes = []string{"circle", "rectangle", "square", "triangle"}
colors = []string{"red", "blue", "green"}
sampleSize = 1000
pts = make([]client.Point, sampleSize)
)
rand.Seed(42)
for i := 0; i < sampleSize; i++ {
pts[i] = client.Point{
Measurement: "shapes",
Tags: map[string]string{
"color": strconv.Itoa(rand.Intn(len(colors))),
"shape": strconv.Itoa(rand.Intn(len(shapes))),
},
Fields: map[string]interface{}{
"value": rand.Intn(sampleSize),
},
Time: time.Now(),
Precision: "s",
}
}
bps := client.BatchPoints{
Points: pts,
Database: "BumbeBeeTuna",
RetentionPolicy: "default",
}
_, err = con.Write(bps)
if err != nil {
log.Fatal(err)
}
}
832
vendor/github.com/influxdata/influxdb/client/influxdb.go generated vendored Normal file
View File

@ -0,0 +1,832 @@
// Package client implements a now-deprecated client for InfluxDB;
// use github.com/influxdata/influxdb/client/v2 instead.
package client // import "github.com/influxdata/influxdb/client"
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/models"
)
const (
// DefaultHost is the default host used to connect to an InfluxDB instance
DefaultHost = "localhost"
// DefaultPort is the default port used to connect to an InfluxDB instance
DefaultPort = 8086
// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
DefaultTimeout = 0
)
// Query is used to send a command to the server. Both Command and Database are required.
type Query struct {
Command string
Database string
// Chunked tells the server to send back chunked responses. This places
// less load on the server by sending back chunks of the response rather
// than waiting for the entire response all at once.
Chunked bool
// ChunkSize sets the maximum number of rows that will be returned per
// chunk. Chunks are either divided based on their series or if they hit
// the chunk size limit.
//
// Chunked must be set to true for this option to be used.
ChunkSize int
}
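// For illustration, a chunked query might be constructed as follows
// (values are arbitrary):
//
//	q := Query{Command: "SELECT * FROM cpu", Database: "mydb", Chunked: true, ChunkSize: 10000}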
// ParseConnectionString will parse a string to create a valid connection URL
func ParseConnectionString(path string, ssl bool) (url.URL, error) {
var host string
var port int
h, p, err := net.SplitHostPort(path)
if err != nil {
if path == "" {
host = DefaultHost
} else {
host = path
}
// If they didn't specify a port, always use the default port
port = DefaultPort
} else {
host = h
port, err = strconv.Atoi(p)
if err != nil {
return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
}
}
u := url.URL{
Scheme: "http",
}
if ssl {
u.Scheme = "https"
}
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
return u, nil
}
// Config is used to specify what server to connect to.
// URL: The URL of the server to connect to.
// Username/Password are optional. They will be passed via basic auth if provided.
// UserAgent: If not provided, will default to "InfluxDBClient".
// Timeout: If not provided, will default to 0 (no timeout).
type Config struct {
URL url.URL
UnixSocket string
Username string
Password string
UserAgent string
Timeout time.Duration
Precision string
WriteConsistency string
UnsafeSsl bool
}
// NewConfig will create a config to be used in connecting to the client
func NewConfig() Config {
return Config{
Timeout: DefaultTimeout,
}
}
// Client is used to make calls to the server.
type Client struct {
url url.URL
unixSocket string
username string
password string
httpClient *http.Client
userAgent string
precision string
}
const (
// ConsistencyOne requires at least one data node acknowledged a write.
ConsistencyOne = "one"
// ConsistencyAll requires all data nodes to acknowledge a write.
ConsistencyAll = "all"
// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
ConsistencyQuorum = "quorum"
// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
ConsistencyAny = "any"
)
// NewClient will instantiate and return a connected client to issue commands to the server.
func NewClient(c Config) (*Client, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: c.UnsafeSsl,
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
}
if c.UnixSocket != "" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", c.UnixSocket)
}
}
client := Client{
url: c.URL,
unixSocket: c.UnixSocket,
username: c.Username,
password: c.Password,
httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
userAgent: c.UserAgent,
precision: c.Precision,
}
if client.userAgent == "" {
client.userAgent = "InfluxDBClient"
}
return &client, nil
}
// SetAuth will update the username and password
func (c *Client) SetAuth(u, p string) {
c.username = u
c.password = p
}
// SetPrecision will update the precision
func (c *Client) SetPrecision(precision string) {
c.precision = precision
}
// Query sends a command to the server and returns the Response
func (c *Client) Query(q Query) (*Response, error) {
u := c.url
u.Path = "query"
values := u.Query()
values.Set("q", q.Command)
values.Set("db", q.Database)
if q.Chunked {
values.Set("chunked", "true")
if q.ChunkSize > 0 {
values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
}
}
if c.precision != "" {
values.Set("epoch", c.precision)
}
u.RawQuery = values.Encode()
req, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
if q.Chunked {
cr := NewChunkedResponse(resp.Body)
for {
r, err := cr.NextResponse()
if err != nil {
// If we got an error while decoding the response, send that back.
return nil, err
}
if r == nil {
break
}
response.Results = append(response.Results, r.Results...)
if r.Err != nil {
response.Err = r.Err
break
}
}
} else {
dec := json.NewDecoder(resp.Body)
dec.UseNumber()
if err := dec.Decode(&response); err != nil {
// Ignore EOF errors if we got an invalid status code.
if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
return nil, err
}
}
}
// If we don't have an error in our json response, and didn't get StatusOK,
// then send back an error.
if resp.StatusCode != http.StatusOK && response.Error() == nil {
return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
}
return &response, nil
}
// Write takes BatchPoints and allows for writing of multiple points with defaults
// If successful, error is nil and Response is nil
// If an error occurs, Response may contain additional information if populated.
func (c *Client) Write(bp BatchPoints) (*Response, error) {
u := c.url
u.Path = "write"
var b bytes.Buffer
for _, p := range bp.Points {
err := checkPointTypes(p)
if err != nil {
return nil, err
}
if p.Raw != "" {
if _, err := b.WriteString(p.Raw); err != nil {
return nil, err
}
} else {
for k, v := range bp.Tags {
if p.Tags == nil {
p.Tags = make(map[string]string, len(bp.Tags))
}
p.Tags[k] = v
}
if _, err := b.WriteString(p.MarshalString()); err != nil {
return nil, err
}
}
if err := b.WriteByte('\n'); err != nil {
return nil, err
}
}
req, err := http.NewRequest("POST", u.String(), &b)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
precision := bp.Precision
if precision == "" {
precision = c.precision
}
params := req.URL.Query()
params.Set("db", bp.Database)
params.Set("rp", bp.RetentionPolicy)
params.Set("precision", precision)
params.Set("consistency", bp.WriteConsistency)
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
err := errors.New(string(body))
response.Err = err
return &response, err
}
return nil, nil
}
// WriteLineProtocol takes a string with line returns to delimit each write
// If successful, error is nil and Response is nil
// If an error occurs, Response may contain additional information if populated.
func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
u := c.url
u.Path = "write"
r := strings.NewReader(data)
req, err := http.NewRequest("POST", u.String(), r)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("db", database)
params.Set("rp", retentionPolicy)
params.Set("precision", precision)
params.Set("consistency", writeConsistency)
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
err := errors.New(string(body))
response.Err = err
return &response, err
}
return nil, nil
}
// Ping will check to see if the server is up
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *Client) Ping() (time.Duration, string, error) {
now := time.Now()
u := c.url
u.Path = "ping"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return 0, "", err
}
req.Header.Set("User-Agent", c.userAgent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return 0, "", err
}
defer resp.Body.Close()
version := resp.Header.Get("X-Influxdb-Version")
return time.Since(now), version, nil
}
// Structs
// Message represents a user message.
type Message struct {
Level string `json:"level,omitempty"`
Text string `json:"text,omitempty"`
}
// Result represents a resultset returned from a single statement.
type Result struct {
Series []models.Row
Messages []*Message
Err error
}
// MarshalJSON encodes the result into JSON.
func (r *Result) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Series []models.Row `json:"series,omitempty"`
Messages []*Message `json:"messages,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Series = r.Series
o.Messages = r.Messages
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Result struct
func (r *Result) UnmarshalJSON(b []byte) error {
var o struct {
Series []models.Row `json:"series,omitempty"`
Messages []*Message `json:"messages,omitempty"`
Err string `json:"error,omitempty"`
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
err := dec.Decode(&o)
if err != nil {
return err
}
r.Series = o.Series
r.Messages = o.Messages
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
// Response represents a list of statement results.
type Response struct {
Results []Result
Err error
}
// MarshalJSON encodes the response into JSON.
func (r *Response) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Results []Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Results = r.Results
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Response struct
func (r *Response) UnmarshalJSON(b []byte) error {
var o struct {
Results []Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
err := dec.Decode(&o)
if err != nil {
return err
}
r.Results = o.Results
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
if r.Err != nil {
return r.Err
}
for _, result := range r.Results {
if result.Err != nil {
return result.Err
}
}
return nil
}
// duplexReader reads responses and writes them to another writer while
// satisfying the reader interface.
type duplexReader struct {
r io.Reader
w io.Writer
}
func (r *duplexReader) Read(p []byte) (n int, err error) {
n, err = r.r.Read(p)
if err == nil {
r.w.Write(p[:n])
}
return n, err
}
// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
dec *json.Decoder
duplex *duplexReader
buf bytes.Buffer
}
// NewChunkedResponse reads a stream and produces responses from the stream.
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
resp := &ChunkedResponse{}
resp.duplex = &duplexReader{r: r, w: &resp.buf}
resp.dec = json.NewDecoder(resp.duplex)
resp.dec.UseNumber()
return resp
}
// NextResponse reads the next line of the stream and returns a response.
func (r *ChunkedResponse) NextResponse() (*Response, error) {
var response Response
if err := r.dec.Decode(&response); err != nil {
if err == io.EOF {
return nil, nil
}
// A decoding error happened. This probably means the server crashed
// and sent a last-ditch error message to us. Ensure we have read the
// entirety of the connection to get any remaining error text.
io.Copy(ioutil.Discard, r.duplex)
return nil, errors.New(strings.TrimSpace(r.buf.String()))
}
r.buf.Reset()
return &response, nil
}
// Point defines the fields that will be written to the database
// Measurement, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
Measurement string
Tags map[string]string
Time time.Time
Fields map[string]interface{}
Precision string
Raw string
}
// MarshalJSON will format the time in RFC3339Nano.
// Precision is ignored as it is only used for writing, not reading.
// In other words, times are always sent back in nanosecond precision.
func (p *Point) MarshalJSON() ([]byte, error) {
point := struct {
Measurement string `json:"measurement,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Time string `json:"time,omitempty"`
Fields map[string]interface{} `json:"fields,omitempty"`
Precision string `json:"precision,omitempty"`
}{
Measurement: p.Measurement,
Tags: p.Tags,
Fields: p.Fields,
Precision: p.Precision,
}
// Let it omit empty if it's really zero
if !p.Time.IsZero() {
point.Time = p.Time.UTC().Format(time.RFC3339Nano)
}
return json.Marshal(&point)
}
// MarshalString renders string representation of a Point with specified
// precision. The default precision is nanoseconds.
func (p *Point) MarshalString() string {
pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
if err != nil {
return "# ERROR: " + err.Error() + " " + p.Measurement
}
if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
return pt.String()
}
return pt.PrecisionString(p.Precision)
}
// UnmarshalJSON decodes the data into the Point struct
func (p *Point) UnmarshalJSON(b []byte) error {
var normal struct {
Measurement string `json:"measurement"`
Tags map[string]string `json:"tags"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
var epoch struct {
Measurement string `json:"measurement"`
Tags map[string]string `json:"tags"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
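// Try the integer-epoch form first; if it decodes cleanly, use it.
// Otherwise fall back to the RFC3339 "normal" form below.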
if err := func() error {
var err error
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
if err = dec.Decode(&epoch); err != nil {
return err
}
// Convert from epoch to time.Time, but only if Time
// was actually set.
var ts time.Time
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
}
p.Measurement = epoch.Measurement
p.Tags = epoch.Tags
p.Time = ts
p.Precision = epoch.Precision
p.Fields = normalizeFields(epoch.Fields)
return nil
}(); err == nil {
return nil
}
dec := json.NewDecoder(bytes.NewBuffer(b))
dec.UseNumber()
if err := dec.Decode(&normal); err != nil {
return err
}
normal.Time = SetPrecision(normal.Time, normal.Precision)
p.Measurement = normal.Measurement
p.Tags = normal.Tags
p.Time = normal.Time
p.Precision = normal.Precision
p.Fields = normalizeFields(normal.Fields)
return nil
}
// Remove any notion of json.Number
func normalizeFields(fields map[string]interface{}) map[string]interface{} {
newFields := map[string]interface{}{}
for k, v := range fields {
switch v := v.(type) {
case json.Number:
jv, e := v.Float64()
if e != nil {
panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
}
newFields[k] = jv
default:
newFields[k] = v
}
}
return newFields
}
// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the databases default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type BatchPoints struct {
Points []Point `json:"points,omitempty"`
Database string `json:"database,omitempty"`
RetentionPolicy string `json:"retentionPolicy,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Time time.Time `json:"time,omitempty"`
Precision string `json:"precision,omitempty"`
WriteConsistency string `json:"-"`
}
// UnmarshalJSON decodes the data into the BatchPoints struct
func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
var normal struct {
Points []Point `json:"points"`
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
}
var epoch struct {
Points []Point `json:"points"`
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
}
if err := func() error {
var err error
if err = json.Unmarshal(b, &epoch); err != nil {
return err
}
// Convert from epoch to time.Time
var ts time.Time
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
}
bp.Points = epoch.Points
bp.Database = epoch.Database
bp.RetentionPolicy = epoch.RetentionPolicy
bp.Tags = epoch.Tags
bp.Time = ts
bp.Precision = epoch.Precision
return nil
}(); err == nil {
return nil
}
if err := json.Unmarshal(b, &normal); err != nil {
return err
}
normal.Time = SetPrecision(normal.Time, normal.Precision)
bp.Points = normal.Points
bp.Database = normal.Database
bp.RetentionPolicy = normal.RetentionPolicy
bp.Tags = normal.Tags
bp.Time = normal.Time
bp.Precision = normal.Precision
return nil
}
// utility functions
// Addr provides the current url as a string of the server the client is connected to.
func (c *Client) Addr() string {
if c.unixSocket != "" {
return c.unixSocket
}
return c.url.String()
}
// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
func checkPointTypes(p Point) error {
for _, v := range p.Fields {
switch v.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
return nil
default:
return fmt.Errorf("unsupported point type: %T", v)
}
}
return nil
}
// helper functions
// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
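// For example, EpochToTime(1500000000, "s") is 2017-07-14T02:40:00Z.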
func EpochToTime(epoch int64, precision string) (time.Time, error) {
if precision == "" {
precision = "s"
}
var t time.Time
switch precision {
case "h":
t = time.Unix(0, epoch*int64(time.Hour))
case "m":
t = time.Unix(0, epoch*int64(time.Minute))
case "s":
t = time.Unix(0, epoch*int64(time.Second))
case "ms":
t = time.Unix(0, epoch*int64(time.Millisecond))
case "u":
t = time.Unix(0, epoch*int64(time.Microsecond))
case "n":
t = time.Unix(0, epoch)
default:
return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
}
return t, nil
}
// SetPrecision will round a time to the specified precision
func SetPrecision(t time.Time, precision string) time.Time {
switch precision {
case "n":
case "u":
return t.Round(time.Microsecond)
case "ms":
return t.Round(time.Millisecond)
case "s":
return t.Round(time.Second)
case "m":
return t.Round(time.Minute)
case "h":
return t.Round(time.Hour)
}
return t
}
831
vendor/github.com/influxdata/influxdb/client/influxdb_test.go generated vendored Normal file
View File

@ -0,0 +1,831 @@
package client_test
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/influxdata/influxdb/client"
)
func BenchmarkWrite(b *testing.B) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
b.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
bp := client.BatchPoints{
Points: []client.Point{
{Fields: map[string]interface{}{"value": 101}}},
}
for i := 0; i < b.N; i++ {
r, err := c.Write(bp)
if err != nil {
b.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if r != nil {
b.Fatalf("unexpected response. expected %v, actual %v", nil, r)
}
}
}
func BenchmarkUnmarshalJSON2Tags(b *testing.B) {
var bp client.BatchPoints
data := []byte(`
{
"database": "foo",
"retentionPolicy": "bar",
"points": [
{
"name": "cpu",
"tags": {
"host": "server01",
"region": "us-east1"
},
"time": 14244733039069373,
"precision": "n",
"fields": {
"value": 4541770385657154000
}
}
]
}
`)
for i := 0; i < b.N; i++ {
if err := json.Unmarshal(data, &bp); err != nil {
b.Errorf("unable to unmarshal nanosecond data: %s", err.Error())
}
b.SetBytes(int64(len(data)))
}
}
func BenchmarkUnmarshalJSON10Tags(b *testing.B) {
var bp client.BatchPoints
data := []byte(`
{
"database": "foo",
"retentionPolicy": "bar",
"points": [
{
"name": "cpu",
"tags": {
"host": "server01",
"region": "us-east1",
"tag1": "value1",
"tag2": "value2",
"tag2": "value3",
"tag4": "value4",
"tag5": "value5",
"tag6": "value6",
"tag7": "value7",
"tag8": "value8"
},
"time": 14244733039069373,
"precision": "n",
"fields": {
"value": 4541770385657154000
}
}
]
}
`)
for i := 0; i < b.N; i++ {
if err := json.Unmarshal(data, &bp); err != nil {
b.Errorf("unable to unmarshal nanosecond data: %s", err.Error())
}
b.SetBytes(int64(len(data)))
}
}
func TestNewClient(t *testing.T) {
config := client.Config{}
_, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Ping(t *testing.T) {
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
d, version, err := c.Ping()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if d.Nanoseconds() == 0 {
t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds())
}
if version != "x.x" {
t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version)
}
}
func TestClient_Query(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_ChunkedQuery(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
_ = enc.Encode(data)
_ = enc.Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{Chunked: true}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_BasicAuth(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u, p, ok := r.BasicAuth()
if !ok {
t.Errorf("basic auth error")
}
if u != "username" {
t.Errorf("unexpected username, expected %q, actual %q", "username", u)
}
if p != "password" {
t.Errorf("unexpected password, expected %q, actual %q", "password", p)
}
w.WriteHeader(http.StatusNoContent)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
u.User = url.UserPassword("username", "password")
config := client.Config{URL: *u, Username: "username", Password: "password"}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
_, _, err = c.Ping()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Write(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
bp := client.BatchPoints{}
r, err := c.Write(bp)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if r != nil {
t.Fatalf("unexpected response. expected %v, actual %v", nil, r)
}
}
func TestClient_UserAgent(t *testing.T) {
receivedUserAgent := ""
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
receivedUserAgent = r.UserAgent()
var data client.Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
_, err := http.Get(ts.URL)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
tests := []struct {
name string
userAgent string
expected string
}{
{
name: "Empty user agent",
userAgent: "",
expected: "InfluxDBClient",
},
{
name: "Custom user agent",
userAgent: "Test Influx Client",
expected: "Test Influx Client",
},
}
for _, test := range tests {
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u, UserAgent: test.userAgent}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
receivedUserAgent = ""
query := client.Query{}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
bp := client.BatchPoints{}
_, err = c.Write(bp)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
_, _, err = c.Ping()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if receivedUserAgent != test.expected {
t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
}
}
func TestClient_Messages(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`))
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{}
resp, err := c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
if got, exp := len(resp.Results), 1; got != exp {
t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got)
}
r := resp.Results[0]
if got, exp := len(r.Messages), 1; got != exp {
t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got)
}
m := r.Messages[0]
if got, exp := m.Level, "warning"; got != exp {
t.Errorf("unexpected message level. expected %v, actual %v", exp, got)
}
if got, exp := m.Text, "deprecation test"; got != exp {
t.Errorf("unexpected message text. expected %v, actual %v", exp, got)
}
}
func TestPoint_UnmarshalEpoch(t *testing.T) {
now := time.Now()
tests := []struct {
name string
epoch int64
precision string
expected time.Time
}{
{
name: "nanoseconds",
epoch: now.UnixNano(),
precision: "n",
expected: now,
},
{
name: "microseconds",
epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond),
precision: "u",
expected: now.Round(time.Microsecond),
},
{
name: "milliseconds",
epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond),
precision: "ms",
expected: now.Round(time.Millisecond),
},
{
name: "seconds",
epoch: now.Round(time.Second).UnixNano() / int64(time.Second),
precision: "s",
expected: now.Round(time.Second),
},
{
name: "minutes",
epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute),
precision: "m",
expected: now.Round(time.Minute),
},
{
name: "hours",
epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour),
precision: "h",
expected: now.Round(time.Hour),
},
{
name: "max int64",
epoch: 9223372036854775807,
precision: "n",
expected: time.Unix(0, 9223372036854775807),
},
{
name: "100 years from now",
epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(),
precision: "n",
expected: now.Add(time.Hour * 24 * 365 * 100),
},
}
for _, test := range tests {
t.Logf("testing %q\n", test.name)
data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision))
t.Logf("json: %s", string(data))
var p client.Point
err := json.Unmarshal(data, &p)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if !p.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time)
}
}
}
func TestPoint_UnmarshalRFC(t *testing.T) {
now := time.Now().UTC()
tests := []struct {
name string
rfc string
now time.Time
expected time.Time
}{
{
name: "RFC3339Nano",
rfc: time.RFC3339Nano,
now: now,
expected: now,
},
{
name: "RFC3339",
rfc: time.RFC3339,
now: now.Round(time.Second),
expected: now.Round(time.Second),
},
}
for _, test := range tests {
t.Logf("testing %q\n", test.name)
ts := test.now.Format(test.rfc)
data := []byte(fmt.Sprintf(`{"time": %q}`, ts))
t.Logf("json: %s", string(data))
var p client.Point
err := json.Unmarshal(data, &p)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if !p.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time)
}
}
}
func TestPoint_MarshalOmitempty(t *testing.T) {
now := time.Now().UTC()
tests := []struct {
name string
point client.Point
now time.Time
expected string
}{
{
name: "all empty",
point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}},
now: now,
expected: `{"measurement":"cpu","fields":{"value":1.1}}`,
},
{
name: "with time",
point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now},
now: now,
expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)),
},
{
name: "with tags",
point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}},
now: now,
expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`,
},
{
name: "with precision",
point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"},
now: now,
expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`,
},
}
for _, test := range tests {
t.Logf("testing %q\n", test.name)
b, err := json.Marshal(&test.point)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if test.expected != string(b) {
t.Fatalf("Unexpected result. expected: %v, actual: %v", test.expected, string(b))
}
}
}
func TestEpochToTime(t *testing.T) {
now := time.Now()
tests := []struct {
name string
epoch int64
precision string
expected time.Time
}{
{name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now},
{name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)},
{name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)},
{name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)},
{name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)},
{name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)},
}
for _, test := range tests {
t.Logf("testing %q\n", test.name)
tm, e := client.EpochToTime(test.epoch, test.precision)
if e != nil {
t.Fatalf("unexpected error: expected %v, actual: %v", nil, e)
}
if !tm.Equal(test.expected) {
t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm)
}
}
}
// helper functions
func emptyTestServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(50 * time.Millisecond)
w.Header().Set("X-Influxdb-Version", "x.x")
}))
}
// Ensure that data with epoch times can be decoded.
func TestBatchPoints_Normal(t *testing.T) {
var bp client.BatchPoints
data := []byte(`
{
"database": "foo",
"retentionPolicy": "bar",
"points": [
{
"name": "cpu",
"tags": {
"host": "server01"
},
"time": 14244733039069373,
"precision": "n",
"values": {
"value": 4541770385657154000
}
},
{
"name": "cpu",
"tags": {
"host": "server01"
},
"time": 14244733039069380,
"precision": "n",
"values": {
"value": 7199311900554737000
}
}
]
}
`)
if err := json.Unmarshal(data, &bp); err != nil {
t.Errorf("unable to unmarshal nanosecond data: %s", err.Error())
}
}
func TestClient_Timeout(t *testing.T) {
done := make(chan bool)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
<-done
}))
defer ts.Close()
defer func() { done <- true }()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u, Timeout: 500 * time.Millisecond}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{}
_, err = c.Query(query)
if err == nil {
t.Fatalf("unexpected success. expected timeout error")
} else if !strings.Contains(err.Error(), "request canceled") &&
!strings.Contains(err.Error(), "use of closed network connection") {
t.Fatalf("unexpected error. expected 'request canceled' error, got %v", err)
}
}
func TestClient_NoTimeout(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(1 * time.Second)
var data client.Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_WriteUint64(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
bp := client.BatchPoints{
Points: []client.Point{
{
Fields: map[string]interface{}{"value": uint64(10)},
},
},
}
r, err := c.Write(bp)
if err == nil {
t.Fatalf("unexpected error. expected err, actual %v", err)
}
if r != nil {
t.Fatalf("unexpected response. expected %v, actual %v", nil, r)
}
}
func TestClient_ParseConnectionString_IPv6(t *testing.T) {
path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086"
u, err := client.ParseConnectionString(path, false)
if err != nil {
t.Fatalf("unexpected error, expected %v, actual %v", nil, err)
}
if u.Host != path {
t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host)
}
}
func TestClient_CustomCertificates(t *testing.T) {
// generated with:
// openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf
// influx.cnf:
// [req]
// distinguished_name = req_distinguished_name
// x509_extensions = v3_req
// prompt = no
// [req_distinguished_name]
// C = US
// ST = CA
// L = San Francisco
// O = InfluxDB
// CN = github.com/influxdata
// [v3_req]
// keyUsage = keyEncipherment, dataEncipherment
// extendedKeyUsage = serverAuth
// subjectAltName = @alt_names
// [alt_names]
// IP.1 = 127.0.0.1
//
key := `
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi
4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv
qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS
1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t
WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa
mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m
hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I
dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi
b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu
36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m
u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH
FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt
byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/
vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6
aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6
BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K
Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0
3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T
OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi
elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1
2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K
5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk
bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C
cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg
/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ
cvh4WzEegcExTppINW1NB5E=
-----END PRIVATE KEY-----
`
cert := `
-----BEGIN CERTIFICATE-----
MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G
A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1
NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw
FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE
AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK
JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr
XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+
3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK
u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW
37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti
MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw
MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN
AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ
m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F
3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk
rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY
jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW
war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI=
-----END CERTIFICATE-----
`
cer, err := tls.X509KeyPair([]byte(cert), []byte(key))
if err != nil {
t.Fatalf("Received error: %v", err)
}
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data client.Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}}
server.TLS.BuildNameToCertificate()
server.StartTLS()
defer server.Close()
certFile, _ := ioutil.TempFile("", "influx-cert-")
certFile.WriteString(cert)
certFile.Close()
defer os.Remove(certFile.Name())
u, _ := url.Parse(server.URL)
tests := []struct {
name string
unsafeSsl bool
expected error
}{
{name: "validate certificates", unsafeSsl: false, expected: errors.New("error")},
{name: "not validate certificates", unsafeSsl: true, expected: nil},
}
for _, test := range tests {
config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := client.Query{}
_, err = c.Query(query)
if (test.expected == nil) != (err == nil) {
t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl)
}
}
}
func TestChunkedResponse(t *testing.T) {
s := `{"results":[{},{}]}{"results":[{}]}`
r := client.NewChunkedResponse(strings.NewReader(s))
resp, err := r.NextResponse()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
} else if actual := len(resp.Results); actual != 2 {
t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual)
}
resp, err = r.NextResponse()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
} else if actual := len(resp.Results); actual != 1 {
t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual)
}
resp, err = r.NextResponse()
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
} else if resp != nil {
t.Fatalf("unexpected response. expected %v, actual %v", nil, resp)
}
}

View File

@ -0,0 +1,609 @@
// Package client (v2) is the current official Go client for InfluxDB.
package client // import "github.com/influxdata/influxdb/client/v2"
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/models"
)
// HTTPConfig is the config data needed to create an HTTP Client.
type HTTPConfig struct {
// Addr should be of the form "http://host:port"
// or "http://[ipv6-host%zone]:port".
Addr string
// Username is the influxdb username, optional.
Username string
// Password is the influxdb password, optional.
Password string
// UserAgent is the http User Agent, defaults to "InfluxDBClient".
UserAgent string
// Timeout for influxdb writes, defaults to no timeout.
Timeout time.Duration
// InsecureSkipVerify gets passed to the http client; if true, it will
// skip https certificate verification. Defaults to false.
InsecureSkipVerify bool
// TLSConfig allows the user to set their own TLS config for the HTTP
// Client. If set, this option overrides InsecureSkipVerify.
TLSConfig *tls.Config
}
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
type BatchPointsConfig struct {
// Precision is the write precision of the points, defaults to "ns".
Precision string
// Database is the database to write points to.
Database string
// RetentionPolicy is the retention policy of the points.
RetentionPolicy string
// WriteConsistency is the number of servers required to confirm the write.
WriteConsistency string
}
// Client is a client interface for writing & querying the database.
type Client interface {
// Ping checks the status of the cluster, and will always return 0 time and no
// error for UDP clients.
Ping(timeout time.Duration) (time.Duration, string, error)
// Write takes a BatchPoints object and writes all Points to InfluxDB.
Write(bp BatchPoints) error
// Query makes an InfluxDB Query on the database. This will fail if using
// the UDP client.
Query(q Query) (*Response, error)
// Close releases any resources a Client may be using.
Close() error
}
// NewHTTPClient returns a new Client from the provided config.
// Client is safe for concurrent use by multiple goroutines.
func NewHTTPClient(conf HTTPConfig) (Client, error) {
if conf.UserAgent == "" {
conf.UserAgent = "InfluxDBClient"
}
u, err := url.Parse(conf.Addr)
if err != nil {
return nil, err
} else if u.Scheme != "http" && u.Scheme != "https" {
m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
" must start with http:// or https://", u.Scheme)
return nil, errors.New(m)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: conf.InsecureSkipVerify,
},
}
if conf.TLSConfig != nil {
tr.TLSClientConfig = conf.TLSConfig
}
return &client{
url: *u,
username: conf.Username,
password: conf.Password,
useragent: conf.UserAgent,
httpClient: &http.Client{
Timeout: conf.Timeout,
Transport: tr,
},
transport: tr,
}, nil
}
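// A minimal construction sketch; the address and timeout below are
// illustrative assumptions, not library defaults. To pin certificates,
// set TLSConfig instead of InsecureSkipVerify (it takes precedence):
//
//	c, err := NewHTTPClient(HTTPConfig{
//		Addr:    "http://localhost:8086",
//		Timeout: 5 * time.Second,
//	})
//	if err != nil {
//		// handle the configuration error
//	}
//	defer c.Close()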
// Ping checks whether the server is up, with an optional timeout to wait for the leader.
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
now := time.Now()
u := c.url
u.Path = "ping"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return 0, "", err
}
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
if timeout > 0 {
params := req.URL.Query()
params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
req.URL.RawQuery = params.Encode()
}
resp, err := c.httpClient.Do(req)
if err != nil {
return 0, "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return 0, "", err
}
if resp.StatusCode != http.StatusNoContent {
var err = errors.New(string(body))
return 0, "", err
}
version := resp.Header.Get("X-Influxdb-Version")
return time.Since(now), version, nil
}
// Close releases the client's resources.
func (c *client) Close() error {
c.transport.CloseIdleConnections()
return nil
}
// client is safe for concurrent use as the fields are all read-only
// once the client is instantiated.
type client struct {
// N.B. - if url.UserInfo is accessed in future modifications to the
// methods on client, you will need to synchronise access to url.
url url.URL
username string
password string
useragent string
httpClient *http.Client
transport *http.Transport
}
// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe; you must create a separate
// batch for each goroutine.
type BatchPoints interface {
// AddPoint adds the given point to the Batch of points.
AddPoint(p *Point)
// AddPoints adds the given points to the Batch of points.
AddPoints(ps []*Point)
// Points lists the points in the Batch.
Points() []*Point
// Precision returns the currently set precision of this Batch.
Precision() string
// SetPrecision sets the precision of this batch.
SetPrecision(s string) error
// Database returns the currently set database of this Batch.
Database() string
// SetDatabase sets the database of this Batch.
SetDatabase(s string)
// WriteConsistency returns the currently set write consistency of this Batch.
WriteConsistency() string
// SetWriteConsistency sets the write consistency of this Batch.
SetWriteConsistency(s string)
// RetentionPolicy returns the currently set retention policy of this Batch.
RetentionPolicy() string
// SetRetentionPolicy sets the retention policy of this Batch.
SetRetentionPolicy(s string)
}
// NewBatchPoints returns a BatchPoints interface based on the given config.
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
if conf.Precision == "" {
conf.Precision = "ns"
}
if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
return nil, err
}
bp := &batchpoints{
database: conf.Database,
precision: conf.Precision,
retentionPolicy: conf.RetentionPolicy,
writeConsistency: conf.WriteConsistency,
}
return bp, nil
}
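// A usage sketch; the database name is an assumption. Precision is
// validated by parsing "1"+Precision as a duration, so "s", "ms", "us",
// and "ns" succeed while something like "foobar" returns an error:
//
//	bp, err := NewBatchPoints(BatchPointsConfig{
//		Database:  "mydb",
//		Precision: "s",
//	})
//	if err != nil {
//		// e.g. an unparsable precision
//	}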
type batchpoints struct {
points []*Point
database string
precision string
retentionPolicy string
writeConsistency string
}
func (bp *batchpoints) AddPoint(p *Point) {
bp.points = append(bp.points, p)
}
func (bp *batchpoints) AddPoints(ps []*Point) {
bp.points = append(bp.points, ps...)
}
func (bp *batchpoints) Points() []*Point {
return bp.points
}
func (bp *batchpoints) Precision() string {
return bp.precision
}
func (bp *batchpoints) Database() string {
return bp.database
}
func (bp *batchpoints) WriteConsistency() string {
return bp.writeConsistency
}
func (bp *batchpoints) RetentionPolicy() string {
return bp.retentionPolicy
}
func (bp *batchpoints) SetPrecision(p string) error {
if _, err := time.ParseDuration("1" + p); err != nil {
return err
}
bp.precision = p
return nil
}
func (bp *batchpoints) SetDatabase(db string) {
bp.database = db
}
func (bp *batchpoints) SetWriteConsistency(wc string) {
bp.writeConsistency = wc
}
func (bp *batchpoints) SetRetentionPolicy(rp string) {
bp.retentionPolicy = rp
}
// Point represents a single data point.
type Point struct {
pt models.Point
}
// NewPoint returns a point with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewPoint(
name string,
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
) (*Point, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}
pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
if err != nil {
return nil, err
}
return &Point{
pt: pt,
}, nil
}
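// A sketch of both forms; measurement, tag, and field values are
// illustrative:
//
//	pt, err := NewPoint("cpu_usage",
//		map[string]string{"host": "server01"},
//		map[string]interface{}{"idle": 10.1},
//		time.Now())
//
//	// without a timestamp the server assigns its local time on receipt
//	pt, err = NewPoint("cpu_usage", nil,
//		map[string]interface{}{"idle": 10.1})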
// String returns a line-protocol string of the Point.
func (p *Point) String() string {
return p.pt.String()
}
// PrecisionString returns a line-protocol string of the Point,
// with the timestamp formatted for the given precision.
func (p *Point) PrecisionString(precision string) string {
return p.pt.PrecisionString(precision)
}
// Name returns the measurement name of the point.
func (p *Point) Name() string {
return string(p.pt.Name())
}
// Tags returns the tags associated with the point.
func (p *Point) Tags() map[string]string {
return p.pt.Tags().Map()
}
// Time returns the timestamp of the point.
func (p *Point) Time() time.Time {
return p.pt.Time()
}
// UnixNano returns the timestamp of the point in nanoseconds since the Unix epoch.
func (p *Point) UnixNano() int64 {
return p.pt.UnixNano()
}
// Fields returns the fields for the point.
func (p *Point) Fields() (map[string]interface{}, error) {
return p.pt.Fields()
}
// NewPointFrom returns a point from the provided models.Point.
func NewPointFrom(pt models.Point) *Point {
return &Point{pt: pt}
}
func (c *client) Write(bp BatchPoints) error {
var b bytes.Buffer
for _, p := range bp.Points() {
if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
return err
}
if err := b.WriteByte('\n'); err != nil {
return err
}
}
u := c.url
u.Path = "write"
req, err := http.NewRequest("POST", u.String(), &b)
if err != nil {
return err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("db", bp.Database())
params.Set("rp", bp.RetentionPolicy())
params.Set("precision", bp.Precision())
params.Set("consistency", bp.WriteConsistency())
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
var err = errors.New(string(body))
return err
}
return nil
}
// Query defines a query to send to the server.
type Query struct {
Command string
Database string
Precision string
Chunked bool
ChunkSize int
Parameters map[string]interface{}
}
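// A chunked-query sketch, assuming an existing Client c; the statement,
// database, and chunk size are illustrative:
//
//	q := NewQuery("SELECT * FROM cpu", "mydb", "ns")
//	q.Chunked = true
//	q.ChunkSize = 10000
//	resp, err := c.Query(q)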
// NewQuery returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
func NewQuery(command, database, precision string) Query {
return Query{
Command: command,
Database: database,
Precision: precision,
Parameters: make(map[string]interface{}),
}
}
// NewQueryWithParameters returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
// parameters is a map of the parameter names used in the command to their values.
func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
return Query{
Command: command,
Database: database,
Precision: precision,
Parameters: parameters,
}
}
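// A bound-parameter sketch; InfluxQL refers to a parameter with a
// leading "$", and the names and values below are assumptions:
//
//	q := NewQueryWithParameters(
//		"SELECT * FROM cpu WHERE host = $host",
//		"mydb", "",
//		map[string]interface{}{"host": "server01"},
//	)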
// Response represents a list of statement results.
type Response struct {
Results []Result
Err string `json:"error,omitempty"`
}
// Error returns the first error from any statement.
// It returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
if r.Err != "" {
return errors.New(r.Err)
}
for _, result := range r.Results {
if result.Err != "" {
return errors.New(result.Err)
}
}
return nil
}
// Message represents a user message.
type Message struct {
Level string
Text string
}
// Result represents a resultset returned from a single statement.
type Result struct {
Series []models.Row
Messages []*Message
Err string `json:"error,omitempty"`
}
// Query sends a command to the server and returns the Response.
func (c *client) Query(q Query) (*Response, error) {
u := c.url
u.Path = "query"
jsonParameters, err := json.Marshal(q.Parameters)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("q", q.Command)
params.Set("db", q.Database)
params.Set("params", string(jsonParameters))
if q.Chunked {
params.Set("chunked", "true")
if q.ChunkSize > 0 {
params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
}
}
if q.Precision != "" {
params.Set("epoch", q.Precision)
}
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
if q.Chunked {
cr := NewChunkedResponse(resp.Body)
for {
r, err := cr.NextResponse()
if err != nil {
// If we got an error while decoding the response, send that back.
return nil, err
}
if r == nil {
break
}
response.Results = append(response.Results, r.Results...)
if r.Err != "" {
response.Err = r.Err
break
}
}
} else {
dec := json.NewDecoder(resp.Body)
dec.UseNumber()
decErr := dec.Decode(&response)
// ignore this error if we got an invalid status code
if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
decErr = nil
}
// If we got a valid decode error, send that back
if decErr != nil {
return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
}
}
// If we don't have an error in our JSON response and didn't get http.StatusOK,
// then send back an error.
if resp.StatusCode != http.StatusOK && response.Error() == nil {
return &response, fmt.Errorf("received status code %d from server",
resp.StatusCode)
}
return &response, nil
}
// duplexReader reads responses and copies what it reads to another writer
// while satisfying the io.Reader interface.
type duplexReader struct {
r io.Reader
w io.Writer
}
func (r *duplexReader) Read(p []byte) (n int, err error) {
n, err = r.r.Read(p)
if err == nil {
r.w.Write(p[:n])
}
return n, err
}
// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
dec *json.Decoder
duplex *duplexReader
buf bytes.Buffer
}
// NewChunkedResponse reads a stream and produces responses from the stream.
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
resp := &ChunkedResponse{}
resp.duplex = &duplexReader{r: r, w: &resp.buf}
resp.dec = json.NewDecoder(resp.duplex)
resp.dec.UseNumber()
return resp
}
// NextResponse reads the next line of the stream and returns a response.
func (r *ChunkedResponse) NextResponse() (*Response, error) {
var response Response
if err := r.dec.Decode(&response); err != nil {
if err == io.EOF {
return nil, nil
}
// A decoding error happened. This probably means the server crashed
// and sent a last-ditch error message to us. Ensure we have read the
// entirety of the connection to get any remaining error text.
io.Copy(ioutil.Discard, r.duplex)
return nil, errors.New(strings.TrimSpace(r.buf.String()))
}
r.buf.Reset()
return &response, nil
}
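// A consumption sketch: NextResponse returns (nil, nil) on a clean EOF,
// so a typical loop over a stream r of newline-delimited Response JSON
// looks like this:
//
//	cr := NewChunkedResponse(r)
//	for {
//		resp, err := cr.NextResponse()
//		if err != nil {
//			// stream was cut short or carried an error payload
//			break
//		}
//		if resp == nil {
//			break // clean EOF
//		}
//		// use resp.Results
//	}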

View File

@ -0,0 +1,563 @@
package client
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"sync"
"testing"
"time"
)
func TestUDPClient_Query(t *testing.T) {
config := UDPConfig{Addr: "localhost:8089"}
c, err := NewUDPClient(config)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
defer c.Close()
query := Query{}
_, err = c.Query(query)
if err == nil {
t.Error("Querying UDP client should fail")
}
}
func TestUDPClient_Ping(t *testing.T) {
config := UDPConfig{Addr: "localhost:8089"}
c, err := NewUDPClient(config)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
defer c.Close()
rtt, version, err := c.Ping(0)
if rtt != 0 || version != "" || err != nil {
t.Errorf("unexpected error. expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err)
}
}
func TestUDPClient_Write(t *testing.T) {
config := UDPConfig{Addr: "localhost:8089"}
c, err := NewUDPClient(config)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
defer c.Close()
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
fields := make(map[string]interface{})
fields["value"] = 1.0
pt, _ := NewPoint("cpu", make(map[string]string), fields)
bp.AddPoint(pt)
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestUDPClient_BadAddr(t *testing.T) {
config := UDPConfig{Addr: "foobar@wahoo"}
c, err := NewUDPClient(config)
if err == nil {
defer c.Close()
t.Error("Expected resolve error")
}
}
func TestUDPClient_Batches(t *testing.T) {
var logger writeLogger
var cl udpclient
cl.conn = &logger
cl.payloadSize = 20 // should allow for two points per batch
// expected point should look like this: "cpu a=1i"
fields := map[string]interface{}{"a": 1}
p, _ := NewPoint("cpu", nil, fields, time.Time{})
bp, _ := NewBatchPoints(BatchPointsConfig{})
for i := 0; i < 9; i++ {
bp.AddPoint(p)
}
if err := cl.Write(bp); err != nil {
t.Fatalf("Unexpected error during Write: %v", err)
}
if len(logger.writes) != 5 {
t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5)
}
}
func TestUDPClient_Split(t *testing.T) {
var logger writeLogger
var cl udpclient
cl.conn = &logger
cl.payloadSize = 1 // force one field per point
fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4}
p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0))
bp, _ := NewBatchPoints(BatchPointsConfig{})
bp.AddPoint(p)
if err := cl.Write(bp); err != nil {
t.Fatalf("Unexpected error during Write: %v", err)
}
if len(logger.writes) != len(fields) {
t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields))
}
}
type writeLogger struct {
writes [][]byte
}
func (w *writeLogger) Write(b []byte) (int, error) {
w.writes = append(w.writes, append([]byte(nil), b...))
return len(b), nil
}
func (w *writeLogger) Close() error { return nil }
func TestClient_Query(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, _ := NewHTTPClient(config)
defer c.Close()
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_ChunkedQuery(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
_ = enc.Encode(data)
_ = enc.Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, err := NewHTTPClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
query := Query{Chunked: true}
_, err = c.Query(query)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_BoundParameters(t *testing.T) {
var parameterString string
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
r.ParseForm()
parameterString = r.FormValue("params")
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, _ := NewHTTPClient(config)
defer c.Close()
expectedParameters := map[string]interface{}{
"testStringParameter": "testStringValue",
"testNumberParameter": 12.3,
}
query := Query{
Parameters: expectedParameters,
}
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
var actualParameters map[string]interface{}
err = json.Unmarshal([]byte(parameterString), &actualParameters)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !reflect.DeepEqual(expectedParameters, actualParameters) {
t.Errorf("unexpected parameters. expected %v, actual %v", expectedParameters, actualParameters)
}
}
func TestClient_BasicAuth(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u, p, ok := r.BasicAuth()
if !ok {
t.Errorf("basic auth error")
}
if u != "username" {
t.Errorf("unexpected username, expected %q, actual %q", "username", u)
}
if p != "password" {
t.Errorf("unexpected password, expected %q, actual %q", "password", p)
}
var data Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"}
c, _ := NewHTTPClient(config)
defer c.Close()
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Ping(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, _ := NewHTTPClient(config)
defer c.Close()
_, _, err := c.Ping(0)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Concurrent_Use(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{}`))
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, _ := NewHTTPClient(config)
defer c.Close()
var wg sync.WaitGroup
wg.Add(3)
n := 1000
errC := make(chan error)
go func() {
defer wg.Done()
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
errC <- fmt.Errorf("got error %v", err)
return
}
for i := 0; i < n; i++ {
if err = c.Write(bp); err != nil {
errC <- fmt.Errorf("got error %v", err)
return
}
}
}()
go func() {
defer wg.Done()
var q Query
for i := 0; i < n; i++ {
if _, err := c.Query(q); err != nil {
errC <- fmt.Errorf("got error %v", err)
return
}
}
}()
go func() {
defer wg.Done()
for i := 0; i < n; i++ {
c.Ping(time.Second)
}
}()
go func() {
wg.Wait()
close(errC)
}()
for err := range errC {
if err != nil {
t.Error(err)
}
}
}
func TestClient_Write(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := HTTPConfig{Addr: ts.URL}
c, _ := NewHTTPClient(config)
defer c.Close()
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_UserAgent(t *testing.T) {
receivedUserAgent := ""
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
receivedUserAgent = r.UserAgent()
var data Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
_, err := http.Get(ts.URL)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
tests := []struct {
name string
userAgent string
expected string
}{
{
name: "Empty user agent",
userAgent: "",
expected: "InfluxDBClient",
},
{
name: "Custom user agent",
userAgent: "Test Influx Client",
expected: "Test Influx Client",
},
}
for _, test := range tests {
config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent}
c, _ := NewHTTPClient(config)
defer c.Close()
receivedUserAgent = ""
query := Query{}
_, err = c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
bp, _ := NewBatchPoints(BatchPointsConfig{})
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if receivedUserAgent != test.expected {
t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
}
}
}
func TestClient_PointString(t *testing.T) {
const shortForm = "2006-Jan-02"
time1, _ := time.Parse(shortForm, "2013-Feb-03")
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields, time1)
s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000"
if p.String() != s {
t.Errorf("Point String Error, got %s, expected %s", p.String(), s)
}
s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000"
if p.PrecisionString("ms") != s {
t.Errorf("Point String Error, got %s, expected %s",
p.PrecisionString("ms"), s)
}
}
func TestClient_PointWithoutTimeString(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields)
s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39"
if p.String() != s {
t.Errorf("Point String Error, got %s, expected %s", p.String(), s)
}
if p.PrecisionString("ms") != s {
t.Errorf("Point String Error, got %s, expected %s",
p.PrecisionString("ms"), s)
}
}
func TestClient_PointName(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields)
exp := "cpu_usage"
if p.Name() != exp {
t.Errorf("Error, got %s, expected %s",
p.Name(), exp)
}
}
func TestClient_PointTags(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields)
if !reflect.DeepEqual(tags, p.Tags()) {
t.Errorf("Error, got %v, expected %v",
p.Tags(), tags)
}
}
func TestClient_PointUnixNano(t *testing.T) {
const shortForm = "2006-Jan-02"
time1, _ := time.Parse(shortForm, "2013-Feb-03")
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields, time1)
exp := int64(1359849600000000000)
if p.UnixNano() != exp {
t.Errorf("Error, got %d, expected %d",
p.UnixNano(), exp)
}
}
func TestClient_PointFields(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p, _ := NewPoint("cpu_usage", tags, fields)
pfields, err := p.Fields()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(fields, pfields) {
t.Errorf("Error, got %v, expected %v",
pfields, fields)
}
}
func TestBatchPoints_PrecisionError(t *testing.T) {
_, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"})
if err == nil {
t.Errorf("Precision: foobar should have errored")
}
bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"})
err = bp.SetPrecision("foobar")
if err == nil {
t.Errorf("Precision: foobar should have errored")
}
}
func TestBatchPoints_SettersGetters(t *testing.T) {
bp, _ := NewBatchPoints(BatchPointsConfig{
Precision: "ns",
Database: "db",
RetentionPolicy: "rp",
WriteConsistency: "wc",
})
if bp.Precision() != "ns" {
t.Errorf("Expected: %s, got %s", bp.Precision(), "ns")
}
if bp.Database() != "db" {
t.Errorf("Expected: %s, got %s", bp.Database(), "db")
}
if bp.RetentionPolicy() != "rp" {
t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp")
}
if bp.WriteConsistency() != "wc" {
t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc")
}
bp.SetDatabase("db2")
bp.SetRetentionPolicy("rp2")
bp.SetWriteConsistency("wc2")
err := bp.SetPrecision("s")
if err != nil {
t.Errorf("Did not expect error: %s", err.Error())
}
if bp.Precision() != "s" {
t.Errorf("Expected: %s, got %s", bp.Precision(), "s")
}
if bp.Database() != "db2" {
t.Errorf("Expected: %s, got %s", bp.Database(), "db2")
}
if bp.RetentionPolicy() != "rp2" {
t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2")
}
if bp.WriteConsistency() != "wc2" {
t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2")
}
}

View File

@ -0,0 +1,265 @@
package client_test
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/influxdata/influxdb/client/v2"
)
// Create a new client
func ExampleClient() {
// NOTE: this assumes you've set up a user and the shell env variables
// INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below.
_, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
Username: os.Getenv("INFLUX_USER"),
Password: os.Getenv("INFLUX_PWD"),
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
}
// Write a point using the UDP client
func ExampleClient_uDP() {
// Make client
config := client.UDPConfig{Addr: "localhost:8089"}
c, err := client.NewUDPClient(config)
if err != nil {
fmt.Println("Error: ", err.Error())
}
defer c.Close()
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
fmt.Println("Error: ", err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
// Ping the cluster using the HTTP client
func ExampleClient_Ping() {
// Make client
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
defer c.Close()
_, _, err = c.Ping(0)
if err != nil {
fmt.Println("Error pinging InfluxDB Cluster: ", err.Error())
}
}
// Write a point using the HTTP client
func ExampleClient_write() {
// Make client
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
defer c.Close()
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "BumbleBeeTuna",
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
fmt.Println("Error: ", err.Error())
}
bp.AddPoint(pt)
// Write the batch
c.Write(bp)
}
// Create a batch and add a point
func ExampleBatchPoints() {
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "BumbleBeeTuna",
Precision: "s",
})
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
fmt.Println("Error: ", err.Error())
}
bp.AddPoint(pt)
}
// Using the BatchPoints setter functions
func ExampleBatchPoints_setters() {
// Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{})
bp.SetDatabase("BumbleBeeTuna")
bp.SetPrecision("ms")
// Create a point and add to batch
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err != nil {
fmt.Println("Error: ", err.Error())
}
bp.AddPoint(pt)
}
// Create a new point with a timestamp
func ExamplePoint() {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
if err == nil {
fmt.Println("We created a point: ", pt.String())
}
}
// Create a new point without a timestamp
func ExamplePoint_withoutTime() {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{
"idle": 10.1,
"system": 53.3,
"user": 46.6,
}
pt, err := client.NewPoint("cpu_usage", tags, fields)
if err == nil {
fmt.Println("We created a point w/o time: ", pt.String())
}
}
// Write 1000 points
func ExampleClient_write1000() {
sampleSize := 1000
// Make client
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
defer c.Close()
rand.Seed(42)
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: "systemstats",
Precision: "us",
})
for i := 0; i < sampleSize; i++ {
regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
tags := map[string]string{
"cpu": "cpu-total",
"host": fmt.Sprintf("host%d", rand.Intn(1000)),
"region": regions[rand.Intn(len(regions))],
}
idle := rand.Float64() * 100.0
fields := map[string]interface{}{
"idle": idle,
"busy": 100.0 - idle,
}
pt, err := client.NewPoint(
"cpu_usage",
tags,
fields,
time.Now(),
)
if err != nil {
println("Error:", err.Error())
continue
}
bp.AddPoint(pt)
}
err = c.Write(bp)
if err != nil {
fmt.Println("Error: ", err.Error())
}
}
// Make a Query
func ExampleClient_query() {
// Make client
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
defer c.Close()
q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns")
if response, err := c.Query(q); err == nil && response.Error() == nil {
fmt.Println(response.Results)
}
}
// Create a Database with a query
func ExampleClient_createDatabase() {
// Make client
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://localhost:8086",
})
if err != nil {
fmt.Println("Error creating InfluxDB Client: ", err.Error())
}
defer c.Close()
q := client.NewQuery("CREATE DATABASE telegraf", "", "")
if response, err := c.Query(q); err == nil && response.Error() == nil {
fmt.Println(response.Results)
}
}

112
vendor/github.com/influxdata/influxdb/client/v2/udp.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
package client
import (
"fmt"
"io"
"net"
"time"
)
const (
// UDPPayloadSize is a reasonable default payload size for UDP packets that
// could be travelling over the internet.
UDPPayloadSize = 512
)
// UDPConfig is the config data needed to create a UDP Client.
type UDPConfig struct {
// Addr should be of the form "host:port"
// or "[ipv6-host%zone]:port".
Addr string
// PayloadSize is the maximum size of a UDP client message; optional.
// Tune this based on your network. Defaults to UDPPayloadSize.
PayloadSize int
}
// NewUDPClient returns a client interface for writing to an InfluxDB UDP
// service from the given config.
func NewUDPClient(conf UDPConfig) (Client, error) {
var udpAddr *net.UDPAddr
udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, err
}
payloadSize := conf.PayloadSize
if payloadSize == 0 {
payloadSize = UDPPayloadSize
}
return &udpclient{
conn: conn,
payloadSize: payloadSize,
}, nil
}
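// A construction sketch; the address and payload size are assumptions
// (a larger payload such as 1450 bytes can suit a LAN with a known MTU):
//
//	c, err := NewUDPClient(UDPConfig{
//		Addr:        "localhost:8089",
//		PayloadSize: 1450,
//	})
//	if err != nil {
//		// handle the resolve/dial error
//	}
//	defer c.Close()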
// Close releases the udpclient's resources.
func (uc *udpclient) Close() error {
return uc.conn.Close()
}
type udpclient struct {
conn io.WriteCloser
payloadSize int
}
func (uc *udpclient) Write(bp BatchPoints) error {
var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
var d, _ = time.ParseDuration("1" + bp.Precision())
var delayedError error
var checkBuffer = func(n int) {
if len(b) > 0 && len(b)+n > uc.payloadSize {
if _, err := uc.conn.Write(b); err != nil {
delayedError = err
}
b = b[:0]
}
}
for _, p := range bp.Points() {
p.pt.Round(d)
pointSize := p.pt.StringSize() + 1 // include newline in size
checkBuffer(pointSize)
if p.Time().IsZero() || pointSize <= uc.payloadSize {
b = p.pt.AppendString(b)
b = append(b, '\n')
continue
}
points := p.pt.Split(uc.payloadSize - 1) // account for newline character
for _, sp := range points {
checkBuffer(sp.StringSize() + 1)
b = sp.AppendString(b)
b = append(b, '\n')
}
}
if len(b) > 0 {
if _, err := uc.conn.Write(b); err != nil {
return err
}
}
return delayedError
}
func (uc *udpclient) Query(q Query) (*Response, error) {
return nil, fmt.Errorf("Querying via UDP is not supported")
}
func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
return 0, "", nil
}

File diff suppressed because it is too large

View File

@ -0,0 +1,58 @@
package cli
import "testing"
func TestParseCommand_InsertInto(t *testing.T) {
t.Parallel()
c := CommandLine{}
tests := []struct {
cmd, db, rp string
}{
{
cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`,
db: "",
rp: "test",
},
{
cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`,
db: "",
rp: "test",
},
{
cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`,
db: "",
rp: "test test",
},
{
cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`,
db: "test",
rp: "test",
},
{
cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`,
db: "",
rp: "test test",
},
{
cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`,
db: "d b",
rp: "test test",
},
}
for _, test := range tests {
t.Logf("command: %s", test.cmd)
bp, err := c.parseInsert(test.cmd)
if err != nil {
t.Fatal(err)
}
if bp.Database != test.db {
t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, bp.Database)
}
if bp.RetentionPolicy != test.rp {
t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, bp.RetentionPolicy)
}
}
}

View File

@ -0,0 +1,594 @@
package cli_test
import (
"bufio"
"bytes"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"github.com/influxdata/influxdb/client"
"github.com/influxdata/influxdb/cmd/influx/cli"
"github.com/influxdata/influxdb/influxql"
"github.com/peterh/liner"
)
const (
CLIENT_VERSION = "y.y"
SERVER_VERSION = "x.x"
)
func TestNewCLI(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
if c == nil {
t.Fatal("CommandLine shouldn't be nil.")
}
if c.ClientVersion != CLIENT_VERSION {
t.Fatalf("CommandLine version is %s but should be %s", c.ClientVersion, CLIENT_VERSION)
}
}
func TestRunCLI(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
h, p, _ := net.SplitHostPort(u.Host)
c := cli.New(CLIENT_VERSION)
c.Host = h
c.Port, _ = strconv.Atoi(p)
c.IgnoreSignals = true
c.ForceTTY = true
go func() {
close(c.Quit)
}()
if err := c.Run(); err != nil {
t.Fatalf("Run failed with error: %s", err)
}
}
func TestRunCLI_ExecuteInsert(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
h, p, _ := net.SplitHostPort(u.Host)
c := cli.New(CLIENT_VERSION)
c.Host = h
c.Port, _ = strconv.Atoi(p)
c.ClientConfig.Precision = "ms"
c.Execute = "INSERT sensor,floor=1 value=2"
c.IgnoreSignals = true
c.ForceTTY = true
if err := c.Run(); err != nil {
t.Fatalf("Run failed with error: %s", err)
}
}
func TestSetAuth(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
u := "userx"
p := "pwdy"
c.SetAuth("auth " + u + " " + p)
// validate CLI configuration
if c.ClientConfig.Username != u {
t.Fatalf("Username is %s but should be %s", c.ClientConfig.Username, u)
}
if c.ClientConfig.Password != p {
t.Fatalf("Password is %s but should be %s", c.ClientConfig.Password, p)
}
}
func TestSetPrecision(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
// validate set non-default precision
p := "ns"
c.SetPrecision("precision " + p)
if c.ClientConfig.Precision != p {
t.Fatalf("Precision is %s but should be %s", c.ClientConfig.Precision, p)
}
// validate set default precision which equals empty string
p = "rfc3339"
c.SetPrecision("precision " + p)
if c.ClientConfig.Precision != "" {
t.Fatalf("Precision is %s but should be empty", c.ClientConfig.Precision)
}
}
func TestSetFormat(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
// validate set non-default format
f := "json"
c.SetFormat("format " + f)
if c.Format != f {
t.Fatalf("Format is %s but should be %s", c.Format, f)
}
}
func Test_SetChunked(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
// make sure chunked is on by default
if got, exp := c.Chunked, true; got != exp {
t.Fatalf("chunked should be on by default. got %v, exp %v", got, exp)
}
// turn chunked off
if err := c.ParseCommand("Chunked"); err != nil {
t.Fatalf("setting chunked failed: err: %s", err)
}
if got, exp := c.Chunked, false; got != exp {
t.Fatalf("setting chunked failed. got %v, exp %v", got, exp)
}
// turn chunked back on
if err := c.ParseCommand("Chunked"); err != nil {
t.Fatalf("setting chunked failed: err: %s", err)
}
if got, exp := c.Chunked, true; got != exp {
t.Fatalf("setting chunked failed. got %v, exp %v", got, exp)
}
}
func Test_SetChunkSize(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
// check default chunk size
if got, exp := c.ChunkSize, 0; got != exp {
t.Fatalf("unexpected chunk size. got %d, exp %d", got, exp)
}
tests := []struct {
command string
exp int
}{
{"chunk size 20", 20},
{" CHunk siZE 55 ", 55},
{"chunk 10", 10},
{" chuNK 15", 15},
{"chunk size -60", 0},
{"chunk size 10", 10},
{"chunk size 0", 0},
{"chunk size 10", 10},
{"chunk size junk", 10},
}
for _, test := range tests {
if err := c.ParseCommand(test.command); err != nil {
t.Logf("command: %q", test.command)
t.Fatalf("setting chunked failed: err: %s", err)
}
if got, exp := c.ChunkSize, test.exp; got != exp {
t.Logf("command: %q", test.command)
t.Fatalf("unexpected chunk size. got %d, exp %d", got, exp)
}
}
}
func TestSetWriteConsistency(t *testing.T) {
t.Parallel()
c := cli.New(CLIENT_VERSION)
config := client.NewConfig()
client, _ := client.NewClient(config)
c.Client = client
// set valid write consistency
consistency := "all"
c.SetWriteConsistency("consistency " + consistency)
if c.ClientConfig.WriteConsistency != consistency {
t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency)
}
// set different valid write consistency and validate change
consistency = "quorum"
c.SetWriteConsistency("consistency " + consistency)
if c.ClientConfig.WriteConsistency != consistency {
t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency)
}
// set invalid write consistency and verify there was no change
invalidConsistency := "invalid_consistency"
c.SetWriteConsistency("consistency " + invalidConsistency)
if c.ClientConfig.WriteConsistency == invalidConsistency {
t.Fatalf("WriteConsistency is %s but should be %s", c.ClientConfig.WriteConsistency, consistency)
}
}
func TestParseCommand_CommandsExist(t *testing.T) {
t.Parallel()
c, err := client.NewClient(client.Config{})
if err != nil {
t.Fatalf("unexpected error %v", err)
}
m := cli.CommandLine{Client: c, Line: liner.NewLiner()}
tests := []struct {
cmd string
}{
{cmd: "gopher"},
{cmd: "auth"},
{cmd: "help"},
{cmd: "format"},
{cmd: "precision"},
{cmd: "settings"},
}
for _, test := range tests {
if err := m.ParseCommand(test.cmd); err != nil {
t.Fatalf(`Got error %v for command %q, expected nil`, err, test.cmd)
}
}
}
func TestParseCommand_Connect(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
cmd := "connect " + u.Host
c := cli.CommandLine{}
// assert connection is established
if err := c.ParseCommand(cmd); err != nil {
t.Fatalf("There was an error while connecting to %v: %v", u.Path, err)
}
// assert server version is populated
if c.ServerVersion != SERVER_VERSION {
t.Fatalf("Server version is %s but should be %s.", c.ServerVersion, SERVER_VERSION)
}
}
func TestParseCommand_TogglePretty(t *testing.T) {
t.Parallel()
c := cli.CommandLine{}
if c.Pretty {
t.Fatalf(`Pretty should be false.`)
}
c.ParseCommand("pretty")
if !c.Pretty {
t.Fatalf(`Pretty should be true.`)
}
c.ParseCommand("pretty")
if c.Pretty {
t.Fatalf(`Pretty should be false.`)
}
}
func TestParseCommand_Exit(t *testing.T) {
t.Parallel()
tests := []struct {
cmd string
}{
{cmd: "exit"},
{cmd: " exit"},
{cmd: "exit "},
{cmd: "Exit "},
}
for _, test := range tests {
c := cli.CommandLine{Quit: make(chan struct{}, 1)}
c.ParseCommand(test.cmd)
// channel should be closed
if _, ok := <-c.Quit; ok {
t.Fatalf(`Command "exit" failed for %q.`, test.cmd)
}
}
}
func TestParseCommand_Quit(t *testing.T) {
t.Parallel()
tests := []struct {
cmd string
}{
{cmd: "quit"},
{cmd: " quit"},
{cmd: "quit "},
{cmd: "Quit "},
}
for _, test := range tests {
c := cli.CommandLine{Quit: make(chan struct{}, 1)}
c.ParseCommand(test.cmd)
// channel should be closed
if _, ok := <-c.Quit; ok {
t.Fatalf(`Command "quit" failed for %q.`, test.cmd)
}
}
}
func TestParseCommand_Use(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
tests := []struct {
cmd string
}{
{cmd: "use db"},
{cmd: " use db"},
{cmd: "use db "},
{cmd: "use db;"},
{cmd: "use db; "},
{cmd: "Use db"},
}
for _, test := range tests {
m := cli.CommandLine{Client: c}
if err := m.ParseCommand(test.cmd); err != nil {
t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)
}
if m.Database != "db" {
t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database)
}
}
}
func TestParseCommand_UseAuth(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
tests := []struct {
cmd string
user string
database string
}{
{
cmd: "use db",
user: "admin",
database: "db",
},
{
cmd: "use blank",
user: "admin",
database: "",
},
{
cmd: "use db",
user: "anonymous",
database: "db",
},
{
cmd: "use blank",
user: "anonymous",
database: "blank",
},
}
for i, tt := range tests {
config := client.Config{URL: *u, Username: tt.user}
fmt.Println("using auth:", tt.user)
c, err := client.NewClient(config)
if err != nil {
t.Errorf("%d. unexpected error. expected %v, actual %v", i, nil, err)
continue
}
m := cli.CommandLine{Client: c}
m.ClientConfig.Username = tt.user
if err := m.ParseCommand(tt.cmd); err != nil {
t.Fatalf(`%d. Got error %v for command %q, expected nil.`, i, err, tt.cmd)
}
if m.Database != tt.database {
t.Fatalf(`%d. Command "use" changed database to %q. Expected %q`, i, m.Database, tt.database)
}
}
}
func TestParseCommand_Consistency(t *testing.T) {
t.Parallel()
c := cli.CommandLine{}
tests := []struct {
cmd string
}{
{cmd: "consistency one"},
{cmd: " consistency one"},
{cmd: "consistency one "},
{cmd: "consistency one;"},
{cmd: "consistency one; "},
{cmd: "Consistency one"},
}
for _, test := range tests {
if err := c.ParseCommand(test.cmd); err != nil {
t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)
}
if c.ClientConfig.WriteConsistency != "one" {
t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.ClientConfig.WriteConsistency)
}
}
}
func TestParseCommand_Insert(t *testing.T) {
t.Parallel()
ts := emptyTestServer()
defer ts.Close()
u, _ := url.Parse(ts.URL)
config := client.Config{URL: *u}
c, err := client.NewClient(config)
if err != nil {
t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
}
m := cli.CommandLine{Client: c}
tests := []struct {
cmd string
}{
{cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"},
{cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"},
{cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"},
{cmd: "insert cpu,host=serverA,region=us-west value=1.0 "},
{cmd: "insert"},
{cmd: "Insert "},
{cmd: "insert c"},
{cmd: "insert int"},
}
for _, test := range tests {
if err := m.ParseCommand(test.cmd); err != nil {
t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)
}
}
}
func TestParseCommand_History(t *testing.T) {
t.Parallel()
c := cli.CommandLine{Line: liner.NewLiner()}
defer c.Line.Close()
// append one entry to history
c.Line.AppendHistory("abc")
tests := []struct {
cmd string
}{
{cmd: "history"},
{cmd: " history"},
{cmd: "history "},
{cmd: "History "},
}
for _, test := range tests {
if err := c.ParseCommand(test.cmd); err != nil {
t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)
}
}
// buf size should be at least 1
var buf bytes.Buffer
c.Line.WriteHistory(&buf)
if buf.Len() < 1 {
t.Fatal("History is borked")
}
}
func TestParseCommand_HistoryWithBlankCommand(t *testing.T) {
t.Parallel()
c := cli.CommandLine{Line: liner.NewLiner()}
defer c.Line.Close()
// append one entry to history
c.Line.AppendHistory("x")
tests := []struct {
cmd string
err error
}{
{cmd: "history"},
{cmd: " history"},
{cmd: "history "},
{cmd: "", err: cli.ErrBlankCommand}, // shouldn't be persisted in history
{cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history
{cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history
}
// a blank command will return cli.ErrBlankCommand.
for _, test := range tests {
if err := c.ParseCommand(test.cmd); err != test.err {
t.Errorf(`Got error %v for command %q, expected %v`, err, test.cmd, test.err)
}
}
// buf shall not contain empty commands
var buf bytes.Buffer
c.Line.WriteHistory(&buf)
scanner := bufio.NewScanner(&buf)
for scanner.Scan() {
if strings.TrimSpace(scanner.Text()) == "" {
t.Fatal("Empty commands should not be persisted in history.")
}
}
}
// helper methods
func emptyTestServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Influxdb-Version", SERVER_VERSION)
// Fake authorization entirely based on the username.
authorized := false
user, _, _ := r.BasicAuth()
switch user {
case "", "admin":
authorized = true
}
switch r.URL.Path {
case "/query":
values := r.URL.Query()
parser := influxql.NewParser(bytes.NewBufferString(values.Get("q")))
q, err := parser.ParseQuery()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
stmt := q.Statements[0]
switch stmt.(type) {
case *influxql.ShowDatabasesStatement:
if authorized {
io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`)
} else {
w.WriteHeader(http.StatusUnauthorized)
io.WriteString(w, fmt.Sprintf(`{"error":"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege"}`, user))
}
case *influxql.ShowDiagnosticsStatement:
io.WriteString(w, `{"results":[{}]}`)
}
case "/write":
w.WriteHeader(http.StatusOK)
}
}))
}


@ -0,0 +1,34 @@
package cli
import (
"bytes"
"fmt"
)
func parseDatabaseAndRetentionPolicy(stmt []byte) (string, string, error) {
var db, rp []byte
var quoted bool
var separatorCount int
stmt = bytes.TrimSpace(stmt)
for _, b := range stmt {
if b == '"' {
quoted = !quoted
continue
}
if b == '.' && !quoted {
separatorCount++
if separatorCount > 1 {
return "", "", fmt.Errorf("unable to parse database and retention policy from %s", string(stmt))
}
continue
}
if separatorCount == 1 {
rp = append(rp, b)
continue
}
db = append(db, b)
}
return string(db), string(rp), nil
}
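A minimal sketch (not part of the vendored source) showing the quoting rules this parser implements; the inputs mirror the test cases that follow:

```go
package cli

import "fmt"

// illustrateDBRP demonstrates parseDatabaseAndRetentionPolicy: an unquoted
// dot separates database from retention policy, quotes protect embedded
// dots, and more than one unquoted dot is rejected.
func illustrateDBRP() {
	db, rp, _ := parseDatabaseAndRetentionPolicy([]byte(`"d b"."test test"`))
	fmt.Printf("db=%q rp=%q\n", db, rp) // db="d b" rp="test test"

	if _, _, err := parseDatabaseAndRetentionPolicy([]byte(`foo.bar.baz`)); err != nil {
		fmt.Println(err) // unable to parse database and retention policy from foo.bar.baz
	}
}
```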


@ -0,0 +1,90 @@
package cli
import (
"errors"
"testing"
)
func Test_parseDatabaseAndRetentionPolicy(t *testing.T) {
tests := []struct {
stmt string
db string
rp string
err error
}{
{
stmt: `foo`,
db: "foo",
},
{
stmt: `"foo.bar"`,
db: "foo.bar",
},
{
stmt: `"foo.bar".`,
db: "foo.bar",
},
{
stmt: `."foo.bar"`,
rp: "foo.bar",
},
{
stmt: `foo.bar`,
db: "foo",
rp: "bar",
},
{
stmt: `"foo".bar`,
db: "foo",
rp: "bar",
},
{
stmt: `"foo"."bar"`,
db: "foo",
rp: "bar",
},
{
stmt: `"foo.bin"."bar"`,
db: "foo.bin",
rp: "bar",
},
{
stmt: `"foo.bin"."bar.baz...."`,
db: "foo.bin",
rp: "bar.baz....",
},
{
stmt: ` "foo.bin"."bar.baz...." `,
db: "foo.bin",
rp: "bar.baz....",
},
{
stmt: `"foo.bin"."bar".boom`,
err: errors.New("foo"),
},
{
stmt: "foo.bar.",
err: errors.New("foo"),
},
}
for _, test := range tests {
db, rp, err := parseDatabaseAndRetentionPolicy([]byte(test.stmt))
if err != nil && test.err == nil {
t.Errorf("unexpected error: got %s", err)
continue
}
if test.err != nil && err == nil {
t.Errorf("expected err: got: nil, exp: %s", test.err)
continue
}
if db != test.db {
t.Errorf("unexpected database: got: %s, exp: %s", db, test.db)
}
if rp != test.rp {
t.Errorf("unexpected retention policy: got: %s, exp: %s", rp, test.rp)
}
}
}


@ -0,0 +1,120 @@
// The influx command is a CLI client to InfluxDB.
package main
import (
"flag"
"fmt"
"os"
"github.com/influxdata/influxdb/client"
"github.com/influxdata/influxdb/cmd/influx/cli"
)
// These variables are populated via the Go linker.
var (
version string
)
const (
// defaultFormat is the default format of the results when issuing queries
defaultFormat = "column"
// defaultPrecision is the default timestamp format of the results when issuing queries
defaultPrecision = "ns"
// defaultPPS is the default points per second that the import will throttle at
// by default it's 0, which means it will not throttle
defaultPPS = 0
)
func init() {
// If version is not set, make that clear.
if version == "" {
version = "unknown"
}
}
func main() {
c := cli.New(version)
fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError)
fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.")
fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.")
fs.StringVar(&c.ClientConfig.UnixSocket, "socket", "", "Influxdb unix socket to connect to.")
fs.StringVar(&c.ClientConfig.Username, "username", "", "Username to connect to the server.")
fs.StringVar(&c.ClientConfig.Password, "password", "", `Password to connect to the server. Leaving blank will prompt for password (--password="").`)
fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.")
fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.")
fs.BoolVar(&c.ClientConfig.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.")
fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.")
fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.")
fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.")
fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.")
fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.")
fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.")
fs.BoolVar(&c.Import, "import", false, "Import a previous database.")
fs.IntVar(&c.ImporterConfig.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.")
fs.StringVar(&c.ImporterConfig.Path, "path", "", "path to the file to import")
fs.BoolVar(&c.ImporterConfig.Compressed, "compressed", false, "set to true if the import file is compressed")
// Define our own custom usage to print
fs.Usage = func() {
fmt.Println(`Usage of influx:
-version
Display the version and exit.
-host 'host name'
Host to connect to.
-port 'port #'
Port to connect to.
-socket 'unix domain socket'
Unix socket to connect to.
-database 'database name'
Database to connect to the server.
-password 'password'
Password to connect to the server. Leaving blank will prompt for password (--password '').
-username 'username'
Username to connect to the server.
-ssl
Use https for requests.
-unsafeSsl
Set this when connecting to the cluster using https and not use SSL verification.
-execute 'command'
Execute command and quit.
-format 'json|csv|column'
Format specifies the format of the server responses: json, csv, or column.
-precision 'rfc3339|h|m|s|ms|u|ns'
Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns.
-consistency 'any|one|quorum|all'
Set write consistency level: any, one, quorum, or all
-pretty
Turns on pretty print for the json format.
-import
Import a previous database export from file
-pps
How many points per second the import will allow. By default it is zero and will not throttle importing.
-path
Path to file to import
-compressed
Set to true if the import file is compressed
Examples:
# Use influx in a non-interactive mode to query the database "metrics" and pretty print json:
$ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty
# Connect to a specific database on startup and set database context:
$ influx -database 'metrics' -host 'localhost' -port '8086'
`)
}
fs.Parse(os.Args[1:])
if c.ShowVersion {
c.Version()
os.Exit(0)
}
if err := c.Run(); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
}


@ -0,0 +1,107 @@
# `influx_inspect`
## Ways to run
### `influx_inspect`
Prints usage for the tool.
### `influx_inspect report`
Displays series metadata for all shards. Default location: `$HOME/.influxdb`.
### `influx_inspect dumptsm`
Dumps low-level details about tsm1 files.
#### Flags
##### `-index` bool
Dump raw index data.
`default` = false
##### `-blocks` bool
Dump raw block data.
`default` = false
##### `-all` bool
Dump all data. Caution: This may print a lot of information.
`default` = false
##### `-filter-key` string
Only display index and block data that match this key substring.
`default` = ""
### `influx_inspect export`
Exports all tsm files to line protocol. This output file can be imported via the [influx](https://github.com/influxdata/influxdb/tree/master/importer#running-the-import-command) command.
#### `-datadir` string
Data storage path.
`default` = "$HOME/.influxdb/data"
#### `-waldir` string
WAL storage path.
`default` = "$HOME/.influxdb/wal"
#### `-out` string
Destination file to export to
`default` = "$HOME/.influxdb/export"
#### `-database` string (optional)
Database to export.
`default` = ""
#### `-retention` string (optional)
Retention policy to export.
`default` = ""
#### `-start` string (optional)
The start of the time range to export (RFC3339 format).
#### `-end` string (optional)
The end of the time range to export (RFC3339 format).
#### `-compress` bool (optional)
Compress the output.
`default` = false
#### Sample Commands
Export entire database and compress output:
```
influx_inspect export --compress
```
Export specific retention policy:
```
influx_inspect export --database mydb --retention autogen
```
##### Sample Data
This is a sample of what the output will look like.
```
# DDL
CREATE DATABASE MY_DB_NAME
CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1
# DML
# CONTEXT-DATABASE:MY_DB_NAME
# CONTEXT-RETENTION-POLICY:autogen
randset value=97.9296104805 1439856000000000000
randset value=25.3849066842 1439856100000000000
```
## Caveats
The system does not have access to the meta store when exporting TSM shards. As such, it always creates the retention policy with infinite duration and replication factor of 1.
End users may want to change this prior to re-importing if they are importing to a cluster or want a different duration for retention.
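For example, before re-importing into a cluster one might edit the exported DDL along these lines (the duration and replication values are illustrative):
```
# as exported
CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1
# edited prior to import
CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION 90d REPLICATION 3
```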


@ -0,0 +1,474 @@
// Package dumptsi inspects low-level details about tsi1 files.
package dumptsi
import (
"flag"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"text/tabwriter"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
)
// Command represents the program execution for "influxd dumptsi".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
paths []string
showSeries bool
showMeasurements bool
showTagKeys bool
showTagValues bool
showTagValueSeries bool
measurementFilter *regexp.Regexp
tagKeyFilter *regexp.Regexp
tagValueFilter *regexp.Regexp
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
var measurementFilter, tagKeyFilter, tagValueFilter string
fs := flag.NewFlagSet("dumptsi", flag.ExitOnError)
fs.BoolVar(&cmd.showSeries, "series", false, "Show raw series data")
fs.BoolVar(&cmd.showMeasurements, "measurements", false, "Show raw measurement data")
fs.BoolVar(&cmd.showTagKeys, "tag-keys", false, "Show raw tag key data")
fs.BoolVar(&cmd.showTagValues, "tag-values", false, "Show raw tag value data")
fs.BoolVar(&cmd.showTagValueSeries, "tag-value-series", false, "Show raw series data for each value")
fs.StringVar(&measurementFilter, "measurement-filter", "", "Regex measurement filter")
fs.StringVar(&tagKeyFilter, "tag-key-filter", "", "Regex tag key filter")
fs.StringVar(&tagValueFilter, "tag-value-filter", "", "Regex tag value filter")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
return err
}
// Parse filters.
if measurementFilter != "" {
re, err := regexp.Compile(measurementFilter)
if err != nil {
return err
}
cmd.measurementFilter = re
}
if tagKeyFilter != "" {
re, err := regexp.Compile(tagKeyFilter)
if err != nil {
return err
}
cmd.tagKeyFilter = re
}
if tagValueFilter != "" {
re, err := regexp.Compile(tagValueFilter)
if err != nil {
return err
}
cmd.tagValueFilter = re
}
cmd.paths = fs.Args()
if len(cmd.paths) == 0 {
fmt.Printf("at least one path required\n\n")
fs.Usage()
return nil
}
// Some flags imply other flags.
if cmd.showTagValueSeries {
cmd.showTagValues = true
}
if cmd.showTagValues {
cmd.showTagKeys = true
}
if cmd.showTagKeys {
cmd.showMeasurements = true
}
return cmd.run()
}
func (cmd *Command) run() error {
// Build a file set from the paths on the command line.
idx, fs, err := cmd.readFileSet()
if err != nil {
return err
}
if idx != nil {
defer idx.Close()
} else {
defer fs.Close()
}
defer fs.Release()
// Show either raw data or summary stats.
if cmd.showSeries || cmd.showMeasurements {
if err := cmd.printMerged(fs); err != nil {
return err
}
} else {
if err := cmd.printFileSummaries(fs); err != nil {
return err
}
}
return nil
}
func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) {
// If only one path exists and it's a directory then open as an index.
if len(cmd.paths) == 1 {
fi, err := os.Stat(cmd.paths[0])
if err != nil {
return nil, nil, err
} else if fi.IsDir() {
idx := tsi1.NewIndex()
idx.Path = cmd.paths[0]
idx.CompactionEnabled = false
if err := idx.Open(); err != nil {
return nil, nil, err
}
return idx, idx.RetainFileSet(), nil
}
}
// Open each file and group into a fileset.
var files []tsi1.File
for _, path := range cmd.paths {
switch ext := filepath.Ext(path); ext {
case tsi1.LogFileExt:
f := tsi1.NewLogFile(path)
if err := f.Open(); err != nil {
return nil, nil, err
}
files = append(files, f)
case tsi1.IndexFileExt:
f := tsi1.NewIndexFile()
f.SetPath(path)
if err := f.Open(); err != nil {
return nil, nil, err
}
files = append(files, f)
default:
return nil, nil, fmt.Errorf("unexpected file extension: %s", ext)
}
}
fs, err := tsi1.NewFileSet(nil, files)
if err != nil {
return nil, nil, err
}
fs.Retain()
return nil, fs, nil
}
func (cmd *Command) printMerged(fs *tsi1.FileSet) error {
if err := cmd.printSeries(fs); err != nil {
return err
} else if err := cmd.printMeasurements(fs); err != nil {
return err
}
return nil
}
func (cmd *Command) printSeries(fs *tsi1.FileSet) error {
if !cmd.showSeries {
return nil
}
// Print header.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintln(tw, "Series\t")
// Iterate over each series.
itr := fs.SeriesIterator()
for e := itr.Next(); e != nil; e = itr.Next() {
name, tags := e.Name(), e.Tags()
if !cmd.matchSeries(e.Name(), e.Tags()) {
continue
}
fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(e.Deleted()))
}
// Flush & write footer spacing.
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error {
if !cmd.showMeasurements {
return nil
}
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintln(tw, "Measurement\t")
// Iterate over each series.
itr := fs.MeasurementIterator()
for e := itr.Next(); e != nil; e = itr.Next() {
if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(e.Name()) {
continue
}
fmt.Fprintf(tw, "%s\t%v\n", e.Name(), deletedString(e.Deleted()))
if err := tw.Flush(); err != nil {
return err
}
if err := cmd.printTagKeys(fs, e.Name()); err != nil {
return err
}
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error {
if !cmd.showTagKeys {
return nil
}
// Iterate over each key.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
itr := fs.TagKeyIterator(name)
for e := itr.Next(); e != nil; e = itr.Next() {
if cmd.tagKeyFilter != nil && !cmd.tagKeyFilter.Match(e.Key()) {
continue
}
fmt.Fprintf(tw, " %s\t%v\n", e.Key(), deletedString(e.Deleted()))
if err := tw.Flush(); err != nil {
return err
}
if err := cmd.printTagValues(fs, name, e.Key()); err != nil {
return err
}
}
fmt.Fprint(cmd.Stdout, "\n")
return nil
}
func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error {
if !cmd.showTagValues {
return nil
}
// Iterate over each value.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
itr := fs.TagValueIterator(name, key)
for e := itr.Next(); e != nil; e = itr.Next() {
if cmd.tagValueFilter != nil && !cmd.tagValueFilter.Match(e.Value()) {
continue
}
fmt.Fprintf(tw, " %s\t%v\n", e.Value(), deletedString(e.Deleted()))
if err := tw.Flush(); err != nil {
return err
}
if err := cmd.printTagValueSeries(fs, name, key, e.Value()); err != nil {
return err
}
}
fmt.Fprint(cmd.Stdout, "\n")
return nil
}
func (cmd *Command) printTagValueSeries(fs *tsi1.FileSet, name, key, value []byte) error {
if !cmd.showTagValueSeries {
return nil
}
// Iterate over each series.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
itr := fs.TagValueSeriesIterator(name, key, value)
for e := itr.Next(); e != nil; e = itr.Next() {
if !cmd.matchSeries(e.Name(), e.Tags()) {
continue
}
fmt.Fprintf(tw, " %s%s\n", e.Name(), e.Tags().HashKey())
if err := tw.Flush(); err != nil {
return err
}
}
fmt.Fprint(cmd.Stdout, "\n")
return nil
}
func (cmd *Command) printFileSummaries(fs *tsi1.FileSet) error {
for _, f := range fs.Files() {
switch f := f.(type) {
case *tsi1.LogFile:
if err := cmd.printLogFileSummary(f); err != nil {
return err
}
case *tsi1.IndexFile:
if err := cmd.printIndexFileSummary(f); err != nil {
return err
}
default:
panic("unreachable")
}
fmt.Fprintln(cmd.Stdout, "")
}
return nil
}
func (cmd *Command) printLogFileSummary(f *tsi1.LogFile) error {
fmt.Fprintf(cmd.Stdout, "[LOG FILE] %s\n", filepath.Base(f.Path()))
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintf(tw, "Series:\t%d\n", f.SeriesN())
fmt.Fprintf(tw, "Measurements:\t%d\n", f.MeasurementN())
fmt.Fprintf(tw, "Tag Keys:\t%d\n", f.TagKeyN())
fmt.Fprintf(tw, "Tag Values:\t%d\n", f.TagValueN())
return tw.Flush()
}
func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error {
fmt.Fprintf(cmd.Stdout, "[INDEX FILE] %s\n", filepath.Base(f.Path()))
// Calculate summary stats.
seriesN := f.SeriesN()
var measurementN, measurementSeriesN, measurementSeriesSize uint64
var keyN uint64
var valueN, valueSeriesN, valueSeriesSize uint64
mitr := f.MeasurementIterator()
for me, _ := mitr.Next().(*tsi1.MeasurementBlockElem); me != nil; me, _ = mitr.Next().(*tsi1.MeasurementBlockElem) {
kitr := f.TagKeyIterator(me.Name())
for ke, _ := kitr.Next().(*tsi1.TagBlockKeyElem); ke != nil; ke, _ = kitr.Next().(*tsi1.TagBlockKeyElem) {
vitr := f.TagValueIterator(me.Name(), ke.Key())
for ve, _ := vitr.Next().(*tsi1.TagBlockValueElem); ve != nil; ve, _ = vitr.Next().(*tsi1.TagBlockValueElem) {
valueN++
valueSeriesN += uint64(ve.SeriesN())
valueSeriesSize += uint64(len(ve.SeriesData()))
}
keyN++
}
measurementN++
measurementSeriesN += uint64(me.SeriesN())
measurementSeriesSize += uint64(len(me.SeriesData()))
}
// Write stats.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintf(tw, "Series:\t%d\n", seriesN)
fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN)
fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize))
fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN))
fmt.Fprintf(tw, "Tag Keys:\t%d\n", keyN)
fmt.Fprintf(tw, "Tag Values:\t%d\n", valueN)
fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN)
fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize))
fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN))
fmt.Fprintf(tw, "Avg tags per series:\t%.01f\n", float64(valueSeriesN)/float64(seriesN))
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// matchSeries returns true if the command filters matches the series.
func (cmd *Command) matchSeries(name []byte, tags models.Tags) bool {
// Filter by measurement.
if cmd.measurementFilter != nil && !cmd.measurementFilter.Match(name) {
return false
}
// Filter by tag key/value.
if cmd.tagKeyFilter != nil || cmd.tagValueFilter != nil {
var matched bool
for _, tag := range tags {
if (cmd.tagKeyFilter == nil || cmd.tagKeyFilter.Match(tag.Key)) && (cmd.tagValueFilter == nil || cmd.tagValueFilter.Match(tag.Value)) {
matched = true
break
}
}
if !matched {
return false
}
}
return true
}
// printUsage prints the usage message to STDOUT.
func (cmd *Command) printUsage() {
usage := `Dumps low-level details about tsi1 files.
Usage: influx_inspect dumptsi [flags] path...
-series
Dump raw series data
-measurements
Dump raw measurement data
-tag-keys
Dump raw tag keys
-tag-values
Dump raw tag values
-tag-value-series
Dump raw series for each tag value
-measurement-filter REGEXP
Filters data by measurement regular expression
-tag-key-filter REGEXP
Filters data by tag key regular expression
-tag-value-filter REGEXP
Filters data by tag value regular expression
If no flags are specified then summary stats are provided for each file.
`
fmt.Fprint(cmd.Stdout, usage)
}
// deletedString returns "(deleted)" if v is true.
func deletedString(v bool) string {
if v {
return "(deleted)"
}
return ""
}
func formatSize(v uint64) string {
denom := uint64(1)
var uom string
for _, uom = range []string{"b", "kb", "mb", "gb", "tb"} {
if denom*1024 > v {
break
}
denom *= 1024
}
return fmt.Sprintf("%0.01f%s", float64(v)/float64(denom), uom)
}
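A quick sanity check on the helper above; this sketch is not part of the vendored file:

```go
package dumptsi

import "fmt"

// formatSizeExamples illustrates formatSize: the unit chosen is the largest
// one whose mantissa stays below 1024, printed with one decimal place.
func formatSizeExamples() {
	fmt.Println(formatSize(512))     // 512.0b
	fmt.Println(formatSize(1536))    // 1.5kb
	fmt.Println(formatSize(3 << 20)) // 3.0mb
}
```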


@ -0,0 +1,332 @@
// Package dumptsm inspects low-level details about tsm1 files.
package dumptsm
import (
"encoding/binary"
"flag"
"fmt"
"io"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
// Command represents the program execution for "influxd dumptsm".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dumpIndex bool
dumpBlocks bool
dumpAll bool
filterKey string
path string
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("file", flag.ExitOnError)
fs.BoolVar(&cmd.dumpIndex, "index", false, "Dump raw index data")
fs.BoolVar(&cmd.dumpBlocks, "blocks", false, "Dump raw block data")
fs.BoolVar(&cmd.dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information")
fs.StringVar(&cmd.filterKey, "filter-key", "", "Only display index and block data match this key substring")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
return err
}
if fs.Arg(0) == "" {
fmt.Printf("TSM file not specified\n\n")
fs.Usage()
return nil
}
cmd.path = fs.Args()[0]
cmd.dumpBlocks = cmd.dumpBlocks || cmd.dumpAll || cmd.filterKey != ""
cmd.dumpIndex = cmd.dumpIndex || cmd.dumpAll || cmd.filterKey != ""
return cmd.dump()
}
func (cmd *Command) dump() error {
var errors []error
f, err := os.Open(cmd.path)
if err != nil {
return err
}
// Get the file size
stat, err := f.Stat()
if err != nil {
return err
}
b := make([]byte, 8)
r, err := tsm1.NewTSMReader(f)
if err != nil {
return fmt.Errorf("Error opening TSM files: %s", err.Error())
}
defer r.Close()
minTime, maxTime := r.TimeRange()
keyCount := r.KeyCount()
blockStats := &blockStats{}
println("Summary:")
fmt.Printf(" File: %s\n", cmd.path)
fmt.Printf(" Time Range: %s - %s\n",
time.Unix(0, minTime).UTC().Format(time.RFC3339Nano),
time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano),
)
fmt.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime)))
fmt.Printf(" Series: %d ", keyCount)
fmt.Printf(" File Size: %d\n", stat.Size())
println()
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
if cmd.dumpIndex {
println("Index:")
tw.Flush()
println()
fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t"))
var pos int
for i := 0; i < keyCount; i++ {
key, _ := r.KeyAt(i)
for _, e := range r.Entries(string(key)) {
pos++
split := strings.Split(string(key), "#!~#")
// Possible corruption? Try to read as much as we can and point to the problem.
measurement := split[0]
field := split[1]
if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) {
continue
}
fmt.Fprintln(tw, " "+strings.Join([]string{
strconv.FormatInt(int64(pos), 10),
time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano),
time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano),
strconv.FormatInt(int64(e.Offset), 10),
strconv.FormatInt(int64(e.Size), 10),
measurement,
field,
}, "\t"))
tw.Flush()
}
}
}
tw = tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t"))
// Starting at 5 because the magic number is 4 bytes + 1 byte version
i := int64(5)
var blockCount, pointCount, blockSize int64
indexSize := r.IndexSize()
// Start at the beginning and read every block
for j := 0; j < keyCount; j++ {
key, _ := r.KeyAt(j)
for _, e := range r.Entries(string(key)) {
f.Seek(int64(e.Offset), 0)
f.Read(b[:4])
chksum := binary.BigEndian.Uint32(b[:4])
buf := make([]byte, e.Size-4)
f.Read(buf)
blockSize += int64(e.Size)
if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) {
i += blockSize
blockCount++
continue
}
blockType := buf[0]
encoded := buf[1:]
var v []tsm1.Value
v, err := tsm1.DecodeBlock(buf, v)
if err != nil {
return err
}
startTime := time.Unix(0, v[0].UnixNano())
pointCount += int64(len(v))
// Length of the timestamp block
tsLen, j := binary.Uvarint(encoded)
// Unpack the timestamp bytes
ts := encoded[int(j) : int(j)+int(tsLen)]
// Unpack the value bytes
values := encoded[int(j)+int(tsLen):]
tsEncoding := timeEnc[int(ts[0]>>4)]
vEncoding := encDescs[int(blockType+1)][values[0]>>4]
typeDesc := blockTypes[blockType]
blockStats.inc(0, ts[0]>>4)
blockStats.inc(int(blockType+1), values[0]>>4)
blockStats.size(len(buf))
if cmd.dumpBlocks {
fmt.Fprintln(tw, " "+strings.Join([]string{
strconv.FormatInt(blockCount, 10),
strconv.FormatUint(uint64(chksum), 10),
strconv.FormatInt(i, 10),
strconv.FormatInt(int64(len(buf)), 10),
typeDesc,
startTime.UTC().Format(time.RFC3339Nano),
strconv.FormatInt(int64(len(v)), 10),
fmt.Sprintf("%s/%s", tsEncoding, vEncoding),
fmt.Sprintf("%d/%d", len(ts), len(values)),
}, "\t"))
}
i += blockSize
blockCount++
}
}
if cmd.dumpBlocks {
println("Blocks:")
tw.Flush()
println()
}
var blockSizeAvg int64
if blockCount > 0 {
blockSizeAvg = blockSize / blockCount
}
fmt.Printf("Statistics\n")
fmt.Printf(" Blocks:\n")
fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n",
blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg)
fmt.Printf(" Index:\n")
fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize)
fmt.Printf(" Points:\n")
fmt.Printf(" Total: %d", pointCount)
println()
println(" Encoding:")
for i, counts := range blockStats.counts {
if len(counts) == 0 {
continue
}
fmt.Printf(" %s: ", strings.Title(fieldType[i]))
for j, v := range counts {
fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100))
}
println()
}
fmt.Printf(" Compression:\n")
fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount))
fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount))
if len(errors) > 0 {
println()
fmt.Printf("Errors (%d):\n", len(errors))
for _, err := range errors {
fmt.Printf(" * %v\n", err)
}
println()
return fmt.Errorf("error count %d", len(errors))
}
return nil
}
// printUsage prints the usage message to STDOUT.
func (cmd *Command) printUsage() {
usage := `Dumps low-level details about tsm1 files.
Usage: influx_inspect dumptsm [flags] <path
-index
Dump raw index data
-blocks
Dump raw block data
-all
Dump all data. Caution: This may print a lot of information
-filter-key <name>
Only display index and block data that match this key substring
`
fmt.Fprint(cmd.Stdout, usage)
}
var (
fieldType = []string{
"timestamp", "float", "int", "bool", "string",
}
blockTypes = []string{
"float64", "int64", "bool", "string",
}
timeEnc = []string{
"none", "s8b", "rle",
}
floatEnc = []string{
"none", "gor",
}
intEnc = []string{
"none", "s8b", "rle",
}
boolEnc = []string{
"none", "bp",
}
stringEnc = []string{
"none", "snpy",
}
encDescs = [][]string{
timeEnc, floatEnc, intEnc, boolEnc, stringEnc,
}
)
type blockStats struct {
min, max int
counts [][]int
}
func (b *blockStats) inc(typ int, enc byte) {
for len(b.counts) <= typ {
b.counts = append(b.counts, []int{})
}
for len(b.counts[typ]) <= int(enc) {
b.counts[typ] = append(b.counts[typ], 0)
}
b.counts[typ][enc]++
}
func (b *blockStats) size(sz int) {
if b.min == 0 || sz < b.min {
b.min = sz
}
if b.min == 0 || sz > b.max {
b.max = sz
}
}
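For orientation, a small sketch (not part of the vendored file) of how the dump loop above feeds blockStats; the encoding indexes follow the timeEnc/encDescs tables:

```go
package dumptsm

// blockStatsExample records one float block whose timestamps are
// run-length encoded (timeEnc[2] == "rle") and whose values use
// gorilla encoding (floatEnc[1] == "gor").
func blockStatsExample() {
	bs := &blockStats{}
	bs.inc(0, 2) // column 0 is the timestamp encoding
	bs.inc(1, 1) // column blockType+1; float64 blocks have blockType 0
	bs.size(96)  // tracks min/max block sizes in bytes
}
```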


@ -0,0 +1,3 @@
package dumptsm_test
// TODO: write some tests


@ -0,0 +1,408 @@
// Package export exports TSM files into InfluxDB line protocol format.
package export
import (
"bufio"
"compress/gzip"
"flag"
"fmt"
"io"
"math"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/escape"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
// Command represents the program execution for "influx_inspect export".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dataDir string
walDir string
out string
database string
retentionPolicy string
startTime int64
endTime int64
compress bool
manifest map[string]struct{}
tsmFiles map[string][]string
walFiles map[string][]string
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
manifest: make(map[string]struct{}),
tsmFiles: make(map[string][]string),
walFiles: make(map[string][]string),
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
var start, end string
fs := flag.NewFlagSet("export", flag.ExitOnError)
fs.StringVar(&cmd.dataDir, "datadir", os.Getenv("HOME")+"/.influxdb/data", "Data storage path")
fs.StringVar(&cmd.walDir, "waldir", os.Getenv("HOME")+"/.influxdb/wal", "WAL storage path")
fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "Destination file to export to")
fs.StringVar(&cmd.database, "database", "", "Optional: the database to export")
fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to export (requires -database)")
fs.StringVar(&start, "start", "", "Optional: the start time to export (RFC3339 format)")
fs.StringVar(&end, "end", "", "Optional: the end time to export (RFC3339 format)")
fs.BoolVar(&cmd.compress, "compress", false, "Compress the output")
fs.SetOutput(cmd.Stdout)
fs.Usage = func() {
fmt.Fprintf(cmd.Stdout, "Exports TSM files into InfluxDB line protocol format.\n\n")
fmt.Fprintf(cmd.Stdout, "Usage: %s export [flags]\n\n", filepath.Base(os.Args[0]))
fs.PrintDefaults()
}
if err := fs.Parse(args); err != nil {
return err
}
// set defaults
if start != "" {
s, err := time.Parse(time.RFC3339, start)
if err != nil {
return err
}
cmd.startTime = s.UnixNano()
} else {
cmd.startTime = math.MinInt64
}
if end != "" {
e, err := time.Parse(time.RFC3339, end)
if err != nil {
return err
}
cmd.endTime = e.UnixNano()
} else {
// set end time to max if it is not set.
cmd.endTime = math.MaxInt64
}
if err := cmd.validate(); err != nil {
return err
}
return cmd.export()
}
func (cmd *Command) validate() error {
if cmd.retentionPolicy != "" && cmd.database == "" {
return fmt.Errorf("must specify a db")
}
if cmd.startTime != 0 && cmd.endTime != 0 && cmd.endTime < cmd.startTime {
return fmt.Errorf("end time before start time")
}
return nil
}
func (cmd *Command) export() error {
if err := cmd.walkTSMFiles(); err != nil {
return err
}
if err := cmd.walkWALFiles(); err != nil {
return err
}
return cmd.write()
}
func (cmd *Command) walkTSMFiles() error {
return filepath.Walk(cmd.dataDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// check to see if this is a tsm file
if filepath.Ext(path) != "."+tsm1.TSMFileExtension {
return nil
}
relPath, err := filepath.Rel(cmd.dataDir, path)
if err != nil {
return err
}
dirs := strings.Split(relPath, string(byte(os.PathSeparator)))
if len(dirs) < 2 {
return fmt.Errorf("invalid directory structure for %s", path)
}
if dirs[0] == cmd.database || cmd.database == "" {
if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" {
key := filepath.Join(dirs[0], dirs[1])
cmd.manifest[key] = struct{}{}
cmd.tsmFiles[key] = append(cmd.tsmFiles[key], path)
}
}
return nil
})
}
func (cmd *Command) walkWALFiles() error {
return filepath.Walk(cmd.walDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// check to see if this is a wal file
fileName := filepath.Base(path)
if filepath.Ext(path) != "."+tsm1.WALFileExtension || !strings.HasPrefix(fileName, tsm1.WALFilePrefix) {
return nil
}
relPath, err := filepath.Rel(cmd.walDir, path)
if err != nil {
return err
}
dirs := strings.Split(relPath, string(byte(os.PathSeparator)))
if len(dirs) < 2 {
return fmt.Errorf("invalid directory structure for %s", path)
}
if dirs[0] == cmd.database || cmd.database == "" {
if dirs[1] == cmd.retentionPolicy || cmd.retentionPolicy == "" {
key := filepath.Join(dirs[0], dirs[1])
cmd.manifest[key] = struct{}{}
cmd.walFiles[key] = append(cmd.walFiles[key], path)
}
}
return nil
})
}
func (cmd *Command) write() error {
// open our output file and create an output buffer
f, err := os.Create(cmd.out)
if err != nil {
return err
}
defer f.Close()
// Because calling (*os.File).Write is relatively expensive,
// and we don't *need* to sync to disk on every written line of export,
// use a sized buffered writer so that we only sync the file every megabyte.
bw := bufio.NewWriterSize(f, 1024*1024)
defer bw.Flush()
var w io.Writer = bw
if cmd.compress {
gzw := gzip.NewWriter(w)
defer gzw.Close()
w = gzw
}
s, e := time.Unix(0, cmd.startTime).Format(time.RFC3339), time.Unix(0, cmd.endTime).Format(time.RFC3339)
fmt.Fprintf(w, "# INFLUXDB EXPORT: %s - %s\n", s, e)
// Write out all the DDL
fmt.Fprintln(w, "# DDL")
for key := range cmd.manifest {
keys := strings.Split(key, string(os.PathSeparator))
db, rp := influxql.QuoteIdent(keys[0]), influxql.QuoteIdent(keys[1])
fmt.Fprintf(w, "CREATE DATABASE %s WITH NAME %s\n", db, rp)
}
fmt.Fprintln(w, "# DML")
for key := range cmd.manifest {
keys := strings.Split(key, string(os.PathSeparator))
fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", keys[0])
fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1])
if files, ok := cmd.tsmFiles[key]; ok {
fmt.Fprintf(cmd.Stdout, "writing out tsm file data for %s...", key)
if err := cmd.writeTsmFiles(w, files); err != nil {
return err
}
fmt.Fprintln(cmd.Stdout, "complete.")
}
if _, ok := cmd.walFiles[key]; ok {
fmt.Fprintf(cmd.Stdout, "writing out wal file data for %s...", key)
if err := cmd.writeWALFiles(w, cmd.walFiles[key], key); err != nil {
return err
}
fmt.Fprintln(cmd.Stdout, "complete.")
}
}
return nil
}
func (cmd *Command) writeTsmFiles(w io.Writer, files []string) error {
fmt.Fprintln(w, "# writing tsm data")
// we need to make sure we write the same order that the files were written
sort.Strings(files)
for _, f := range files {
if err := cmd.exportTSMFile(f, w); err != nil {
return err
}
}
return nil
}
func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error {
f, err := os.Open(tsmFilePath)
if err != nil {
return err
}
defer f.Close()
r, err := tsm1.NewTSMReader(f)
if err != nil {
fmt.Fprintf(cmd.Stderr, "unable to read %s, skipping: %s\n", tsmFilePath, err.Error())
return nil
}
defer r.Close()
if sgStart, sgEnd := r.TimeRange(); sgStart > cmd.endTime || sgEnd < cmd.startTime {
return nil
}
for i := 0; i < r.KeyCount(); i++ {
key, _ := r.KeyAt(i)
values, err := r.ReadAll(string(key))
if err != nil {
fmt.Fprintf(cmd.Stderr, "unable to read key %q in %s, skipping: %s\n", string(key), tsmFilePath, err.Error())
continue
}
measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)
field = escape.Bytes(field)
if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
// An error from writeValues indicates an IO error, which should be returned.
return err
}
}
return nil
}
func (cmd *Command) writeWALFiles(w io.Writer, files []string, key string) error {
fmt.Fprintln(w, "# writing wal data")
// we need to make sure we write the same order that the wal received the data
sort.Strings(files)
var once sync.Once
warnDelete := func() {
once.Do(func() {
msg := fmt.Sprintf(`WARNING: detected deletes in wal file.
Some series for %q may be brought back by replaying this data.
To resolve, you can either let the shard snapshot prior to exporting the data
or manually edit the exported file.
`, key)
fmt.Fprintln(cmd.Stderr, msg)
})
}
for _, f := range files {
if err := cmd.exportWALFile(f, w, warnDelete); err != nil {
return err
}
}
return nil
}
// exportWALFile reads every WAL entry from the file at walFilePath and exports it to w.
func (cmd *Command) exportWALFile(walFilePath string, w io.Writer, warnDelete func()) error {
f, err := os.Open(walFilePath)
if err != nil {
return err
}
defer f.Close()
r := tsm1.NewWALSegmentReader(f)
defer r.Close()
for r.Next() {
entry, err := r.Read()
if err != nil {
n := r.Count()
fmt.Fprintf(cmd.Stderr, "file %s corrupt at position %d", walFilePath, n)
break
}
switch t := entry.(type) {
case *tsm1.DeleteWALEntry, *tsm1.DeleteRangeWALEntry:
warnDelete()
continue
case *tsm1.WriteWALEntry:
for key, values := range t.Values {
measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key))
// measurements are stored escaped, field names are not
field = escape.Bytes(field)
if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
// An error from writeValues indicates an IO error, which should be returned.
return err
}
}
}
}
return nil
}
// writeValues writes every value in values to w, using the given series key and field name.
// If any call to w.Write fails, that error is returned.
func (cmd *Command) writeValues(w io.Writer, seriesKey []byte, field string, values []tsm1.Value) error {
buf := []byte(string(seriesKey) + " " + field + "=")
prefixLen := len(buf)
for _, value := range values {
ts := value.UnixNano()
if (ts < cmd.startTime) || (ts > cmd.endTime) {
continue
}
// Re-slice buf to be "<series_key> <field>=".
buf = buf[:prefixLen]
// Append the correct representation of the value.
switch v := value.Value().(type) {
case float64:
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
case int64:
buf = strconv.AppendInt(buf, v, 10)
buf = append(buf, 'i')
case bool:
buf = strconv.AppendBool(buf, v)
case string:
buf = append(buf, '"')
buf = append(buf, models.EscapeStringField(v)...)
buf = append(buf, '"')
default:
// This shouldn't be possible, but we'll format it anyway.
buf = append(buf, fmt.Sprintf("%v", v)...)
}
// Now buf has "<series_key> <field>=<value>".
// Append the timestamp and a newline, then write it.
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, ts, 10)
buf = append(buf, '\n')
if _, err := w.Write(buf); err != nil {
// Underlying IO error needs to be returned.
return err
}
}
return nil
}
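To make the emitted format concrete, the lines produced by writeValues look roughly like this for each supported value type (series key, field name, and timestamps are illustrative; compare the expected lines in the tests below):

```
cpu,host=serverA value=1.5 1439856000000000000    # float64
cpu,host=serverA value=15i 1439856000000000000    # int64 carries a trailing 'i'
cpu,host=serverA value=true 1439856000000000000   # bool
cpu,host=serverA value="1. \"quotes\"" 1439856000000000000  # string, quoted and escaped
```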


@ -0,0 +1,340 @@
package export
import (
"bytes"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"testing"
"github.com/golang/snappy"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
type corpus map[string][]tsm1.Value
var (
basicCorpus = corpus{
tsm1.SeriesFieldKey("floats,k=f", "f"): []tsm1.Value{
tsm1.NewValue(1, float64(1.5)),
tsm1.NewValue(2, float64(3)),
},
tsm1.SeriesFieldKey("ints,k=i", "i"): []tsm1.Value{
tsm1.NewValue(10, int64(15)),
tsm1.NewValue(20, int64(30)),
},
tsm1.SeriesFieldKey("bools,k=b", "b"): []tsm1.Value{
tsm1.NewValue(100, true),
tsm1.NewValue(200, false),
},
tsm1.SeriesFieldKey("strings,k=s", "s"): []tsm1.Value{
tsm1.NewValue(1000, "1k"),
tsm1.NewValue(2000, "2k"),
},
}
basicCorpusExpLines = []string{
"floats,k=f f=1.5 1",
"floats,k=f f=3 2",
"ints,k=i i=15i 10",
"ints,k=i i=30i 20",
"bools,k=b b=true 100",
"bools,k=b b=false 200",
`strings,k=s s="1k" 1000`,
`strings,k=s s="2k" 2000`,
}
escapeStringCorpus = corpus{
tsm1.SeriesFieldKey("t", "s"): []tsm1.Value{
tsm1.NewValue(1, `1. "quotes"`),
tsm1.NewValue(2, `2. back\slash`),
tsm1.NewValue(3, `3. bs\q"`),
},
}
escCorpusExpLines = []string{
`t s="1. \"quotes\"" 1`,
`t s="2. back\\slash" 2`,
`t s="3. bs\\q\"" 3`,
}
)
func Test_exportWALFile(t *testing.T) {
for _, c := range []struct {
corpus corpus
lines []string
}{
{corpus: basicCorpus, lines: basicCorpusExpLines},
{corpus: escapeStringCorpus, lines: escCorpusExpLines},
} {
walFile := writeCorpusToWALFile(c.corpus)
defer os.Remove(walFile.Name())
var out bytes.Buffer
if err := newCommand().exportWALFile(walFile.Name(), &out, func() {}); err != nil {
t.Fatal(err)
}
lines := strings.Split(out.String(), "\n")
for _, exp := range c.lines {
found := false
for _, l := range lines {
if exp == l {
found = true
break
}
}
if !found {
t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String())
}
}
}
}
func Test_exportTSMFile(t *testing.T) {
for _, c := range []struct {
corpus corpus
lines []string
}{
{corpus: basicCorpus, lines: basicCorpusExpLines},
{corpus: escapeStringCorpus, lines: escCorpusExpLines},
} {
tsmFile := writeCorpusToTSMFile(c.corpus)
defer os.Remove(tsmFile.Name())
var out bytes.Buffer
if err := newCommand().exportTSMFile(tsmFile.Name(), &out); err != nil {
t.Fatal(err)
}
lines := strings.Split(out.String(), "\n")
for _, exp := range c.lines {
found := false
for _, l := range lines {
if exp == l {
found = true
break
}
}
if !found {
t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String())
}
}
}
}
var sink interface{}
func benchmarkExportTSM(c corpus, b *testing.B) {
// Garbage collection is relatively likely to happen during export, so track allocations.
b.ReportAllocs()
f := writeCorpusToTSMFile(c)
defer os.Remove(f.Name())
cmd := newCommand()
var out bytes.Buffer
b.ResetTimer()
b.StartTimer()
for i := 0; i < b.N; i++ {
if err := cmd.exportTSMFile(f.Name(), &out); err != nil {
b.Fatal(err)
}
sink = out.Bytes()
out.Reset()
}
}
func BenchmarkExportTSMFloats_100s_250vps(b *testing.B) {
benchmarkExportTSM(makeFloatsCorpus(100, 250), b)
}
func BenchmarkExportTSMInts_100s_250vps(b *testing.B) {
benchmarkExportTSM(makeIntsCorpus(100, 250), b)
}
func BenchmarkExportTSMBools_100s_250vps(b *testing.B) {
benchmarkExportTSM(makeBoolsCorpus(100, 250), b)
}
func BenchmarkExportTSMStrings_100s_250vps(b *testing.B) {
benchmarkExportTSM(makeStringsCorpus(100, 250), b)
}
func benchmarkExportWAL(c corpus, b *testing.B) {
// Garbage collection is relatively likely to happen during export, so track allocations.
b.ReportAllocs()
f := writeCorpusToWALFile(c)
defer os.Remove(f.Name())
cmd := newCommand()
var out bytes.Buffer
b.ResetTimer()
b.StartTimer()
for i := 0; i < b.N; i++ {
if err := cmd.exportWALFile(f.Name(), &out, func() {}); err != nil {
b.Fatal(err)
}
sink = out.Bytes()
out.Reset()
}
}
func BenchmarkExportWALFloats_100s_250vps(b *testing.B) {
benchmarkExportWAL(makeFloatsCorpus(100, 250), b)
}
func BenchmarkExportWALInts_100s_250vps(b *testing.B) {
benchmarkExportWAL(makeIntsCorpus(100, 250), b)
}
func BenchmarkExportWALBools_100s_250vps(b *testing.B) {
benchmarkExportWAL(makeBoolsCorpus(100, 250), b)
}
func BenchmarkExportWALStrings_100s_250vps(b *testing.B) {
benchmarkExportWAL(makeStringsCorpus(100, 250), b)
}
// newCommand returns a command that discards its output and that accepts all timestamps.
func newCommand() *Command {
return &Command{
Stderr: ioutil.Discard,
Stdout: ioutil.Discard,
startTime: math.MinInt64,
endTime: math.MaxInt64,
}
}
// makeCorpus returns a new corpus filled with values generated by fn.
// The RNG passed to fn is seeded with numSeries * numValuesPerSeries, for predictable output.
func makeCorpus(numSeries, numValuesPerSeries int, fn func(*rand.Rand) interface{}) corpus {
rng := rand.New(rand.NewSource(int64(numSeries) * int64(numValuesPerSeries)))
var unixNano int64
corpus := make(corpus, numSeries)
for i := 0; i < numSeries; i++ {
vals := make([]tsm1.Value, numValuesPerSeries)
for j := 0; j < numValuesPerSeries; j++ {
vals[j] = tsm1.NewValue(unixNano, fn(rng))
unixNano++
}
k := fmt.Sprintf("m,t=%d", i)
corpus[tsm1.SeriesFieldKey(k, "x")] = vals
}
return corpus
}
func makeFloatsCorpus(numSeries, numFloatsPerSeries int) corpus {
return makeCorpus(numSeries, numFloatsPerSeries, func(rng *rand.Rand) interface{} {
return rng.Float64()
})
}
func makeIntsCorpus(numSeries, numIntsPerSeries int) corpus {
return makeCorpus(numSeries, numIntsPerSeries, func(rng *rand.Rand) interface{} {
// This will only return positive integers. That's probably okay.
return rng.Int63()
})
}
func makeBoolsCorpus(numSeries, numBoolsPerSeries int) corpus {
return makeCorpus(numSeries, numBoolsPerSeries, func(rng *rand.Rand) interface{} {
return rng.Int63n(2) == 1
})
}
func makeStringsCorpus(numSeries, numStringsPerSeries int) corpus {
return makeCorpus(numSeries, numStringsPerSeries, func(rng *rand.Rand) interface{} {
// The string will randomly have 2-6 parts
parts := make([]string, rng.Intn(4)+2)
for i := range parts {
// Each part is a random base36-encoded number
parts[i] = strconv.FormatInt(rng.Int63(), 36)
}
// Join the individual parts with underscores.
return strings.Join(parts, "_")
})
}
// writeCorpusToWALFile writes the given corpus as a WAL file, and returns a handle to that file.
// It is the caller's responsibility to remove the returned temp file.
// writeCorpusToWALFile will panic on any error that occurs.
func writeCorpusToWALFile(c corpus) *os.File {
walFile, err := ioutil.TempFile("", "export_test_corpus_wal")
if err != nil {
panic(err)
}
e := &tsm1.WriteWALEntry{Values: c}
b, err := e.Encode(nil)
if err != nil {
panic(err)
}
w := tsm1.NewWALSegmentWriter(walFile)
if err := w.Write(e.Type(), snappy.Encode(nil, b)); err != nil {
panic(err)
}
if err := w.Flush(); err != nil {
panic(err)
}
// (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway.
if err := walFile.Sync(); err != nil {
panic(err)
}
return walFile
}
// writeCorpusToTSMFile writes the given corpus as a TSM file, and returns a handle to that file.
// It is the caller's responsibility to remove the returned temp file.
// writeCorpusToTSMFile will panic on any error that occurs.
func writeCorpusToTSMFile(c corpus) *os.File {
tsmFile, err := ioutil.TempFile("", "export_test_corpus_tsm")
if err != nil {
panic(err)
}
w, err := tsm1.NewTSMWriter(tsmFile)
if err != nil {
panic(err)
}
// Write the series in alphabetical order so that each test run is comparable,
// given an identical corpus.
keys := make([]string, 0, len(c))
for k := range c {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
if err := w.Write(k, c[k]); err != nil {
panic(err)
}
}
if err := w.WriteIndex(); err != nil {
panic(err)
}
if err := w.Close(); err != nil {
panic(err)
}
return tsmFile
}

View File

@ -0,0 +1,43 @@
// Package help contains the help for the influx_inspect command.
package help
import (
"fmt"
"io"
"os"
"strings"
)
// Command displays help for command-line sub-commands.
type Command struct {
Stdout io.Writer
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage))
return nil
}
const usage = `
Usage: influx_inspect [[command] [arguments]]
The commands are:
dumptsm dumps low-level details about tsm1 files.
export exports raw data from a shard to line protocol
help display this help message
report displays a shard level report
verify verifies integrity of TSM files
"help" is the default command.
Use "influx_inspect [command] -help" for more information about a command.
`

View File

@ -0,0 +1,3 @@
package help_test
// TODO: write some tests

View File

@ -0,0 +1,90 @@
// The influx_inspect command displays detailed information about InfluxDB data files.
package main
import (
"fmt"
"io"
"log"
"os"
"github.com/influxdata/influxdb/cmd"
"github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi"
"github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm"
"github.com/influxdata/influxdb/cmd/influx_inspect/export"
"github.com/influxdata/influxdb/cmd/influx_inspect/help"
"github.com/influxdata/influxdb/cmd/influx_inspect/report"
"github.com/influxdata/influxdb/cmd/influx_inspect/verify"
_ "github.com/influxdata/influxdb/tsdb/engine"
)
func main() {
m := NewMain()
if err := m.Run(os.Args[1:]...); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
// Main represents the program execution.
type Main struct {
Logger *log.Logger
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// NewMain returns a new instance of Main.
func NewMain() *Main {
return &Main{
Logger: log.New(os.Stderr, "[influx_inspect] ", log.LstdFlags),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
}
// Run determines and runs the command specified by the CLI args.
func (m *Main) Run(args ...string) error {
// Extract the command name from the args.
name, args := cmd.ParseCommandName(args)
switch name {
case "", "help":
if err := help.NewCommand().Run(args...); err != nil {
return fmt.Errorf("help: %s", err)
}
case "dumptsi":
name := dumptsi.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("dumptsi: %s", err)
}
case "dumptsmdev":
fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n")
fallthrough
case "dumptsm":
name := dumptsm.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("dumptsm: %s", err)
}
case "export":
name := export.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("export: %s", err)
}
case "report":
name := report.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("report: %s", err)
}
case "verify":
name := verify.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("verify: %s", err)
}
default:
return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx_inspect help' for usage`+"\n\n", name)
}
return nil
}

View File

@ -0,0 +1,192 @@
// Package report reports statistics about TSM files.
package report
import (
"flag"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
"github.com/retailnext/hllpp"
)
// Command represents the program execution for "influxd report".
type Command struct {
Stderr io.Writer
Stdout io.Writer
dir string
pattern string
detailed bool
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("report", flag.ExitOnError)
fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern")
fs.BoolVar(&cmd.detailed, "detailed", false, "Report detailed cardinality estimates")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
return err
}
cmd.dir = fs.Arg(0)
start := time.Now()
files, err := filepath.Glob(filepath.Join(cmd.dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension)))
if err != nil {
return err
}
var filtered []string
if cmd.pattern != "" {
for _, f := range files {
if strings.Contains(f, cmd.pattern) {
filtered = append(filtered, f)
}
}
files = filtered
}
if len(files) == 0 {
return fmt.Errorf("no tsm files at %v", cmd.dir)
}
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t"))
totalSeries := hllpp.New()
tagCardinalities := map[string]*hllpp.HLLPP{}
measCardinalities := map[string]*hllpp.HLLPP{}
fieldCardinalities := map[string]*hllpp.HLLPP{}
for _, f := range files {
file, err := os.OpenFile(f, os.O_RDONLY, 0600)
if err != nil {
fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", f, err)
continue
}
loadStart := time.Now()
reader, err := tsm1.NewTSMReader(file)
if err != nil {
fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", file.Name(), err)
file.Close()
continue
}
loadTime := time.Since(loadStart)
seriesCount := reader.KeyCount()
for i := 0; i < seriesCount; i++ {
key, _ := reader.KeyAt(i)
totalSeries.Add([]byte(key))
if cmd.detailed {
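// tsm1 keys have the form "<series key>#!~#<field>"; split on the key separator.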
sep := strings.Index(string(key), "#!~#")
seriesKey, field := key[:sep], key[sep+4:]
measurement, tags := models.ParseKey(seriesKey)
measCount, ok := measCardinalities[measurement]
if !ok {
measCount = hllpp.New()
measCardinalities[measurement] = measCount
}
measCount.Add([]byte(key))
fieldCount, ok := fieldCardinalities[measurement]
if !ok {
fieldCount = hllpp.New()
fieldCardinalities[measurement] = fieldCount
}
fieldCount.Add([]byte(field))
for _, t := range tags {
tagCount, ok := tagCardinalities[string(t.Key)]
if !ok {
tagCount = hllpp.New()
tagCardinalities[string(t.Key)] = tagCount
}
tagCount.Add(t.Value)
}
}
}
reader.Close()
fmt.Fprintln(tw, strings.Join([]string{
filepath.Base(file.Name()),
strconv.FormatInt(int64(seriesCount), 10),
loadTime.String(),
}, "\t"))
tw.Flush()
}
tw.Flush()
fmt.Println()
fmt.Printf("Statistics\n")
fmt.Printf("\tSeries:\n")
fmt.Printf("\t\tTotal (est): %d\n", totalSeries.Count())
if cmd.detailed {
fmt.Printf("\tMeasurements (est):\n")
for _, t := range sortKeys(measCardinalities) {
fmt.Printf("\t\t%v: %d (%d%%)\n", t, measCardinalities[t].Count(), int((float64(measCardinalities[t].Count())/float64(totalSeries.Count()))*100))
}
fmt.Printf("\tFields (est):\n")
for _, t := range sortKeys(fieldCardinalities) {
fmt.Printf("\t\t%v: %d\n", t, fieldCardinalities[t].Count())
}
fmt.Printf("\tTags (est):\n")
for _, t := range sortKeys(tagCardinalities) {
fmt.Printf("\t\t%v: %d\n", t, tagCardinalities[t].Count())
}
}
fmt.Printf("Completed in %s\n", time.Since(start))
return nil
}
// sortKeys is a quick helper to return the sorted set of a map's keys
func sortKeys(vals map[string]*hllpp.HLLPP) (keys []string) {
for k := range vals {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// printUsage prints the usage message to STDOUT.
func (cmd *Command) printUsage() {
usage := `Displays shard level report.
Usage: influx_inspect report [flags]
-pattern <pattern>
Include only files matching a pattern.
-detailed
Report detailed cardinality estimates.
Defaults to "false".
`
fmt.Fprint(cmd.Stdout, usage)
}

View File

@ -0,0 +1,3 @@
package report_test
// TODO: write some tests

View File

@ -0,0 +1,120 @@
// Package verify verifies integrity of TSM files.
package verify
import (
"flag"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
// Command represents the program execution for "influx_inspect verify".
type Command struct {
Stderr io.Writer
Stdout io.Writer
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
var path string
fs := flag.NewFlagSet("verify", flag.ExitOnError)
fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
return err
}
start := time.Now()
dataPath := filepath.Join(path, "data")
brokenBlocks := 0
totalBlocks := 0
// No need to do this in a loop
ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension)
// Get all TSM files by walking through the data dir
files := []string{}
err := filepath.Walk(dataPath, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if filepath.Ext(path) == ext {
files = append(files, path)
}
return nil
})
if err != nil {
return err
}
tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0)
// Verify the checksums of every block in every file
for _, f := range files {
file, err := os.OpenFile(f, os.O_RDONLY, 0600)
if err != nil {
return err
}
reader, err := tsm1.NewTSMReader(file)
if err != nil {
return err
}
blockItr := reader.BlockIterator()
brokenFileBlocks := 0
count := 0
for blockItr.Next() {
totalBlocks++
key, _, _, _, checksum, buf, err := blockItr.Read()
if err != nil {
brokenBlocks++
brokenFileBlocks++
fmt.Fprintf(tw, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err)
} else if expected := crc32.ChecksumIEEE(buf); checksum != expected {
brokenBlocks++
brokenFileBlocks++
fmt.Fprintf(tw, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count)
}
count++
}
if brokenFileBlocks == 0 {
fmt.Fprintf(tw, "%s: healthy\n", f)
}
reader.Close()
}
fmt.Fprintf(tw, "Broken Blocks: %d / %d, in %vs\n", brokenBlocks, totalBlocks, time.Since(start).Seconds())
tw.Flush()
return nil
}
// printUsage prints the usage message to STDOUT.
func (cmd *Command) printUsage() {
usage := fmt.Sprintf(`Verifies the integrity of TSM files.
Usage: influx_inspect verify [flags]
-dir <path>
Root storage path
Defaults to "%[1]s/.influxdb".
`, os.Getenv("HOME"))
fmt.Fprint(cmd.Stdout, usage)
}

View File

@ -0,0 +1,3 @@
package verify_test
// TODO: write some tests

View File

@ -0,0 +1,43 @@
# `influx_stress`
If you run into any issues with this tool please mention @jackzampolin when you create an issue.
## Ways to run
### `influx_stress`
This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml). For more information on the configuration file, please see the default config.
### `influx_stress -config someConfig.toml`
This runs the stress test with a valid configuration file located at `someConfig.toml`.
### `influx_stress -v2 -config someConfig.iql`
This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md).
## Flags
If flags are defined they overwrite the config from any file passed in.
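For example, passing `-database mydb` on the command line overrides the database named in the config file.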
### `-addr` string
IP address and port of database where response times will persist (e.g., localhost:8086)
`default` = "http://localhost:8086"
### `-config` string
The relative path to the stress test configuration file.
`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)
### `-cpuprofile` filename
Writes Go's CPU profile output to `filename`
`default` = no profiling
### `-database` string
Name of the database on `-addr` to which `influx_stress` will persist write and query response times
`default` = "stress"
### `-tags` value
A comma-separated list of tags to add to write and query response times.
`default` = ""

View File

@ -0,0 +1,92 @@
# This section can be removed
[provision]
# The basic provisioner simply deletes and creates database.
# If `reset_database` is false, it will not attempt to delete the database
[provision.basic]
# If enabled the provisioner will actually run
enabled = true
# Address of the instance that is to be provisioned
address = "localhost:8086"
# Database that will be created/deleted
database = "stress"
# Attempt to delete database
reset_database = true
# This section cannot be commented out
# To prevent writes set `enabled=false`
# in [write.influx_client.basic]
[write]
[write.point_generator]
# The basic point generator will generate points of the form
# `cpu,host=server-%v,location=us-west value=234 123456`
[write.point_generator.basic]
# number of points that will be written for each of the series
point_count = 100
# number of series
series_count = 100000
# How much time between each timestamp
tick = "10s"
# Randomize timestamp a bit (not functional)
jitter = true
# Precision of points that are being written
precision = "s"
# name of the measurement that will be written
measurement = "cpu"
# The date for the first point that is written into influx
start_date = "2006-Jan-02"
# Defines a tag for a series
[[write.point_generator.basic.tag]]
key = "host"
value = "server"
[[write.point_generator.basic.tag]]
key = "location"
value = "us-west"
# Defines a field for a series
[[write.point_generator.basic.field]]
key = "value"
value = "float64" # supported types: float64, int, bool
[write.influx_client]
[write.influx_client.basic]
# If enabled the writer will actually write
enabled = true
# Addresses is an array of the Influxdb instances
addresses = ["localhost:8086"] # stress_test_server runs on port 1234
# Database that is being written to
database = "stress"
# Precision of points that are being written
precision = "s"
# Size of batches that are sent to db
batch_size = 10000
# Interval between each batch
batch_interval = "0s"
# How many concurrent writers to the db
concurrency = 10
# ssl enabled?
ssl = false
# format of points that are written to influxdb
format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet)
# This section can be removed
[read]
[read.query_generator]
[read.query_generator.basic]
# Template of the query that will be run against the instance
template = "SELECT count(value) FROM cpu where host='server-%v'"
# How many times the templated query will be run
query_count = 250
[read.query_client]
[read.query_client.basic]
# If enabled the reader will actually read
enabled = true
# Address of the instance that will be queried
addresses = ["localhost:8086"]
# Database that will be queried
database = "stress"
# Interval between queries
query_interval = "100ms"
# Number of concurrent queriers
concurrency = 1

View File

@ -0,0 +1,71 @@
// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead.
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime/pprof"
"github.com/influxdata/influxdb/stress"
v2 "github.com/influxdata/influxdb/stress/v2"
)
var (
useV2 = flag.Bool("v2", false, "Use version 2 of stress tool")
config = flag.String("config", "", "The stress test file")
cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`")
db = flag.String("db", "", "target database within test system for write and query load")
)
func main() {
o := stress.NewOutputConfig()
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
fmt.Println(err)
return
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
if *useV2 {
if *config != "" {
v2.RunStress(*config)
} else {
v2.RunStress("stress/v2/iql/file.iql")
}
} else {
c, err := stress.NewConfig(*config)
if err != nil {
log.Fatal(err)
return
}
if *db != "" {
c.Provision.Basic.Database = *db
c.Write.InfluxClients.Basic.Database = *db
c.Read.QueryClients.Basic.Database = *db
}
w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic)
r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic)
s := stress.NewStressTest(&c.Provision.Basic, w, r)
bw := stress.NewBroadcastChannel()
bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler)
bw.Register(o.HTTPHandler("write"))
br := stress.NewBroadcastChannel()
br.Register(c.Read.QueryClients.Basic.BasicReadHandler)
br.Register(o.HTTPHandler("read"))
s.Start(bw.Handle, br.Handle)
}
}

View File

@ -0,0 +1,152 @@
# Converting b1 and bz1 shards to tsm1
`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1
format. Converting shards to tsm1 format results in a very significant
reduction in disk usage and significantly improved write throughput
when writing data into those shards.
Conversion can be controlled on a database-by-database basis. By
default a database is backed up before it is converted, allowing you
to roll back any changes. Because of the backup process, ensure the
host system has at least as much free disk space as the disk space
consumed by the _data_ directory of your InfluxDB system.
The tool automatically ignores tsm1 shards, and can be run
idempotently on any database.
Conversion is an offline process, and the InfluxDB system must be
stopped during conversion. However the conversion process reads and
writes shards directly on disk and should be fast.
## Steps
Follow these steps to perform a conversion.
* Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted.
* Decide on parallel operation. By default the conversion operation performs each operation in a serial manner. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM usage will all increase).
* Stop all write-traffic to your InfluxDB system.
* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards.
* Stop the InfluxDB service. It should not be restarted until conversion is complete.
* Run the conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions.
* Unless you ran the conversion tool as the same user that runs InfluxDB, you may need to set the correct read-and-write permissions on the new tsm1 directories.
* Restart node and ensure data looks correct.
* If everything looks OK, you may then wish to remove or archive the backed-up databases.
* Restart write traffic.
## Example session
Below is an example session, showing a database being converted.
```
$ # Create a backup location that the `influxdb` user has full access to
$ mkdir -m 0777 /path/to/influxdb_backup
$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data
b1 and bz1 shard conversion.
-----------------------------------
Data directory is: /var/lib/influxdb/data
Backup directory is: /path/to/influxdb_backup
Databases specified: all
Database backups enabled: yes
Parallel mode enabled (GOMAXPROCS): yes (8)
Found 1 shards that will be converted.
Database Retention Path Engine Size
_internal monitor /var/lib/influxdb/data/_internal/monitor/1 bz1 65536
These shards will be converted. Proceed? y/N: y
Conversion starting....
Backing up 1 databases...
2016/01/28 12:23:43.699266 Backup of database '_internal' started
2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1
2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs)
2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1
2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms)
Summary statistics
========================================
Databases converted: 1
Shards converted: 1
TSM files created: 1
Points read: 369
Points written: 369
NaN filtered: 0
Inf filtered: 0
Points without fields filtered: 0
Disk usage pre-conversion (bytes): 65536
Disk usage post-conversion (bytes): 11000
Reduction factor: 83%
Bytes per TSM point: 29.81
Total conversion time: 7.330443ms
$ # restart node, verify data
$ sudo rm -r /path/to/influxdb_backup
```
Note that the tool first lists the shards that will be converted,
before asking for confirmation. You can abort the conversion process
at this step if you just wish to see what would be converted, or if
the list of shards does not look correct.
__WARNING:__ If you run the `influx_tsm` tool as a user other than the
`influxdb` user (or the user that the InfluxDB process runs under),
please make sure to verify the shard permissions are correct prior to
starting InfluxDB. If needed, shard permissions can be corrected with
the `chown` command. For example:
```
sudo chown -R influxdb:influxdb /var/lib/influxdb
```
## Rolling back a conversion
After a successful backup (the message `Database XYZ backed up` was
logged), you have a duplicate of that database in the _backup_
directory you provided on the command line. If, when checking your
data after a successful conversion, you notice things missing or
something just isn't right, you can "undo" the conversion:
- Shut down your node (this is very important)
- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations)
- Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory.
Using the same directories as above, and assuming a database named `stats`:
```
$ sudo rm -r /var/lib/influxdb/data/stats
$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/
$ # restart influxd node
```
#### How to avoid downtime when upgrading shards
*Identify non-`tsm1` shards*
Non-`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>`.
`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>/<file>.tsm`.
*Determine which `b1`/`bz1` shards are cold for writes*
Run the `SHOW SHARDS` query to see the start and end dates for shards.
If the date range for a shard does not span the current time then the shard is said to be cold for writes.
This means that no new points are expected to be added to the shard.
The shard whose date range spans now is said to be hot for writes.
You can only safely convert cold shards without stopping the InfluxDB process.
*Convert cold shards*
1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention_policy>/<shard_id>`.
2. Run the `influx_tsm` tool on the copied files:
```
influx_tsm -parallel /tmp/data/
```
3. Remove the existing cold `b1`/`bz1` shards from the production data directory.
4. Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do this simultaneously with step 3 to avoid any query errors.
5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards.
> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard.
Nothing in InfluxDB will prevent writes to cold shards; they are merely unexpected, not impossible.
It is your responsibility to prevent writes to cold shards so that data is not lost.

View File

@ -0,0 +1,270 @@
// Package b1 reads data from b1 shards.
package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1"
import (
"encoding/binary"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
// DefaultChunkSize is the size of chunks read from the b1 shard
const DefaultChunkSize int = 1000
var excludedBuckets = map[string]bool{
"fields": true,
"meta": true,
"series": true,
"wal": true,
}
// Reader is used to read all data from a b1 shard.
type Reader struct {
path string
db *bolt.DB
tx *bolt.Tx
cursors []*cursor
currCursor int
keyBuf string
values []tsm1.Value
valuePos int
fields map[string]*tsdb.MeasurementFields
codecs map[string]*tsdb.FieldCodec
stats *stats.Stats
}
// NewReader returns a reader for the b1 shard at path.
func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {
r := &Reader{
path: path,
fields: make(map[string]*tsdb.MeasurementFields),
codecs: make(map[string]*tsdb.FieldCodec),
stats: stats,
}
if chunkSize <= 0 {
chunkSize = DefaultChunkSize
}
r.values = make([]tsm1.Value, chunkSize)
return r
}
// Open opens the reader.
func (r *Reader) Open() error {
// Open underlying storage.
db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
return err
}
r.db = db
// Load fields.
if err := r.db.View(func(tx *bolt.Tx) error {
meta := tx.Bucket([]byte("fields"))
c := meta.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
mf := &tsdb.MeasurementFields{}
if err := mf.UnmarshalBinary(v); err != nil {
return err
}
r.fields[string(k)] = mf
r.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields)
}
return nil
}); err != nil {
return err
}
seriesSet := make(map[string]bool)
// ignore series index and find all series in this shard
if err := r.db.View(func(tx *bolt.Tx) error {
tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
key := string(name)
if !excludedBuckets[key] {
seriesSet[key] = true
}
return nil
})
return nil
}); err != nil {
return err
}
r.tx, err = r.db.Begin(false)
if err != nil {
return err
}
// Create cursor for each field of each series.
for s := range seriesSet {
measurement := tsdb.MeasurementFromSeriesKey(s)
fields := r.fields[measurement]
if fields == nil {
r.stats.IncrFiltered()
continue
}
for _, f := range fields.Fields {
c := newCursor(r.tx, s, f.Name, r.codecs[measurement])
c.SeekTo(0)
r.cursors = append(r.cursors, c)
}
}
sort.Sort(cursors(r.cursors))
return nil
}
// Next returns whether any data remains to be read. It must be called before
// the next call to Read().
func (r *Reader) Next() bool {
r.valuePos = 0
OUTER:
for {
if r.currCursor >= len(r.cursors) {
// All cursors drained. No more data remains.
return false
}
cc := r.cursors[r.currCursor]
r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field)
for {
k, v := cc.Next()
if k == -1 {
// Go to next cursor and try again.
r.currCursor++
if r.valuePos == 0 {
// The previous cursor had no data. Instead of returning
// just go immediately to the next cursor.
continue OUTER
}
// There is some data available. Indicate that it should be read.
return true
}
if f, ok := v.(float64); ok {
if math.IsInf(f, 0) {
r.stats.AddPointsRead(1)
r.stats.IncrInf()
continue
}
if math.IsNaN(f) {
r.stats.AddPointsRead(1)
r.stats.IncrNaN()
continue
}
}
r.values[r.valuePos] = tsm1.NewValue(k, v)
r.valuePos++
if r.valuePos >= len(r.values) {
return true
}
}
}
}
// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is
// emitted completely for every field, in every series, before the next field is processed.
// Data from Read() adheres to the requirements for writing to tsm1 shards.
func (r *Reader) Read() (string, []tsm1.Value, error) {
return r.keyBuf, r.values[:r.valuePos], nil
}
// Close closes the reader.
func (r *Reader) Close() error {
r.tx.Rollback()
return r.db.Close()
}
// cursor provides ordered iteration across a series.
type cursor struct {
// Bolt cursor and readahead buffer.
cursor *bolt.Cursor
keyBuf int64
valBuf interface{}
series string
field string
dec *tsdb.FieldCodec
}
// newCursor returns an iterator for a key over a single field.
func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor {
cur := &cursor{
keyBuf: -2,
series: series,
field: field,
dec: dec,
}
// Retrieve series bucket.
b := tx.Bucket([]byte(series))
if b != nil {
cur.cursor = b.Cursor()
}
return cur
}
// SeekTo moves the cursor to a position.
func (c *cursor) SeekTo(seek int64) {
var seekBytes [8]byte
binary.BigEndian.PutUint64(seekBytes[:], uint64(seek))
k, v := c.cursor.Seek(seekBytes[:])
c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v)
}
// Next returns the next key/value pair from the cursor.
func (c *cursor) Next() (key int64, value interface{}) {
for {
k, v := func() (int64, interface{}) {
if c.keyBuf != -2 {
k, v := c.keyBuf, c.valBuf
c.keyBuf = -2
return k, v
}
k, v := c.cursor.Next()
if k == nil {
return -1, nil
}
return tsdb.DecodeKeyValue(c.field, c.dec, k, v)
}()
if k != -1 && v == nil {
// There is a point in the series at the next timestamp,
// but not for this cursor's field. Go to the next point.
continue
}
return k, v
}
}
// Sort b1 cursors in correct order for writing to TSM files.
type cursors []*cursor
func (a cursors) Len() int { return len(a) }
func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a cursors) Less(i, j int) bool {
if a[i].series == a[j].series {
return a[i].field < a[j].field
}
return a[i].series < a[j].series
}

View File

@ -0,0 +1,371 @@
// Package bz1 reads data from bz1 shards.
package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1"
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/golang/snappy"
"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
// DefaultChunkSize is the size of chunks read from the bz1 shard
const DefaultChunkSize = 1000
// Reader is used to read all data from a bz1 shard.
type Reader struct {
path string
db *bolt.DB
tx *bolt.Tx
cursors []*cursor
currCursor int
keyBuf string
values []tsm1.Value
valuePos int
fields map[string]*tsdb.MeasurementFields
codecs map[string]*tsdb.FieldCodec
stats *stats.Stats
}
// NewReader returns a reader for the bz1 shard at path.
func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {
r := &Reader{
path: path,
fields: make(map[string]*tsdb.MeasurementFields),
codecs: make(map[string]*tsdb.FieldCodec),
stats: stats,
}
if chunkSize <= 0 {
chunkSize = DefaultChunkSize
}
r.values = make([]tsm1.Value, chunkSize)
return r
}
// Open opens the reader.
func (r *Reader) Open() error {
// Open underlying storage.
db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
return err
}
r.db = db
seriesSet := make(map[string]bool)
if err := r.db.View(func(tx *bolt.Tx) error {
var data []byte
meta := tx.Bucket([]byte("meta"))
if meta == nil {
// No data in this shard.
return nil
}
pointsBucket := tx.Bucket([]byte("points"))
if pointsBucket == nil {
return nil
}
if err := pointsBucket.ForEach(func(key, _ []byte) error {
seriesSet[string(key)] = true
return nil
}); err != nil {
return err
}
buf := meta.Get([]byte("fields"))
if buf == nil {
// No data in this shard.
return nil
}
data, err = snappy.Decode(nil, buf)
if err != nil {
return err
}
if err := json.Unmarshal(data, &r.fields); err != nil {
return err
}
return nil
}); err != nil {
return err
}
// Build the codec for each measurement.
for k, v := range r.fields {
r.codecs[k] = tsdb.NewFieldCodec(v.Fields)
}
r.tx, err = r.db.Begin(false)
if err != nil {
return err
}
// Create cursor for each field of each series.
for s := range seriesSet {
measurement := tsdb.MeasurementFromSeriesKey(s)
fields := r.fields[measurement]
if fields == nil {
r.stats.IncrFiltered()
continue
}
for _, f := range fields.Fields {
c := newCursor(r.tx, s, f.Name, r.codecs[measurement])
if c == nil {
continue
}
c.SeekTo(0)
r.cursors = append(r.cursors, c)
}
}
sort.Sort(cursors(r.cursors))
return nil
}
// Next returns whether there is any more data to be read.
func (r *Reader) Next() bool {
r.valuePos = 0
OUTER:
for {
if r.currCursor >= len(r.cursors) {
// All cursors drained. No more data remains.
return false
}
cc := r.cursors[r.currCursor]
r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field)
for {
k, v := cc.Next()
if k == -1 {
// Go to next cursor and try again.
r.currCursor++
if r.valuePos == 0 {
// The previous cursor had no data. Instead of returning
// just go immediately to the next cursor.
continue OUTER
}
// There is some data available. Indicate that it should be read.
return true
}
if f, ok := v.(float64); ok {
if math.IsInf(f, 0) {
r.stats.AddPointsRead(1)
r.stats.IncrInf()
continue
}
if math.IsNaN(f) {
r.stats.AddPointsRead(1)
r.stats.IncrNaN()
continue
}
}
r.values[r.valuePos] = tsm1.NewValue(k, v)
r.valuePos++
if r.valuePos >= len(r.values) {
return true
}
}
}
}
// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is
// emitted completely for every field, in every series, before the next field is processed.
// Data from Read() adheres to the requirements for writing to tsm1 shards.
func (r *Reader) Read() (string, []tsm1.Value, error) {
return r.keyBuf, r.values[:r.valuePos], nil
}
// Close closes the reader.
func (r *Reader) Close() error {
r.tx.Rollback()
return r.db.Close()
}
// cursor provides ordered iteration across a series.
type cursor struct {
cursor *bolt.Cursor
buf []byte // uncompressed buffer
off int // buffer offset
fieldIndices []int
index int
series string
field string
dec *tsdb.FieldCodec
keyBuf int64
valBuf interface{}
}
// newCursor returns an instance of a bz1 cursor.
func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor {
// Retrieve points bucket. Ignore if there is no bucket.
b := tx.Bucket([]byte("points")).Bucket([]byte(series))
if b == nil {
return nil
}
return &cursor{
cursor: b.Cursor(),
series: series,
field: field,
dec: dec,
keyBuf: -2,
}
}
// SeekTo moves the cursor to a position.
func (c *cursor) SeekTo(seek int64) {
var seekBytes [8]byte
binary.BigEndian.PutUint64(seekBytes[:], uint64(seek))
// Move cursor to appropriate block and set to buffer.
k, v := c.cursor.Seek(seekBytes[:])
if v == nil { // get the last block, it might have this time
_, v = c.cursor.Last()
} else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check
_, v = c.cursor.Prev()
// if the previous block max time is less than the seek value, reset to where we were originally
if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) {
_, v = c.cursor.Seek(seekBytes[:])
}
}
c.setBuf(v)
// Read current block up to seek position.
c.seekBuf(seekBytes[:])
// Return current entry.
c.keyBuf, c.valBuf = c.read()
}
// seekBuf moves the cursor to a position within the current buffer.
func (c *cursor) seekBuf(seek []byte) (key, value []byte) {
for {
// Slice off the current entry.
buf := c.buf[c.off:]
// Exit if current entry's timestamp is on or after the seek.
if len(buf) == 0 {
return
}
if bytes.Compare(buf[0:8], seek) != -1 {
return
}
c.off += entryHeaderSize + entryDataSize(buf)
}
}
// Next returns the next key/value pair from the cursor. If there are no values
// remaining, -1 is returned.
func (c *cursor) Next() (int64, interface{}) {
for {
k, v := func() (int64, interface{}) {
if c.keyBuf != -2 {
k, v := c.keyBuf, c.valBuf
c.keyBuf = -2
return k, v
}
// Ignore if there is no buffer.
if len(c.buf) == 0 {
return -1, nil
}
// Move forward to next entry.
c.off += entryHeaderSize + entryDataSize(c.buf[c.off:])
// If no items left then read first item from next block.
if c.off >= len(c.buf) {
_, v := c.cursor.Next()
c.setBuf(v)
}
return c.read()
}()
if k != -1 && v == nil {
// There is a point in the series at the next timestamp,
// but not for this cursor's field. Go to the next point.
continue
}
return k, v
}
}
// setBuf saves a compressed block to the buffer.
func (c *cursor) setBuf(block []byte) {
// Clear if the block is empty.
if len(block) == 0 {
c.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0
return
}
// Otherwise decode block into buffer.
// Skip over the first 8 bytes since they are the max timestamp.
buf, err := snappy.Decode(nil, block[8:])
if err != nil {
// On a decode error, leave the cursor with an empty buffer rather than a partial one.
c.buf, c.off = c.buf[0:0], 0
fmt.Printf("block decode error: %s\n", err)
return
}
c.buf, c.off = buf, 0
}
// read reads the current key and value from the current block.
func (c *cursor) read() (key int64, value interface{}) {
// Return nil if the offset is at the end of the buffer.
if c.off >= len(c.buf) {
return -1, nil
}
// Otherwise read the current entry.
buf := c.buf[c.off:]
dataSize := entryDataSize(buf)
return tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize])
}
// Sort bz1 cursors in correct order for writing to TSM files.
type cursors []*cursor
func (a cursors) Len() int { return len(a) }
func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a cursors) Less(i, j int) bool {
if a[i].series == a[j].series {
return a[i].field < a[j].field
}
return a[i].series < a[j].series
}
// entryHeaderSize is the number of bytes required for the header.
const entryHeaderSize = 8 + 4
// entryDataSize returns the size of an entry's data field, in bytes.
func entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) }
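// For illustration, a single entry in a decompressed bz1 block therefore looks
// like [8-byte big-endian timestamp][4-byte big-endian data length][data], and
// could be split apart as follows (hypothetical helper, not used above):
//
//	func splitEntry(buf []byte) (ts int64, data []byte) {
//		ts = int64(binary.BigEndian.Uint64(buf[0:8]))
//		n := entryDataSize(buf)
//		return ts, buf[entryHeaderSize : entryHeaderSize+n]
//	}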

View File

@ -0,0 +1,118 @@
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
const (
maxBlocksPerKey = 65535
)
// KeyIterator is used to iterate over b* keys for conversion to tsm keys
type KeyIterator interface {
Next() bool
Read() (string, []tsm1.Value, error)
}
// Converter encapsulates the logic for converting b*1 shards to tsm1 shards.
type Converter struct {
path string
maxTSMFileSize uint32
sequence int
stats *stats.Stats
}
// NewConverter returns a new instance of the Converter.
func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter {
return &Converter{
path: path,
maxTSMFileSize: sz,
stats: stats,
}
}
// Process writes the data provided by iter to a tsm1 shard.
func (c *Converter) Process(iter KeyIterator) error {
// Ensure the tsm1 directory exists.
if err := os.MkdirAll(c.path, 0777); err != nil {
return err
}
// Iterate until no more data remains.
var w tsm1.TSMWriter
var keyCount map[string]int
for iter.Next() {
k, v, err := iter.Read()
if err != nil {
return err
}
if w == nil {
w, err = c.nextTSMWriter()
if err != nil {
return err
}
keyCount = map[string]int{}
}
if err := w.Write(k, v); err != nil {
return err
}
keyCount[k]++
c.stats.AddPointsRead(len(v))
c.stats.AddPointsWritten(len(v))
// Roll over to a new TSM file if the current one exceeds the configured max size, or this key has reached the maximum number of blocks per key.
if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey {
if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues {
return err
}
c.stats.AddTSMBytes(w.Size())
if err := w.Close(); err != nil {
return err
}
w = nil
}
}
if w != nil {
if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues {
return err
}
c.stats.AddTSMBytes(w.Size())
if err := w.Close(); err != nil {
return err
}
}
return nil
}
// nextTSMWriter returns the next TSMWriter for the Converter.
func (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) {
c.sequence++
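// TSM file names follow the tsm1 "<generation>-<sequence>.tsm" convention; the generation is fixed at 1 here.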
fileName := filepath.Join(c.path, fmt.Sprintf("%09d-%09d.%s", 1, c.sequence, tsm1.TSMFileExtension))
fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return nil, err
}
// Create the writer for the new TSM file.
w, err := tsm1.NewTSMWriter(fd)
if err != nil {
return nil, err
}
c.stats.IncrTSMFileCount()
return w, nil
}

View File

@ -0,0 +1,415 @@
// Command influx_tsm converts b1 or bz1 shards (from InfluxDB releases earlier than v0.11)
// to the current tsm1 format.
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"sort"
"strings"
"text/tabwriter"
"time"
"net/http"
_ "net/http/pprof"
"github.com/influxdata/influxdb/cmd/influx_tsm/b1"
"github.com/influxdata/influxdb/cmd/influx_tsm/bz1"
"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
)
// ShardReader reads b* shards and converts to tsm shards
type ShardReader interface {
KeyIterator
Open() error
Close() error
}
const (
tsmExt = "tsm"
)
var description = `
Convert a database from b1 or bz1 format to tsm1 format.
This tool will back up the directories before conversion (if not disabled).
The backed-up files must be removed manually, generally after starting up the
node again to make sure all of the data has been converted correctly.
To restore a backup:
Shut down the node, remove the converted directory, and
copy the backed-up directory to the original location.`
type options struct {
DataPath string
BackupPath string
DBs []string
DebugAddr string
TSMSize uint64
Parallel bool
SkipBackup bool
UpdateInterval time.Duration
Yes bool
CPUFile string
}
func (o *options) Parse() error {
fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
var dbs string
fs.StringVar(&dbs, "dbs", "", "Comma-delimited list of databases to convert. Default is to convert all databases.")
fs.Uint64Var(&opts.TSMSize, "sz", maxTSMSz, "Maximum size of individual TSM files.")
fs.BoolVar(&opts.Parallel, "parallel", false, "Perform parallel conversion. (up to GOMAXPROCS shards at once)")
fs.BoolVar(&opts.SkipBackup, "nobackup", false, "Disable database backups. Not recommended.")
fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directory.")
fs.StringVar(&opts.DebugAddr, "debug", "", "If set, http debugging endpoints will be enabled on the given address")
fs.DurationVar(&opts.UpdateInterval, "interval", 5*time.Second, "How often status updates are printed.")
fs.BoolVar(&opts.Yes, "y", false, "Don't ask, just convert")
fs.StringVar(&opts.CPUFile, "profile", "", "CPU Profile location")
fs.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %v [options] <data-path> \n", os.Args[0])
fmt.Fprintf(os.Stderr, "%v\n\nOptions:\n", description)
fs.PrintDefaults()
fmt.Fprintf(os.Stderr, "\n")
}
if err := fs.Parse(os.Args[1:]); err != nil {
return err
}
if len(fs.Args()) < 1 {
return errors.New("no data directory specified")
}
var err error
if o.DataPath, err = filepath.Abs(fs.Args()[0]); err != nil {
return err
}
if o.DataPath, err = filepath.EvalSymlinks(filepath.Clean(o.DataPath)); err != nil {
return err
}
if o.TSMSize > maxTSMSz {
return fmt.Errorf("bad TSM file size, maximum TSM file size is %d", maxTSMSz)
}
// Check if specific databases were requested.
o.DBs = strings.Split(dbs, ",")
if len(o.DBs) == 1 && o.DBs[0] == "" {
o.DBs = nil
}
if !o.SkipBackup {
if o.BackupPath == "" {
return errors.New("either -nobackup or -backup DIR must be set")
}
if o.BackupPath, err = filepath.Abs(o.BackupPath); err != nil {
return err
}
if o.BackupPath, err = filepath.EvalSymlinks(filepath.Clean(o.BackupPath)); err != nil {
if os.IsNotExist(err) {
return errors.New("backup directory must already exist")
}
return err
}
if strings.HasPrefix(o.BackupPath, o.DataPath) {
fmt.Println(o.BackupPath, o.DataPath)
return errors.New("backup directory cannot be contained within data directory")
}
}
if o.DebugAddr != "" {
log.Printf("Starting debugging server on http://%v", o.DebugAddr)
go func() {
log.Fatal(http.ListenAndServe(o.DebugAddr, nil))
}()
}
return nil
}
var opts options
const maxTSMSz uint64 = 2 * 1024 * 1024 * 1024
func init() {
log.SetOutput(os.Stderr)
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
}
func main() {
if err := opts.Parse(); err != nil {
log.Fatal(err)
}
// Determine the list of databases
dbs, err := ioutil.ReadDir(opts.DataPath)
if err != nil {
log.Fatalf("failed to access data directory at %v: %v\n", opts.DataPath, err)
}
fmt.Println() // Cleanly separate output from start of program.
if opts.Parallel {
if !isEnvSet("GOMAXPROCS") {
// Only modify GOMAXPROCS if it wasn't set in the environment
// This means 'GOMAXPROCS=1 influx_tsm -parallel' will not actually
// run in parallel
runtime.GOMAXPROCS(runtime.NumCPU())
}
}
var badUser string
if opts.SkipBackup {
badUser = "(NOT RECOMMENDED)"
}
// Dump summary of what is about to happen.
fmt.Println("b1 and bz1 shard conversion.")
fmt.Println("-----------------------------------")
fmt.Println("Data directory is: ", opts.DataPath)
if !opts.SkipBackup {
fmt.Println("Backup directory is: ", opts.BackupPath)
}
fmt.Println("Databases specified: ", allDBs(opts.DBs))
fmt.Println("Database backups enabled: ", yesno(!opts.SkipBackup), badUser)
fmt.Printf("Parallel mode enabled (GOMAXPROCS): %s (%d)\n", yesno(opts.Parallel), runtime.GOMAXPROCS(0))
fmt.Println()
shards := collectShards(dbs)
// Anything to convert?
fmt.Printf("\nFound %d shards that will be converted.\n", len(shards))
if len(shards) == 0 {
fmt.Println("Nothing to do.")
return
}
// Display list of convertible shards.
fmt.Println()
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Database\tRetention\tPath\tEngine\tSize")
for _, si := range shards {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%d\n", si.Database, si.RetentionPolicy, si.FullPath(opts.DataPath), si.FormatAsString(), si.Size)
}
w.Flush()
if !opts.Yes {
// Get confirmation from user.
fmt.Printf("\nThese shards will be converted. Proceed? y/N: ")
liner := bufio.NewReader(os.Stdin)
yn, err := liner.ReadString('\n')
if err != nil {
log.Fatalf("failed to read response: %v", err)
}
yn = strings.TrimRight(strings.ToLower(yn), "\n")
if yn != "y" {
log.Fatal("Conversion aborted.")
}
}
fmt.Println("Conversion starting....")
if opts.CPUFile != "" {
f, err := os.Create(opts.CPUFile)
if err != nil {
log.Fatal(err)
}
if err = pprof.StartCPUProfile(f); err != nil {
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
tr := newTracker(shards, opts)
if err := tr.Run(); err != nil {
log.Fatalf("Error occurred preventing completion: %v\n", err)
}
tr.PrintStats()
}
func collectShards(dbs []os.FileInfo) tsdb.ShardInfos {
// Get the list of shards for conversion.
var shards tsdb.ShardInfos
for _, db := range dbs {
d := tsdb.NewDatabase(filepath.Join(opts.DataPath, db.Name()))
shs, err := d.Shards()
if err != nil {
log.Fatalf("Failed to access shards for database %v: %v\n", d.Name(), err)
}
shards = append(shards, shs...)
}
sort.Sort(shards)
shards = shards.FilterFormat(tsdb.TSM1)
if len(opts.DBs) > 0 {
shards = shards.ExclusiveDatabases(opts.DBs)
}
return shards
}
// backupDatabase backs up the database named db
func backupDatabase(db string) error {
copyFile := func(path string, info os.FileInfo, err error) error {
// Strip the DataPath from the path and replace with BackupPath.
toPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1)
if info.IsDir() {
return os.MkdirAll(toPath, info.Mode())
}
in, err := os.Open(path)
if err != nil {
return err
}
defer in.Close()
srcInfo, err := os.Stat(path)
if err != nil {
return err
}
out, err := os.OpenFile(toPath, os.O_CREATE|os.O_WRONLY, info.Mode())
if err != nil {
return err
}
defer out.Close()
dstInfo, err := os.Stat(toPath)
if err != nil {
return err
}
if dstInfo.Size() == srcInfo.Size() {
log.Printf("Backup file already found for %v with correct size, skipping.", path)
return nil
}
if dstInfo.Size() > srcInfo.Size() {
log.Printf("Invalid backup file found for %v, replacing with good copy.", path)
if err := out.Truncate(0); err != nil {
return err
}
if _, err := out.Seek(0, io.SeekStart); err != nil {
return err
}
}
if dstInfo.Size() > 0 {
log.Printf("Resuming backup of file %v, starting at %v bytes", path, dstInfo.Size())
}
off, err := out.Seek(0, io.SeekEnd)
if err != nil {
return err
}
if _, err := in.Seek(off, io.SeekStart); err != nil {
return err
}
log.Printf("Backing up file %v", path)
_, err = io.Copy(out, in)
return err
}
return filepath.Walk(filepath.Join(opts.DataPath, db), copyFile)
}
// convertShard converts the shard in-place.
func convertShard(si *tsdb.ShardInfo, tr *tracker) error {
src := si.FullPath(opts.DataPath)
dst := fmt.Sprintf("%v.%v", src, tsmExt)
var reader ShardReader
switch si.Format {
case tsdb.BZ1:
reader = bz1.NewReader(src, &tr.Stats, 0)
case tsdb.B1:
reader = b1.NewReader(src, &tr.Stats, 0)
default:
return fmt.Errorf("Unsupported shard format: %v", si.FormatAsString())
}
// Open the shard, and create a converter.
if err := reader.Open(); err != nil {
return fmt.Errorf("Failed to open %v for conversion: %v", src, err)
}
defer reader.Close()
converter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats)
// Perform the conversion.
if err := converter.Process(reader); err != nil {
return fmt.Errorf("Conversion of %v failed: %v", src, err)
}
// Delete source shard, and rename new tsm1 shard.
if err := reader.Close(); err != nil {
return fmt.Errorf("Conversion of %v failed due to close: %v", src, err)
}
if err := os.RemoveAll(si.FullPath(opts.DataPath)); err != nil {
return fmt.Errorf("Deletion of %v failed: %v", src, err)
}
if err := os.Rename(dst, src); err != nil {
return fmt.Errorf("Rename of %v to %v failed: %v", dst, src, err)
}
return nil
}
// ParallelGroup allows the maximum parallelism of a set of operations to be controlled.
type ParallelGroup chan struct{}
// NewParallelGroup returns a group which allows n operations to run in parallel. A value of 0
// means no operations will ever run.
func NewParallelGroup(n int) ParallelGroup {
return make(chan struct{}, n)
}
// Do executes one operation of the ParallelGroup
func (p ParallelGroup) Do(f func()) {
p <- struct{}{} // acquire working slot
defer func() { <-p }()
f()
}
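// Example usage (illustrative only; "jobs" is a hypothetical slice of funcs):
// run jobs with at most 4 in flight.
//
//	pg := NewParallelGroup(4)
//	var wg sync.WaitGroup
//	for _, job := range jobs {
//		job := job
//		wg.Add(1)
//		go pg.Do(func() { defer wg.Done(); job() })
//	}
//	wg.Wait()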
// yesno returns "yes" for true, "no" for false.
func yesno(b bool) string {
if b {
return "yes"
}
return "no"
}
// allDBs returns "all" if all databases are requested for conversion.
func allDBs(dbs []string) string {
if dbs == nil {
return "all"
}
return fmt.Sprintf("%v", dbs)
}
// isEnvSet checks to see if a variable was set in the environment
func isEnvSet(name string) bool {
for _, s := range os.Environ() {
if strings.SplitN(s, "=", 2)[0] == name {
return true
}
}
return false
}

View File

@ -0,0 +1,55 @@
// Package stats contains statistics for converting non-TSM shards to TSM.
package stats
import (
"sync/atomic"
"time"
)
// Stats are the statistics captured while converting non-TSM shards to TSM
type Stats struct {
NanFiltered uint64
InfFiltered uint64
FieldsFiltered uint64
PointsWritten uint64
PointsRead uint64
TsmFilesCreated uint64
TsmBytesWritten uint64
CompletedShards uint64
TotalTime time.Duration
}
// AddPointsRead increments the number of read points.
func (s *Stats) AddPointsRead(n int) {
atomic.AddUint64(&s.PointsRead, uint64(n))
}
// AddPointsWritten increments the number of written points.
func (s *Stats) AddPointsWritten(n int) {
atomic.AddUint64(&s.PointsWritten, uint64(n))
}
// AddTSMBytes increments the number of TSM Bytes.
func (s *Stats) AddTSMBytes(n uint32) {
atomic.AddUint64(&s.TsmBytesWritten, uint64(n))
}
// IncrTSMFileCount increments the number of TSM files created.
func (s *Stats) IncrTSMFileCount() {
atomic.AddUint64(&s.TsmFilesCreated, 1)
}
// IncrNaN increments the number of NaNs filtered.
func (s *Stats) IncrNaN() {
atomic.AddUint64(&s.NanFiltered, 1)
}
// IncrInf increments the number of Infs filtered.
func (s *Stats) IncrInf() {
atomic.AddUint64(&s.InfFiltered, 1)
}
// IncrFiltered increments the number of fields filtered.
func (s *Stats) IncrFiltered() {
atomic.AddUint64(&s.FieldsFiltered, 1)
}

View File

@ -0,0 +1,130 @@
package main
import (
"fmt"
"log"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
)
// tracker will orchestrate and track the conversions of non-TSM shards to TSM
type tracker struct {
Stats stats.Stats
shards tsdb.ShardInfos
opts options
pg ParallelGroup
wg sync.WaitGroup
}
// newTracker will setup and return a clean tracker instance
func newTracker(shards tsdb.ShardInfos, opts options) *tracker {
t := &tracker{
shards: shards,
opts: opts,
pg: NewParallelGroup(runtime.GOMAXPROCS(0)),
}
return t
}
func (t *tracker) Run() error {
conversionStart := time.Now()
// Backup each directory.
if !opts.SkipBackup {
databases := t.shards.Databases()
fmt.Printf("Backing up %d databases...\n", len(databases))
t.wg.Add(len(databases))
for i := range databases {
db := databases[i]
go t.pg.Do(func() {
defer t.wg.Done()
start := time.Now()
log.Printf("Backup of database '%v' started", db)
err := backupDatabase(db)
if err != nil {
log.Fatalf("Backup of database %v failed: %v\n", db, err)
}
log.Printf("Database %v backed up (%v)\n", db, time.Since(start))
})
}
t.wg.Wait()
} else {
fmt.Println("Database backup disabled.")
}
t.wg.Add(len(t.shards))
for i := range t.shards {
si := t.shards[i]
go t.pg.Do(func() {
defer func() {
atomic.AddUint64(&t.Stats.CompletedShards, 1)
t.wg.Done()
}()
start := time.Now()
log.Printf("Starting conversion of shard: %v", si.FullPath(opts.DataPath))
if err := convertShard(si, t); err != nil {
log.Fatalf("Failed to convert %v: %v\n", si.FullPath(opts.DataPath), err)
}
log.Printf("Conversion of %v successful (%v)\n", si.FullPath(opts.DataPath), time.Since(start))
})
}
done := make(chan struct{})
go func() {
t.wg.Wait()
close(done)
}()
WAIT_LOOP:
for {
select {
case <-done:
break WAIT_LOOP
case <-time.After(opts.UpdateInterval):
t.StatusUpdate()
}
}
t.Stats.TotalTime = time.Since(conversionStart)
return nil
}
func (t *tracker) StatusUpdate() {
shardCount := atomic.LoadUint64(&t.Stats.CompletedShards)
pointCount := atomic.LoadUint64(&t.Stats.PointsRead)
pointWritten := atomic.LoadUint64(&t.Stats.PointsWritten)
log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointCount, pointWritten)
}
func (t *tracker) PrintStats() {
preSize := t.shards.Size()
postSize := int64(t.Stats.TsmBytesWritten)
fmt.Printf("\nSummary statistics\n========================================\n")
fmt.Printf("Databases converted: %d\n", len(t.shards.Databases()))
fmt.Printf("Shards converted: %d\n", len(t.shards))
fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated)
fmt.Printf("Points read: %d\n", t.Stats.PointsRead)
fmt.Printf("Points written: %d\n", t.Stats.PointsWritten)
fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered)
fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered)
fmt.Printf("Points without fields filtered: %d\n", t.Stats.FieldsFiltered)
fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize)
fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize)
fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize)
fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten))
fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime)
fmt.Println()
}

View File

@ -0,0 +1,119 @@
package tsdb
import (
"encoding/binary"
"errors"
"fmt"
"math"
)
const (
fieldFloat = 1
fieldInteger = 2
fieldBoolean = 3
fieldString = 4
)
var (
// ErrFieldNotFound is returned when a field cannot be found.
ErrFieldNotFound = errors.New("field not found")
// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID
// for which there is no mapping.
ErrFieldUnmappedID = errors.New("field ID not mapped")
)
// FieldCodec provides encoding and decoding functionality for the fields of a given
// Measurement.
type FieldCodec struct {
fieldsByID map[uint8]*Field
fieldsByName map[string]*Field
}
// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with
// a RLock that protects the Measurement.
func NewFieldCodec(fields map[string]*Field) *FieldCodec {
fieldsByID := make(map[uint8]*Field, len(fields))
fieldsByName := make(map[string]*Field, len(fields))
for _, f := range fields {
fieldsByID[f.ID] = f
fieldsByName[f.Name] = f
}
return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName}
}
// FieldIDByName returns the ID for the given field.
func (f *FieldCodec) FieldIDByName(s string) (uint8, error) {
fi := f.fieldsByName[s]
if fi == nil {
return 0, ErrFieldNotFound
}
return fi.ID, nil
}
// DecodeByID scans a byte slice for a field with the given ID, converts it to its
// expected type, and returns that value.
func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) {
var value interface{}
for {
if len(b) == 0 {
// No more bytes.
return nil, ErrFieldNotFound
}
field := f.fieldsByID[b[0]]
if field == nil {
// This can happen, though it is very unlikely. If this node receives encoded data, to be written
// to disk, and is queried for that data before its metastore is updated, there will be no field
// mapping for the data during decode. All this can happen because data is encoded by the node
// that first received the write request, not the node that actually writes the data to disk.
// So if this happens, the read must be aborted.
return nil, ErrFieldUnmappedID
}
switch field.Type {
case fieldFloat:
if field.ID == targetID {
value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9]))
}
b = b[9:]
case fieldInteger:
if field.ID == targetID {
value = int64(binary.BigEndian.Uint64(b[1:9]))
}
b = b[9:]
case fieldBoolean:
if field.ID == targetID {
value = b[1] == 1
}
b = b[2:]
case fieldString:
length := binary.BigEndian.Uint16(b[1:3])
if field.ID == targetID {
value = string(b[3 : 3+length])
}
b = b[3+length:]
default:
panic(fmt.Sprintf("unsupported value type during decode by id: %T", field.Type))
}
if value != nil {
return value, nil
}
}
}
// DecodeByName scans a byte slice for a field with the given name, converts it to its
// expected type, and returns that value.
func (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) {
fi := f.FieldByName(name)
if fi == nil {
return 0, ErrFieldNotFound
}
return f.DecodeByID(fi.ID, b)
}
// FieldByName returns the field by its name. It returns nil if the field is not found.
func (f *FieldCodec) FieldByName(name string) *Field {
return f.fieldsByName[name]
}
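A minimal, self-contained sketch of the byte layout DecodeByID walks: each encoded value is a one-byte field ID followed by a type-dependent payload (8 big-endian bytes for floats and integers, 1 byte for booleans, a 2-byte big-endian length plus the bytes for strings). The field ID and value below are illustrative.

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// Encode a single float64 field with ID 1, mirroring the
	// [ID byte][8-byte big-endian payload] layout DecodeByID expects.
	buf := make([]byte, 9)
	buf[0] = 1 // illustrative field ID
	binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(42.5))

	// Decode it the same way the fieldFloat case does.
	value := math.Float64frombits(binary.BigEndian.Uint64(buf[1:9]))
	fmt.Println(buf[0], value) // 1 42.5
}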

View File

@ -0,0 +1,244 @@
// Package tsdb abstracts the various shard types supported by the influx_tsm command.
package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
import (
"fmt"
"os"
"path"
"path/filepath"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdata/influxdb/pkg/slices"
)
// Flags for differentiating between engines
const (
B1 = iota
BZ1
TSM1
)
// EngineFormat holds the flag for the engine
type EngineFormat int
// String returns the string format of the engine.
func (e EngineFormat) String() string {
switch e {
case TSM1:
return "tsm1"
case B1:
return "b1"
case BZ1:
return "bz1"
default:
panic("unrecognized shard engine format")
}
}
// ShardInfo is the description of a shard on disk.
type ShardInfo struct {
Database string
RetentionPolicy string
Path string
Format EngineFormat
Size int64
}
// FormatAsString returns the format of the shard as a string.
func (s *ShardInfo) FormatAsString() string {
return s.Format.String()
}
// FullPath returns the full path to the shard, given the data directory root.
func (s *ShardInfo) FullPath(dataPath string) string {
return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path)
}
// ShardInfos is a slice of ShardInfo, sortable by database, retention policy, and path.
type ShardInfos []*ShardInfo
func (s ShardInfos) Len() int { return len(s) }
func (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ShardInfos) Less(i, j int) bool {
if s[i].Database == s[j].Database {
if s[i].RetentionPolicy == s[j].RetentionPolicy {
return s[i].Path < s[j].Path
}
return s[i].RetentionPolicy < s[j].RetentionPolicy
}
return s[i].Database < s[j].Database
}
// Databases returns the sorted unique set of databases for the shards.
func (s ShardInfos) Databases() []string {
dbm := make(map[string]bool)
for _, ss := range s {
dbm[ss.Database] = true
}
var dbs []string
for k := range dbm {
dbs = append(dbs, k)
}
sort.Strings(dbs)
return dbs
}
// FilterFormat returns a copy of the ShardInfos, with shards of the given
// format removed.
func (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos {
var a ShardInfos
for _, si := range s {
if si.Format != fmt {
a = append(a, si)
}
}
return a
}
// Size returns the space on disk consumed by the shards.
func (s ShardInfos) Size() int64 {
var sz int64
for _, si := range s {
sz += si.Size
}
return sz
}
// ExclusiveDatabases returns a copy of the ShardInfos, containing only shards
// associated with the given databases. If the given set is empty, all shards
// are returned.
func (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos {
var a ShardInfos
// Empty set? Return everything.
if len(exc) == 0 {
a = make(ShardInfos, len(s))
copy(a, s)
return a
}
for _, si := range s {
if slices.Exists(exc, si.Database) {
a = append(a, si)
}
}
return a
}
// Database represents an entire database on disk.
type Database struct {
path string
}
// NewDatabase creates a database instance using data at path.
func NewDatabase(path string) *Database {
return &Database{path: path}
}
// Name returns the name of the database.
func (d *Database) Name() string {
return path.Base(d.path)
}
// Path returns the path to the database.
func (d *Database) Path() string {
return d.path
}
// Shards returns information for every shard in the database.
func (d *Database) Shards() ([]*ShardInfo, error) {
fd, err := os.Open(d.path)
if err != nil {
return nil, err
}
// Get each retention policy.
rps, err := fd.Readdirnames(-1)
if err != nil {
return nil, err
}
// Process each retention policy.
var shardInfos []*ShardInfo
for _, rp := range rps {
rpfd, err := os.Open(filepath.Join(d.path, rp))
if err != nil {
return nil, err
}
// Process each shard
shards, err := rpfd.Readdirnames(-1)
if err != nil {
return nil, err
}
for _, sh := range shards {
fmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh))
if err != nil {
return nil, err
}
si := &ShardInfo{
Database: d.Name(),
RetentionPolicy: path.Base(rp),
Path: sh,
Format: fmt,
Size: sz,
}
shardInfos = append(shardInfos, si)
}
}
sort.Sort(ShardInfos(shardInfos))
return shardInfos, nil
}
// shardFormat returns the format and size on disk of the shard at path.
func shardFormat(path string) (EngineFormat, int64, error) {
// If it's a directory then it's a tsm1 engine
fi, err := os.Stat(path)
if err != nil {
return 0, 0, err
}
if fi.Mode().IsDir() {
return TSM1, fi.Size(), nil
}
// It must be a BoltDB-based engine.
db, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
return 0, 0, err
}
defer db.Close()
var format EngineFormat
err = db.View(func(tx *bolt.Tx) error {
// Retrieve the meta bucket.
b := tx.Bucket([]byte("meta"))
// If no format is specified then it must be an original b1 database.
if b == nil {
format = B1
return nil
}
// There is an actual format indicator.
switch f := string(b.Get([]byte("format"))); f {
case "b1", "v1":
format = B1
case "bz1":
format = BZ1
default:
return fmt.Errorf("unrecognized engine format: %s", f)
}
return nil
})
return format, fi.Size(), err
}
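A hedged usage sketch for this package: open one database directory, list its shards (sorted by database, retention policy, and path), and print each shard's full path and engine format. The /var/lib/influxdb/data paths are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
)

func main() {
	// NewDatabase takes the path to a single database directory.
	db := tsdb.NewDatabase("/var/lib/influxdb/data/mydb") // illustrative path

	shards, err := db.Shards()
	if err != nil {
		log.Fatal(err)
	}
	for _, si := range shards {
		// FullPath expects the data directory root, not the database path.
		fmt.Printf("%s (%s)\n", si.FullPath("/var/lib/influxdb/data"), si.FormatAsString())
	}
}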

View File

@ -0,0 +1,122 @@
// Code generated by protoc-gen-gogo.
// source: internal/meta.proto
// DO NOT EDIT!
/*
Package internal is a generated protocol buffer package.
It is generated from these files:
internal/meta.proto
It has these top-level messages:
Series
Tag
MeasurementFields
Field
*/
package internal
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Series struct {
Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"`
Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Series) Reset() { *m = Series{} }
func (m *Series) String() string { return proto.CompactTextString(m) }
func (*Series) ProtoMessage() {}
func (m *Series) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *Series) GetTags() []*Tag {
if m != nil {
return m.Tags
}
return nil
}
type Tag struct {
Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Tag) Reset() { *m = Tag{} }
func (m *Tag) String() string { return proto.CompactTextString(m) }
func (*Tag) ProtoMessage() {}
func (m *Tag) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *Tag) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type MeasurementFields struct {
Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MeasurementFields) Reset() { *m = MeasurementFields{} }
func (m *MeasurementFields) String() string { return proto.CompactTextString(m) }
func (*MeasurementFields) ProtoMessage() {}
func (m *MeasurementFields) GetFields() []*Field {
if m != nil {
return m.Fields
}
return nil
}
type Field struct {
ID *int32 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
Type *int32 `protobuf:"varint,3,req,name=Type" json:"Type,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Field) Reset() { *m = Field{} }
func (m *Field) String() string { return proto.CompactTextString(m) }
func (*Field) ProtoMessage() {}
func (m *Field) GetID() int32 {
if m != nil && m.ID != nil {
return *m.ID
}
return 0
}
func (m *Field) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
func (m *Field) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return 0
}
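One property of these generated accessors worth noting: every Get* method nil-checks the receiver and the field pointer, so they are safe to call on a nil message. A minimal sketch:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal"
)

func main() {
	// A nil message yields zero values instead of panicking.
	var s *internal.Series
	fmt.Println(s.GetKey() == "") // true
	fmt.Println(s.GetTags())      // []
}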

View File

@ -0,0 +1,60 @@
package tsdb
import (
"encoding/binary"
"strings"
"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal"
"github.com/influxdata/influxdb/influxql"
"github.com/gogo/protobuf/proto"
)
// Field represents an encoded field.
type Field struct {
ID uint8 `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Type influxql.DataType `json:"type,omitempty"`
}
// MeasurementFields holds the fields of a measurement, keyed by field name, along with their codec.
type MeasurementFields struct {
Fields map[string]*Field `json:"fields"`
Codec *FieldCodec
}
// UnmarshalBinary decodes the object from a binary format.
func (m *MeasurementFields) UnmarshalBinary(buf []byte) error {
var pb internal.MeasurementFields
if err := proto.Unmarshal(buf, &pb); err != nil {
return err
}
m.Fields = make(map[string]*Field)
for _, f := range pb.Fields {
m.Fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())}
}
return nil
}
// Series represents a series in the shard.
type Series struct {
Key string
Tags map[string]string
}
// MeasurementFromSeriesKey returns the Measurement name for a given series.
func MeasurementFromSeriesKey(key string) string {
return strings.SplitN(key, ",", 2)[0]
}
// DecodeKeyValue decodes the key and value from bytes.
func DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) {
// Convert key to a timestamp.
key := int64(binary.BigEndian.Uint64(k[0:8]))
decValue, err := dec.DecodeByName(field, v)
if err != nil {
return key, nil
}
return key, decValue
}
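The key handed to DecodeKeyValue is a raw series entry key whose first 8 bytes are a big-endian nanosecond timestamp. A small self-contained sketch of that layout, using an illustrative timestamp:

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	// Pack a nanosecond timestamp the way DecodeKeyValue expects it.
	k := make([]byte, 8)
	ts := time.Date(2017, 10, 25, 0, 0, 0, 0, time.UTC).UnixNano()
	binary.BigEndian.PutUint64(k, uint64(ts))

	// DecodeKeyValue recovers it with the same conversion.
	key := int64(binary.BigEndian.Uint64(k[0:8]))
	fmt.Println(key == ts) // true
}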

View File

@ -0,0 +1,387 @@
// Package backup is the backup subcommand for the influxd command.
package backup
import (
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/services/snapshotter"
"github.com/influxdata/influxdb/tcp"
)
const (
// Suffix is a suffix added to the backup while it's in-process.
Suffix = ".pending"
// Metafile is the base name given to the metastore backups.
Metafile = "meta"
// BackupFilePattern is the beginning of the pattern for a backup
// file. They follow the scheme <database>.<retention>.<shardID>.<increment>
BackupFilePattern = "%s.%s.%05d"
)
// Command represents the program execution for "influxd backup".
type Command struct {
// The logger passed to the ticker during execution.
Logger *log.Logger
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
host string
path string
database string
}
// NewCommand returns a new instance of Command with default settings.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the program.
func (cmd *Command) Run(args ...string) error {
// Set up logger.
cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags)
// Parse command line arguments.
retentionPolicy, shardID, since, err := cmd.parseFlags(args)
if err != nil {
return err
}
// based on the arguments passed in we only backup the minimum
if shardID != "" {
// always backup the metastore
if err := cmd.backupMetastore(); err != nil {
return err
}
err = cmd.backupShard(retentionPolicy, shardID, since)
} else if retentionPolicy != "" {
err = cmd.backupRetentionPolicy(retentionPolicy, since)
} else if cmd.database != "" {
err = cmd.backupDatabase(since)
} else {
err = cmd.backupMetastore()
}
if err != nil {
cmd.Logger.Printf("backup failed: %v", err)
return err
}
cmd.Logger.Println("backup complete")
return nil
}
// parseFlags parses and validates the command line arguments into a request object.
func (cmd *Command) parseFlags(args []string) (retentionPolicy, shardID string, since time.Time, err error) {
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&cmd.host, "host", "localhost:8088", "")
fs.StringVar(&cmd.database, "database", "", "")
fs.StringVar(&retentionPolicy, "retention", "", "")
fs.StringVar(&shardID, "shard", "", "")
var sinceArg string
fs.StringVar(&sinceArg, "since", "", "")
fs.SetOutput(cmd.Stderr)
fs.Usage = cmd.printUsage
err = fs.Parse(args)
if err != nil {
return
}
if sinceArg != "" {
since, err = time.Parse(time.RFC3339, sinceArg)
if err != nil {
return
}
}
// Ensure that exactly one backup path is specified.
if fs.NArg() == 0 {
return "", "", time.Unix(0, 0), errors.New("backup destination path required")
} else if fs.NArg() != 1 {
return "", "", time.Unix(0, 0), errors.New("only one backup path allowed")
}
cmd.path = fs.Arg(0)
err = os.MkdirAll(cmd.path, 0700)
return
}
// backupShard writes a tar archive of the given shard, containing any TSM files
// created since the given time.
func (cmd *Command) backupShard(retentionPolicy string, shardID string, since time.Time) error {
id, err := strconv.ParseUint(shardID, 10, 64)
if err != nil {
return err
}
shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(BackupFilePattern, cmd.database, retentionPolicy, id)))
if err != nil {
return err
}
cmd.Logger.Printf("backing up db=%v rp=%v shard=%v to %s since %s",
cmd.database, retentionPolicy, shardID, shardArchivePath, since)
req := &snapshotter.Request{
Type: snapshotter.RequestShardBackup,
Database: cmd.database,
RetentionPolicy: retentionPolicy,
ShardID: id,
Since: since,
}
// TODO: verify shard backup data
return cmd.downloadAndVerify(req, shardArchivePath, nil)
}
// backupDatabase will request the database information from the server and then backup the metastore and
// every shard in every retention policy in the database. Each shard will be written to a separate tar.
func (cmd *Command) backupDatabase(since time.Time) error {
cmd.Logger.Printf("backing up db=%s since %s", cmd.database, since)
req := &snapshotter.Request{
Type: snapshotter.RequestDatabaseInfo,
Database: cmd.database,
}
response, err := cmd.requestInfo(req)
if err != nil {
return err
}
return cmd.backupResponsePaths(response, since)
}
// backupRetentionPolicy will request the retention policy information from the server and then backup
// the metastore and every shard in the retention policy. Each shard will be written to a separate tar.
func (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Time) error {
cmd.Logger.Printf("backing up rp=%s since %s", retentionPolicy, since)
req := &snapshotter.Request{
Type: snapshotter.RequestRetentionPolicyInfo,
Database: cmd.database,
RetentionPolicy: retentionPolicy,
}
response, err := cmd.requestInfo(req)
if err != nil {
return err
}
return cmd.backupResponsePaths(response, since)
}
// backupResponsePaths will backup the metastore and all shard paths in the response struct
func (cmd *Command) backupResponsePaths(response *snapshotter.Response, since time.Time) error {
if err := cmd.backupMetastore(); err != nil {
return err
}
// loop through the returned paths and back up each shard
for _, path := range response.Paths {
rp, id, err := retentionAndShardFromPath(path)
if err != nil {
return err
}
if err := cmd.backupShard(rp, id, since); err != nil {
return err
}
}
return nil
}
// backupMetastore backs up the metastore on the host to the given path. Database and
// retention policy backups, as well as shard backups requested from the command line,
// also force a metastore backup.
func (cmd *Command) backupMetastore() error {
metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, Metafile))
if err != nil {
return err
}
cmd.Logger.Printf("backing up metastore to %s", metastoreArchivePath)
req := &snapshotter.Request{
Type: snapshotter.RequestMetastoreBackup,
}
return cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error {
binData, err := ioutil.ReadFile(file)
if err != nil {
return err
}
magic := binary.BigEndian.Uint64(binData[:8])
if magic != snapshotter.BackupMagicHeader {
cmd.Logger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)")
return errors.New("invalid metadata received")
}
return nil
})
}
// nextPath returns the next file to write to.
func (cmd *Command) nextPath(path string) (string, error) {
// Iterate through incremental files until one is available.
for i := 0; ; i++ {
s := fmt.Sprintf(path+".%02d", i)
if _, err := os.Stat(s); os.IsNotExist(err) {
return s, nil
} else if err != nil {
return "", err
}
}
}
// downloadAndVerify downloads either the metastore or a shard to a temp file and then
// renames it to its final backup file name once complete.
func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error {
tmppath := path + Suffix
if err := cmd.download(req, tmppath); err != nil {
return err
}
if validator != nil {
if err := validator(tmppath); err != nil {
if rmErr := os.Remove(tmppath); rmErr != nil {
cmd.Logger.Printf("Error cleaning up temporary file: %v", rmErr)
}
return err
}
}
f, err := os.Stat(tmppath)
if err != nil {
return err
}
// There was nothing downloaded, don't create an empty backup file.
if f.Size() == 0 {
return os.Remove(tmppath)
}
// Rename temporary file to final path.
if err := os.Rename(tmppath, path); err != nil {
return fmt.Errorf("rename: %s", err)
}
return nil
}
// download downloads a snapshot of either the metastore or a shard from a host to a given path.
func (cmd *Command) download(req *snapshotter.Request, path string) error {
// Create local file to write to.
f, err := os.Create(path)
if err != nil {
return fmt.Errorf("open temp file: %s", err)
}
defer f.Close()
for i := 0; i < 10; i++ {
if err = func() error {
// Connect to snapshotter service.
conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader)
if err != nil {
return err
}
defer conn.Close()
// Write the request
if err := json.NewEncoder(conn).Encode(req); err != nil {
return fmt.Errorf("encode snapshot request: %s", err)
}
// Read snapshot from the connection
if n, err := io.Copy(f, conn); err != nil || n == 0 {
return fmt.Errorf("copy backup to file: err=%v, n=%d", err, n)
}
return nil
}(); err == nil {
break
} else {
cmd.Logger.Printf("Download shard %v failed %s. Retrying (%d)...\n", req.ShardID, err, i)
time.Sleep(time.Second)
}
}
return err
}
// requestInfo will request the database or retention policy information from the host
func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) {
// Connect to snapshotter service.
conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader)
if err != nil {
return nil, err
}
defer conn.Close()
// Write the request
if err := json.NewEncoder(conn).Encode(request); err != nil {
return nil, fmt.Errorf("encode snapshot request: %s", err)
}
// Read the response
var r snapshotter.Response
if err := json.NewDecoder(conn).Decode(&r); err != nil {
return nil, err
}
return &r, nil
}
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
fmt.Fprintf(cmd.Stdout, `Downloads a snapshot of a data node and saves it to disk.
Usage: influxd backup [flags] PATH
-host <host:port>
The host to snapshot. Defaults to localhost:8088.
-database <name>
The database to backup.
-retention <name>
Optional. The retention policy to backup.
-shard <id>
Optional. The shard id to backup. If specified, retention is required.
-since <2015-12-24T08:12:23Z>
Optional. Do an incremental backup since the passed in RFC3339
formatted time.
`)
}
// retentionAndShardFromPath will take the shard relative path and split it into the
// retention policy name and shard ID. The first part of the path should be the database name.
func retentionAndShardFromPath(path string) (retention, shard string, err error) {
a := strings.Split(path, string(filepath.Separator))
if len(a) != 3 {
return "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path)
}
return a[1], a[2], nil
}
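For reference, some typical invocations matching the flags above (the database and retention policy names are illustrative):

influxd backup /tmp/backup
influxd backup -database mydb /tmp/backup
influxd backup -database mydb -retention autogen -shard 1 -since 2015-12-24T08:12:23Z /tmp/backup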

View File

@ -0,0 +1,46 @@
// Package help is the help subcommand of the influxd command.
package help
import (
"fmt"
"io"
"os"
"strings"
)
// Command displays help for command-line sub-commands.
type Command struct {
Stdout io.Writer
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stdout: os.Stdout,
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage))
return nil
}
const usage = `
Configure and start an InfluxDB server.
Usage: influxd [[command] [arguments]]
The commands are:
backup downloads a snapshot of a data node and saves it to disk
config display the default configuration
help display this help message
restore uses a snapshot of a data node to rebuild a cluster
run run node with existing configuration
version displays the InfluxDB version
"run" is the default command.
Use "influxd [command] -help" for more information about a command.
`

View File

@ -0,0 +1,177 @@
// Command influxd is the InfluxDB server.
package main
import (
"flag"
"fmt"
"io"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
"github.com/influxdata/influxdb/cmd"
"github.com/influxdata/influxdb/cmd/influxd/backup"
"github.com/influxdata/influxdb/cmd/influxd/help"
"github.com/influxdata/influxdb/cmd/influxd/restore"
"github.com/influxdata/influxdb/cmd/influxd/run"
"github.com/uber-go/zap"
)
// These variables are populated via the Go linker.
var (
version string
commit string
branch string
)
func init() {
// If version, commit, or branch are not set, make that clear.
if version == "" {
version = "unknown"
}
if commit == "" {
commit = "unknown"
}
if branch == "" {
branch = "unknown"
}
}
func main() {
rand.Seed(time.Now().UnixNano())
m := NewMain()
if err := m.Run(os.Args[1:]...); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
// Main represents the program execution.
type Main struct {
Logger zap.Logger
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// NewMain returns a new instance of Main.
func NewMain() *Main {
return &Main{
Logger: zap.New(
zap.NewTextEncoder(),
zap.Output(os.Stderr),
),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
}
// Run determines and runs the command specified by the CLI args.
func (m *Main) Run(args ...string) error {
// Extract the command name from args.
name, args := cmd.ParseCommandName(args)
switch name {
case "", "run":
cmd := run.NewCommand()
// Tell the server the build details.
cmd.Version = version
cmd.Commit = commit
cmd.Branch = branch
cmd.Logger = m.Logger
if err := cmd.Run(args...); err != nil {
return fmt.Errorf("run: %s", err)
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
m.Logger.Info("Listening for signals")
// Block until one of the signals above is received
<-signalCh
m.Logger.Info("Signal received, initializing clean shutdown...")
go cmd.Close()
// Block again until another signal is received, a shutdown timeout elapses,
// or the Command is gracefully closed
m.Logger.Info("Waiting for clean shutdown...")
select {
case <-signalCh:
m.Logger.Info("second signal received, initializing hard shutdown")
case <-time.After(time.Second * 30):
m.Logger.Info("time limit reached, initializing hard shutdown")
case <-cmd.Closed:
m.Logger.Info("server shutdown completed")
}
// goodbye.
case "backup":
name := backup.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("backup: %s", err)
}
case "restore":
name := restore.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("restore: %s", err)
}
case "config":
if err := run.NewPrintConfigCommand().Run(args...); err != nil {
return fmt.Errorf("config: %s", err)
}
case "version":
if err := NewVersionCommand().Run(args...); err != nil {
return fmt.Errorf("version: %s", err)
}
case "help":
if err := help.NewCommand().Run(args...); err != nil {
return fmt.Errorf("help: %s", err)
}
default:
return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name)
}
return nil
}
// VersionCommand represents the command executed by "influxd version".
type VersionCommand struct {
Stdout io.Writer
Stderr io.Writer
}
// NewVersionCommand returns a new instance of VersionCommand.
func NewVersionCommand() *VersionCommand {
return &VersionCommand{
Stdout: os.Stdout,
Stderr: os.Stderr,
}
}
// Run prints the current version and commit info.
func (cmd *VersionCommand) Run(args ...string) error {
// Parse flags in case -h is specified.
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, versionUsage) }
if err := fs.Parse(args); err != nil {
return err
}
// Print version info.
fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit)
return nil
}
var versionUsage = `Displays the InfluxDB version, build branch and git commit hash.
Usage: influxd version
`
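In practice the run path above means a first SIGTERM or interrupt begins a clean shutdown, and a second signal, or a 30-second timeout, forces a hard shutdown. Assuming influxd was started with -pidfile (the path below is illustrative):

kill -TERM $(cat /var/run/influxd.pid)   # begin clean shutdown
kill -TERM $(cat /var/run/influxd.pid)   # sent again: hard shutdown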

View File

@ -0,0 +1,355 @@
// Package restore is the restore subcommand for the influxd command,
// for restoring from a backup.
package restore
import (
"archive/tar"
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"github.com/influxdata/influxdb/cmd/influxd/backup"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/services/snapshotter"
)
// Command represents the program execution for "influxd restore".
type Command struct {
Stdout io.Writer
Stderr io.Writer
backupFilesPath string
metadir string
datadir string
database string
retention string
shard string
// TODO: when the new meta stuff is done this should not be exported or be gone
MetaConfig *meta.Config
}
// NewCommand returns a new instance of Command with default settings.
func NewCommand() *Command {
return &Command{
Stdout: os.Stdout,
Stderr: os.Stderr,
MetaConfig: meta.NewConfig(),
}
}
// Run executes the program.
func (cmd *Command) Run(args ...string) error {
if err := cmd.parseFlags(args); err != nil {
return err
}
if cmd.metadir != "" {
if err := cmd.unpackMeta(); err != nil {
return err
}
}
if cmd.shard != "" {
return cmd.unpackShard(cmd.shard)
} else if cmd.retention != "" {
return cmd.unpackRetention()
} else if cmd.datadir != "" {
return cmd.unpackDatabase()
}
return nil
}
// parseFlags parses and validates the command line arguments.
func (cmd *Command) parseFlags(args []string) error {
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&cmd.metadir, "metadir", "", "")
fs.StringVar(&cmd.datadir, "datadir", "", "")
fs.StringVar(&cmd.database, "database", "", "")
fs.StringVar(&cmd.retention, "retention", "", "")
fs.StringVar(&cmd.shard, "shard", "", "")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
return err
}
cmd.MetaConfig = meta.NewConfig()
cmd.MetaConfig.Dir = cmd.metadir
// Require the path to the backup files.
cmd.backupFilesPath = fs.Arg(0)
if cmd.backupFilesPath == "" {
return fmt.Errorf("path with backup files required")
}
// validate the arguments
if cmd.metadir == "" && cmd.database == "" {
return fmt.Errorf("-metadir or -database are required to restore")
}
if cmd.database != "" && cmd.datadir == "" {
return fmt.Errorf("-datadir is required to restore")
}
if cmd.shard != "" {
if cmd.database == "" {
return fmt.Errorf("-database is required to restore shard")
}
if cmd.retention == "" {
return fmt.Errorf("-retention is required to restore shard")
}
} else if cmd.retention != "" && cmd.database == "" {
return fmt.Errorf("-database is required to restore retention policy")
}
return nil
}
// unpackMeta reads the metadata from the backup directory, initializes a raft
// cluster, and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
// find the meta file
metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
if err != nil {
return err
}
if len(metaFiles) == 0 {
return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
}
latest := metaFiles[len(metaFiles)-1]
fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest)
// Read the metastore backup
f, err := os.Open(latest)
if err != nil {
return err
}
defer f.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, f); err != nil {
return fmt.Errorf("copy: %s", err)
}
b := buf.Bytes()
var i int
// Make sure the file is actually a meta store backup file
magic := binary.BigEndian.Uint64(b[:8])
if magic != snapshotter.BackupMagicHeader {
return fmt.Errorf("invalid metadata file")
}
i += 8
// Size of the meta store bytes
length := int(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
metaBytes := b[i : i+length]
i += int(length)
// Size of the node.json bytes
length = int(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
nodeBytes := b[i : i+length]
// Unpack into metadata.
var data meta.Data
if err := data.UnmarshalBinary(metaBytes); err != nil {
return fmt.Errorf("unmarshal: %s", err)
}
// Copy meta config and remove peers so it starts in single mode.
c := cmd.MetaConfig
c.Dir = cmd.metadir
// Create the meta dir.
if err := os.MkdirAll(c.Dir, 0700); err != nil {
return err
}
// Write node.json back to meta dir
if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil {
return err
}
client := meta.NewClient(c)
if err := client.Open(); err != nil {
return err
}
defer client.Close()
// Force set the full metadata.
if err := client.SetData(&data); err != nil {
return fmt.Errorf("set data: %s", err)
}
// remove the raft.db file if it exists
err = os.Remove(filepath.Join(cmd.metadir, "raft.db"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
// remove the node.json file if it exists
err = os.Remove(filepath.Join(cmd.metadir, "node.json"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return nil
}
// unpackShard will look for all backup files in the path matching this shard ID
// and restore them to the data dir
func (cmd *Command) unpackShard(shardID string) error {
// make sure the shard isn't already there so we don't clobber anything
restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID)
if _, err := os.Stat(restorePath); err == nil {
return fmt.Errorf("shard already present: %s", restorePath)
} else if !os.IsNotExist(err) {
return err
}
id, err := strconv.ParseUint(shardID, 10, 64)
if err != nil {
return err
}
// find the shard backup files
pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup.BackupFilePattern, cmd.database, cmd.retention, id))
return cmd.unpackFiles(pat + ".*")
}
// unpackDatabase will look for all backup files in the path matching this database
// and restore them to the data dir
func (cmd *Command) unpackDatabase() error {
// make sure the database isn't already there so we don't clobber anything
restorePath := filepath.Join(cmd.datadir, cmd.database)
if _, err := os.Stat(restorePath); err == nil {
return fmt.Errorf("database already present: %s", restorePath)
} else if !os.IsNotExist(err) {
return err
}
// find the database backup files
pat := filepath.Join(cmd.backupFilesPath, cmd.database)
return cmd.unpackFiles(pat + ".*")
}
// unpackRetention will look for all backup files in the path matching this retention
// and restore them to the data dir
func (cmd *Command) unpackRetention() error {
// make sure the retention policy isn't already there so we don't clobber anything
restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention)
if _, err := os.Stat(restorePath); err == nil {
return fmt.Errorf("retention already present: %s", restorePath)
} else if !os.IsNotExist(err) {
return err
}
// find the retention backup files
pat := filepath.Join(cmd.backupFilesPath, cmd.database)
return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.retention))
}
// unpackFiles will look for backup files matching the pattern and restore them to the data dir
func (cmd *Command) unpackFiles(pat string) error {
fmt.Printf("Restoring from backup %s\n", pat)
backupFiles, err := filepath.Glob(pat)
if err != nil {
return err
}
if len(backupFiles) == 0 {
return fmt.Errorf("no backup files for %s in %s", pat, cmd.backupFilesPath)
}
for _, fn := range backupFiles {
if err := cmd.unpackTar(fn); err != nil {
return err
}
}
return nil
}
// unpackTar will restore a single tar archive to the data dir
func (cmd *Command) unpackTar(tarFile string) error {
f, err := os.Open(tarFile)
if err != nil {
return err
}
defer f.Close()
tr := tar.NewReader(f)
for {
hdr, err := tr.Next()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
if err := cmd.unpackFile(tr, hdr.Name); err != nil {
return err
}
}
}
// unpackFile will copy the current file from the tar archive to the data dir
func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error {
nativeFileName := filepath.FromSlash(fileName)
fn := filepath.Join(cmd.datadir, nativeFileName)
fmt.Printf("unpacking %s\n", fn)
if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {
return fmt.Errorf("error making restore dir: %s", err.Error())
}
ff, err := os.Create(fn)
if err != nil {
return err
}
defer ff.Close()
if _, err := io.Copy(ff, tr); err != nil {
return err
}
return nil
}
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases,
retention policies, or specific shards. The InfluxDB process must not be
running during a restore.
Usage: influxd restore [flags] PATH
-metadir <path>
Optional. If set the metastore will be recovered to the given path.
-datadir <path>
Optional. If set the restore process will recover the specified
database, retention policy or shard to the given directory.
-database <name>
Optional. Required if no metadir given. Will restore the database
TSM files.
-retention <name>
Optional. If given, database is required. Will restore the retention policy's
TSM files.
-shard <id>
Optional. If given, database and retention are required. Will restore the shard's
TSM files.
`)
}
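For reference, some typical invocations matching the flag validation above (paths and names are illustrative):

influxd restore -metadir /var/lib/influxdb/meta /tmp/backup
influxd restore -database mydb -datadir /var/lib/influxdb/data /tmp/backup
influxd restore -database mydb -retention autogen -shard 1 -datadir /var/lib/influxdb/data /tmp/backup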

View File

@ -0,0 +1,261 @@
// Package run is the run (default) subcommand for the influxd command.
package run
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/uber-go/zap"
)
const logo = `
8888888 .d888 888 8888888b. 888888b.
888 d88P" 888 888 "Y88b 888 "88b
888 888 888 888 888 888 .88P
888 88888b. 888888 888 888 888 888 888 888 888 8888888K.
888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b
888 888 888 888 888 888 888 X88K 888 888 888 888
888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P
8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P"
`
// Command represents the command executed by "influxd run".
type Command struct {
Version string
Branch string
Commit string
BuildTime string
closing chan struct{}
Closed chan struct{}
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Logger zap.Logger
Server *Server
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
closing: make(chan struct{}),
Closed: make(chan struct{}),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Logger: zap.New(zap.NullEncoder()),
}
}
// Run parses the config from args and runs the server.
func (cmd *Command) Run(args ...string) error {
// Parse the command line flags.
options, err := cmd.ParseFlags(args...)
if err != nil {
return err
}
// Print sweet InfluxDB logo.
fmt.Print(logo)
// Mark start-up in log.
cmd.Logger.Info(fmt.Sprintf("InfluxDB starting, version %s, branch %s, commit %s",
cmd.Version, cmd.Branch, cmd.Commit))
cmd.Logger.Info(fmt.Sprintf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)))
// Write the PID file.
if err := cmd.writePIDFile(options.PIDFile); err != nil {
return fmt.Errorf("write pid file: %s", err)
}
// Parse config
config, err := cmd.ParseConfig(options.GetConfigPath())
if err != nil {
return fmt.Errorf("parse config: %s", err)
}
// Apply any environment variables on top of the parsed config
if err := config.ApplyEnvOverrides(); err != nil {
return fmt.Errorf("apply env config: %v", err)
}
// Validate the configuration.
if err := config.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
}
if config.HTTPD.PprofEnabled {
// Turn on block profiling to debug stuck databases
runtime.SetBlockProfileRate(int(1 * time.Second))
}
// Create server from config and start it.
buildInfo := &BuildInfo{
Version: cmd.Version,
Commit: cmd.Commit,
Branch: cmd.Branch,
Time: cmd.BuildTime,
}
s, err := NewServer(config, buildInfo)
if err != nil {
return fmt.Errorf("create server: %s", err)
}
s.Logger = cmd.Logger
s.CPUProfile = options.CPUProfile
s.MemProfile = options.MemProfile
if err := s.Open(); err != nil {
return fmt.Errorf("open server: %s", err)
}
cmd.Server = s
// Begin monitoring the server's error channel.
go cmd.monitorServerErrors()
return nil
}
// Close shuts down the server.
func (cmd *Command) Close() error {
defer close(cmd.Closed)
close(cmd.closing)
if cmd.Server != nil {
return cmd.Server.Close()
}
return nil
}
func (cmd *Command) monitorServerErrors() {
logger := log.New(cmd.Stderr, "", log.LstdFlags)
for {
select {
case err := <-cmd.Server.Err():
logger.Println(err)
case <-cmd.closing:
return
}
}
}
// ParseFlags parses the command line flags from args and returns an options set.
func (cmd *Command) ParseFlags(args ...string) (Options, error) {
var options Options
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&options.ConfigPath, "config", "", "")
fs.StringVar(&options.PIDFile, "pidfile", "", "")
// Ignore hostname option.
_ = fs.String("hostname", "", "")
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
fs.StringVar(&options.MemProfile, "memprofile", "", "")
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }
if err := fs.Parse(args); err != nil {
return Options{}, err
}
return options, nil
}
// writePIDFile writes the process ID to path.
func (cmd *Command) writePIDFile(path string) error {
// Ignore if path is not set.
if path == "" {
return nil
}
// Ensure the required directory structure exists.
err := os.MkdirAll(filepath.Dir(path), 0777)
if err != nil {
return fmt.Errorf("mkdir: %s", err)
}
// Retrieve the PID and write it.
pid := strconv.Itoa(os.Getpid())
if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {
return fmt.Errorf("write file: %s", err)
}
return nil
}
// ParseConfig parses the config at path.
// It returns a demo configuration if path is blank.
func (cmd *Command) ParseConfig(path string) (*Config, error) {
// Use demo configuration if no config path is specified.
if path == "" {
cmd.Logger.Info("no configuration provided, using default settings")
return NewDemoConfig()
}
cmd.Logger.Info(fmt.Sprintf("Using configuration at: %s", path))
config := NewConfig()
if err := config.FromTomlFile(path); err != nil {
return nil, err
}
return config, nil
}
const usage = `Runs the InfluxDB server.
Usage: influxd run [flags]
-config <path>
Set the path to the configuration file.
This defaults to the environment variable INFLUXDB_CONFIG_PATH,
~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
is present at any of these locations.
Disable the automatic loading of a configuration file using
the null device (such as /dev/null).
-pidfile <path>
Write process ID to a file.
-cpuprofile <path>
Write CPU profiling information to a file.
-memprofile <path>
Write memory usage information to a file.
`
// Options represents the command line options that can be parsed.
type Options struct {
ConfigPath string
PIDFile string
CPUProfile string
MemProfile string
}
// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {
return ""
}
return opt.ConfigPath
} else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" {
return envVar
}
for _, path := range []string{
os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"),
"/etc/influxdb/influxdb.conf",
} {
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
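A short sketch of the resolution order GetConfigPath implements, including the null-device escape hatch that disables config-file loading entirely; the explicit path is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/influxdata/influxdb/cmd/influxd/run"
)

func main() {
	// An explicit path wins over the environment and the search paths.
	opt := run.Options{ConfigPath: "/etc/influxdb/influxdb.conf"} // illustrative
	fmt.Println(opt.GetConfigPath())

	// The null device means "do not load any config file".
	opt = run.Options{ConfigPath: os.DevNull}
	fmt.Println(opt.GetConfigPath() == "") // true
}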

View File

@ -0,0 +1,363 @@
package run
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb/coordinator"
"github.com/influxdata/influxdb/monitor"
"github.com/influxdata/influxdb/monitor/diagnostics"
"github.com/influxdata/influxdb/services/collectd"
"github.com/influxdata/influxdb/services/continuous_querier"
"github.com/influxdata/influxdb/services/graphite"
"github.com/influxdata/influxdb/services/httpd"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/services/opentsdb"
"github.com/influxdata/influxdb/services/precreator"
"github.com/influxdata/influxdb/services/retention"
"github.com/influxdata/influxdb/services/subscriber"
"github.com/influxdata/influxdb/services/udp"
"github.com/influxdata/influxdb/tsdb"
)
const (
// DefaultBindAddress is the default address for various RPC services.
DefaultBindAddress = "127.0.0.1:8088"
)
// Config represents the configuration format for the influxd binary.
type Config struct {
Meta *meta.Config `toml:"meta"`
Data tsdb.Config `toml:"data"`
Coordinator coordinator.Config `toml:"coordinator"`
Retention retention.Config `toml:"retention"`
Precreator precreator.Config `toml:"shard-precreation"`
Monitor monitor.Config `toml:"monitor"`
Subscriber subscriber.Config `toml:"subscriber"`
HTTPD httpd.Config `toml:"http"`
GraphiteInputs []graphite.Config `toml:"graphite"`
CollectdInputs []collectd.Config `toml:"collectd"`
OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"`
UDPInputs []udp.Config `toml:"udp"`
ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`
// Server reporting
ReportingDisabled bool `toml:"reporting-disabled"`
// BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.)
BindAddress string `toml:"bind-address"`
}
// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
c := &Config{}
c.Meta = meta.NewConfig()
c.Data = tsdb.NewConfig()
c.Coordinator = coordinator.NewConfig()
c.Precreator = precreator.NewConfig()
c.Monitor = monitor.NewConfig()
c.Subscriber = subscriber.NewConfig()
c.HTTPD = httpd.NewConfig()
c.GraphiteInputs = []graphite.Config{graphite.NewConfig()}
c.CollectdInputs = []collectd.Config{collectd.NewConfig()}
c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()}
c.UDPInputs = []udp.Config{udp.NewConfig()}
c.ContinuousQuery = continuous_querier.NewConfig()
c.Retention = retention.NewConfig()
c.BindAddress = DefaultBindAddress
return c
}
// NewDemoConfig returns the config that runs when no config is specified.
func NewDemoConfig() (*Config, error) {
c := NewConfig()
var homeDir string
// By default, store meta and data files in the current user's home directory
u, err := user.Current()
if err == nil {
homeDir = u.HomeDir
} else if os.Getenv("HOME") != "" {
homeDir = os.Getenv("HOME")
} else {
return nil, fmt.Errorf("failed to determine current user for storage")
}
c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta")
c.Data.Dir = filepath.Join(homeDir, ".influxdb/data")
c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
return c, nil
}
// trimBOM trims the UTF-8 Byte-Order-Mark from the beginning of the file.
// This is for Windows compatibility only.
// See https://github.com/influxdata/telegraf/issues/1378.
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// FromTomlFile loads the config from a TOML file.
func (c *Config) FromTomlFile(fpath string) error {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return err
}
bs = trimBOM(bs)
return c.FromToml(string(bs))
}
// FromToml loads the config from TOML.
func (c *Config) FromToml(input string) error {
// Replace deprecated [cluster] with [coordinator]
re := regexp.MustCompile(`(?m)^\s*\[cluster\]`)
input = re.ReplaceAllStringFunc(input, func(in string) string {
in = strings.TrimSpace(in)
out := "[coordinator]"
log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in)
return out
})
_, err := toml.Decode(input, c)
return err
}
// Validate returns an error if the config is invalid.
func (c *Config) Validate() error {
if err := c.Meta.Validate(); err != nil {
return err
}
if err := c.Data.Validate(); err != nil {
return err
}
if err := c.Monitor.Validate(); err != nil {
return err
}
if err := c.ContinuousQuery.Validate(); err != nil {
return err
}
if err := c.Retention.Validate(); err != nil {
return err
}
if err := c.Precreator.Validate(); err != nil {
return err
}
if err := c.Subscriber.Validate(); err != nil {
return err
}
for _, graphite := range c.GraphiteInputs {
if err := graphite.Validate(); err != nil {
return fmt.Errorf("invalid graphite config: %v", err)
}
}
for _, collectd := range c.CollectdInputs {
if err := collectd.Validate(); err != nil {
return fmt.Errorf("invalid collectd config: %v", err)
}
}
return nil
}
// ApplyEnvOverrides applies the environment configuration on top of the config.
func (c *Config) ApplyEnvOverrides() error {
return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c), "")
}
func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value, structKey string) error {
// If we have a pointer, dereference it
element := spec
if spec.Kind() == reflect.Ptr {
element = spec.Elem()
}
value := os.Getenv(prefix)
switch element.Kind() {
case reflect.String:
if len(value) == 0 {
return nil
}
element.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var intValue int64
// Handle toml.Duration
if element.Type().Name() == "Duration" {
dur, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
}
intValue = dur.Nanoseconds()
} else {
var err error
intValue, err = strconv.ParseInt(value, 0, element.Type().Bits())
if err != nil {
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
}
}
element.SetInt(intValue)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
intValue, err := strconv.ParseUint(value, 0, element.Type().Bits())
if err != nil {
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
}
element.SetUint(intValue)
case reflect.Bool:
boolValue, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
}
element.SetBool(boolValue)
case reflect.Float32, reflect.Float64:
floatValue, err := strconv.ParseFloat(value, element.Type().Bits())
if err != nil {
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
}
element.SetFloat(floatValue)
case reflect.Slice:
// If the type is a slice, apply to each element using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0, or GRAPHITE_0_TEMPLATES="item1,item2".
for j := 0; j < element.Len(); j++ {
f := element.Index(j)
if err := c.applyEnvOverrides(prefix, f, structKey); err != nil {
return err
}
if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil {
return err
}
}
// If the type is a slice but the value was not supplied per-index, split a comma-separated value, e.g. GRAPHITE_0_TEMPLATES="item1,item2".
if element.Len() == 0 && len(value) > 0 {
rules := strings.Split(value, ",")
for _, rule := range rules {
element.Set(reflect.Append(element, reflect.ValueOf(rule)))
}
}
case reflect.Struct:
typeOfSpec := element.Type()
for i := 0; i < element.NumField(); i++ {
field := element.Field(i)
// Skip any fields that we cannot set
if !field.CanSet() && field.Kind() != reflect.Slice {
continue
}
fieldName := typeOfSpec.Field(i).Name
configName := typeOfSpec.Field(i).Tag.Get("toml")
// Replace hyphens with underscores to avoid issues with shells
configName = strings.Replace(configName, "-", "_", -1)
envKey := strings.ToUpper(configName)
if prefix != "" {
envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName))
}
// If it's a sub-config, recursively apply
if field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr ||
field.Kind() == reflect.Slice || field.Kind() == reflect.Array {
if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil {
return err
}
continue
}
value := os.Getenv(envKey)
// Skip any fields we don't have a value to set
if len(value) == 0 {
continue
}
if err := c.applyEnvOverrides(envKey, field, fieldName); err != nil {
return err
}
}
}
return nil
}
// Diagnostics returns a diagnostics representation of Config.
func (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) {
return diagnostics.RowFromMap(map[string]interface{}{
"reporting-disabled": c.ReportingDisabled,
"bind-address": c.BindAddress,
}), nil
}
func (c *Config) diagnosticsClients() map[string]diagnostics.Client {
// Config settings that are always present.
m := map[string]diagnostics.Client{
"config": c,
"config-data": c.Data,
"config-meta": c.Meta,
"config-coordinator": c.Coordinator,
"config-retention": c.Retention,
"config-precreator": c.Precreator,
"config-monitor": c.Monitor,
"config-subscriber": c.Subscriber,
"config-httpd": c.HTTPD,
"config-cqs": c.ContinuousQuery,
}
// Config settings that can be repeated and can be disabled.
if g := graphite.Configs(c.GraphiteInputs); g.Enabled() {
m["config-graphite"] = g
}
if cc := collectd.Configs(c.CollectdInputs); cc.Enabled() {
m["config-collectd"] = cc
}
if t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() {
m["config-opentsdb"] = t
}
if u := udp.Configs(c.UDPInputs); u.Enabled() {
m["config-udp"] = u
}
return m
}
// registerDiagnostics registers the config settings with the Monitor.
func (c *Config) registerDiagnostics(m *monitor.Monitor) {
for name, dc := range c.diagnosticsClients() {
m.RegisterDiagnosticsClient(name, dc)
}
}
// deregisterDiagnostics deregisters the config settings from the Monitor.
func (c *Config) deregisterDiagnostics(m *monitor.Monitor) {
for name := range c.diagnosticsClients() {
m.DeregisterDiagnosticsClient(name)
}
}
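Given the key-derivation rules above (an INFLUXDB prefix, toml tag names upper-cased with hyphens replaced by underscores, and an optional slice index), environment variables such as the following override the corresponding config fields. The first value is illustrative; the other two appear in the tests that follow:

INFLUXDB_REPORTING_DISABLED=true
INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE=1000
INFLUXDB_UDP_0_BIND_ADDRESS=:5555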

View File

@ -0,0 +1,92 @@
package run
import (
"flag"
"fmt"
"io"
"os"
"github.com/BurntSushi/toml"
)
// PrintConfigCommand represents the command executed by "influxd config".
type PrintConfigCommand struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// NewPrintConfigCommand returns a new instance of PrintConfigCommand.
func NewPrintConfigCommand() *PrintConfigCommand {
return &PrintConfigCommand{
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
}
// Run parses and prints the currently loaded config.
func (cmd *PrintConfigCommand) Run(args ...string) error {
// Parse command flags.
fs := flag.NewFlagSet("", flag.ContinueOnError)
configPath := fs.String("config", "", "")
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }
if err := fs.Parse(args); err != nil {
return err
}
// Parse config from path.
opt := Options{ConfigPath: *configPath}
config, err := cmd.parseConfig(opt.GetConfigPath())
if err != nil {
return fmt.Errorf("parse config: %s", err)
}
// Apply any environment variables on top of the parsed config
if err := config.ApplyEnvOverrides(); err != nil {
return fmt.Errorf("apply env config: %v", err)
}
// Validate the configuration.
if err := config.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
}
if err := toml.NewEncoder(cmd.Stdout).Encode(config); err != nil {
return fmt.Errorf("encode config: %s", err)
}
fmt.Fprint(cmd.Stdout, "\n")
return nil
}
// parseConfig parses the config at path.
// It returns a demo configuration if path is blank.
func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) {
config, err := NewDemoConfig()
if err != nil {
config = NewConfig()
}
if path == "" {
return config, nil
}
fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path)
if err := config.FromTomlFile(path); err != nil {
return nil, err
}
return config, nil
}
var printConfigUsage = `Displays the default configuration.
Usage: influxd config [flags]
-config <path>
Set the path to the initial configuration file.
This defaults to the environment variable INFLUXDB_CONFIG_PATH,
~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
is present at any of these locations.
Disable the automatic loading of a configuration file using
the null device (such as /dev/null).
`

View File

@ -0,0 +1,312 @@
package run_test
import (
"fmt"
"os"
"testing"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb/cmd/influxd/run"
)
// Ensure the configuration can be parsed.
func TestConfig_Parse(t *testing.T) {
// Parse configuration.
var c run.Config
if err := c.FromToml(`
[meta]
dir = "/tmp/meta"
[data]
dir = "/tmp/data"
[coordinator]
[http]
bind-address = ":8087"
[[graphite]]
protocol = "udp"
[[graphite]]
protocol = "tcp"
[[collectd]]
bind-address = ":1000"
[[collectd]]
bind-address = ":1010"
[[opentsdb]]
bind-address = ":2000"
[[opentsdb]]
bind-address = ":2010"
[[opentsdb]]
bind-address = ":2020"
[[udp]]
bind-address = ":4444"
[monitoring]
enabled = true
[subscriber]
enabled = true
[continuous_queries]
enabled = true
`); err != nil {
t.Fatal(err)
}
// Validate configuration.
if c.Meta.Dir != "/tmp/meta" {
t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
} else if c.Data.Dir != "/tmp/data" {
t.Fatalf("unexpected data dir: %s", c.Data.Dir)
} else if c.HTTPD.BindAddress != ":8087" {
t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress)
} else if len(c.GraphiteInputs) != 2 {
t.Fatalf("unexpected graphiteInputs count: %d", len(c.GraphiteInputs))
} else if c.GraphiteInputs[0].Protocol != "udp" {
t.Fatalf("unexpected graphite protocol(0): %s", c.GraphiteInputs[0].Protocol)
} else if c.GraphiteInputs[1].Protocol != "tcp" {
t.Fatalf("unexpected graphite protocol(1): %s", c.GraphiteInputs[1].Protocol)
} else if c.CollectdInputs[0].BindAddress != ":1000" {
t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[0].BindAddress)
} else if c.CollectdInputs[1].BindAddress != ":1010" {
t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress)
} else if c.OpenTSDBInputs[0].BindAddress != ":2000" {
t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress)
} else if c.OpenTSDBInputs[1].BindAddress != ":2010" {
t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[1].BindAddress)
} else if c.OpenTSDBInputs[2].BindAddress != ":2020" {
t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress)
} else if c.UDPInputs[0].BindAddress != ":4444" {
t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress)
} else if c.Subscriber.Enabled != true {
t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled)
} else if c.ContinuousQuery.Enabled != true {
t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
}
}
// Ensure the configuration can be parsed.
func TestConfig_Parse_EnvOverride(t *testing.T) {
// Parse configuration.
var c run.Config
if _, err := toml.Decode(`
[meta]
dir = "/tmp/meta"
[data]
dir = "/tmp/data"
[coordinator]
[admin]
bind-address = ":8083"
[http]
bind-address = ":8087"
[[graphite]]
protocol = "udp"
templates = [
"default.* .template.in.config"
]
[[graphite]]
protocol = "tcp"
[[collectd]]
bind-address = ":1000"
[[collectd]]
bind-address = ":1010"
[[opentsdb]]
bind-address = ":2000"
[[opentsdb]]
bind-address = ":2010"
[[udp]]
bind-address = ":4444"
[[udp]]
[monitoring]
enabled = true
[continuous_queries]
enabled = true
`, &c); err != nil {
t.Fatal(err)
}
if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_UDP_0_BIND_ADDRESS", ":5555"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_GRAPHITE_0_TEMPLATES_0", "overide.* .template.0"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_GRAPHITE_1_TEMPLATES", "overide.* .template.1.1,overide.* .template.1.2"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_COLLECTD_1_BIND_ADDRESS", ":1020"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_OPENTSDB_0_BIND_ADDRESS", ":2020"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
// Also verify that a uint64-typed setting can be overridden from the environment.
if err := os.Setenv("INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE", "1000"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := c.ApplyEnvOverrides(); err != nil {
t.Fatalf("failed to apply env overrides: %v", err)
}
if c.UDPInputs[0].BindAddress != ":5555" {
t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress)
}
if c.UDPInputs[1].BindAddress != ":1234" {
t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress)
}
if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "override.* .template.0" {
t.Fatalf("unexpected graphite 0 templates: %+v", c.GraphiteInputs[0].Templates)
}
if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "override.* .template.1.2" {
t.Fatalf("unexpected graphite 1 templates: %+v", c.GraphiteInputs[1].Templates)
}
if c.GraphiteInputs[1].Protocol != "udp" {
t.Fatalf("unexpected graphite protocol: %s", c.GraphiteInputs[1].Protocol)
}
if c.CollectdInputs[1].BindAddress != ":1020" {
t.Fatalf("unexpected collectd bind address: %s", c.CollectdInputs[1].BindAddress)
}
if c.OpenTSDBInputs[0].BindAddress != ":2020" {
t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[0].BindAddress)
}
if c.Data.CacheMaxMemorySize != 1000 {
t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize)
}
}
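
// Ensure Validate returns an error when no service is configured.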
func TestConfig_ValidateNoServiceConfigured(t *testing.T) {
var c run.Config
if _, err := toml.Decode(`
[meta]
enabled = false
[data]
enabled = false
`, &c); err != nil {
t.Fatal(err)
}
if e := c.Validate(); e == nil {
t.Fatalf("got nil, expected error")
}
}
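
// Ensure Validate returns an error when the monitor store is enabled but only the meta service is configured.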
func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) {
c := run.NewConfig()
if _, err := toml.Decode(`
[monitor]
store-enabled = true
[meta]
dir = "foo"
[data]
enabled = false
`, &c); err != nil {
t.Fatal(err)
}
if err := c.Validate(); err == nil {
t.Fatalf("got nil, expected error")
}
}
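
// Ensure deprecated [cluster] options are still parsed and mapped to the coordinator settings.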
func TestConfig_DeprecatedOptions(t *testing.T) {
// Parse configuration.
var c run.Config
if err := c.FromToml(`
[cluster]
max-select-point = 100
`); err != nil {
t.Fatal(err)
}
// Validate configuration.
if c.Coordinator.MaxSelectPointN != 100 {
t.Fatalf("unexpected coordinator max select points: %d", c.Coordinator.MaxSelectPointN)
}
}

// Ensure that Config.Validate correctly validates the individual subsections.
func TestConfig_InvalidSubsections(t *testing.T) {
// Precondition: NewDemoConfig must validate correctly.
c, err := run.NewDemoConfig()
if err != nil {
t.Fatalf("error creating demo config: %s", err)
}
if err := c.Validate(); err != nil {
t.Fatalf("new demo config failed validation: %s", err)
}
// For each subsection, load a config with a single invalid setting.
for _, tc := range []struct {
section string
kv string
}{
{"meta", `dir = ""`},
{"data", `dir = ""`},
{"monitor", `store-database = ""`},
{"continuous_queries", `run-interval = "0s"`},
{"subscriber", `http-timeout = "0s"`},
{"retention", `check-interval = "0s"`},
{"shard-precreation", `advance-period = "0s"`},
} {
c, err := run.NewDemoConfig()
if err != nil {
t.Fatalf("error creating demo config: %s", err)
}
s := fmt.Sprintf("\n[%s]\n%s\n", tc.section, tc.kv)
if err := c.FromToml(s); err != nil {
t.Fatalf("error loading toml %q: %s", s, err)
}
if err := c.Validate(); err == nil {
t.Fatalf("expected error but got nil for config: %s", s)
}
}
}
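
Taken together, the Setenv calls and assertions in the override test above pin down the key format: INFLUXDB_, the upper-cased section name, an optional element index for repeated sections such as [[udp]] or [[graphite]], and the field name with dashes replaced by underscores. An unindexed key applies to every element of a repeated section, and an indexed key takes precedence for its element, which is why UDPInputs[0] reports :5555 while UDPInputs[1] reports :1234. Below is a minimal sketch of the key construction only; envKey is a hypothetical helper for illustration, while the real mapping is performed by run.Config.ApplyEnvOverrides:

package main

import (
	"fmt"
	"strings"
)

// envKey builds an override variable name such as
// INFLUXDB_GRAPHITE_0_TEMPLATES from a section name, an optional slice
// index (pass -1 for non-repeated sections), and a field name.
func envKey(section string, index int, field string) string {
	parts := []string{"INFLUXDB", strings.ToUpper(section)}
	if index >= 0 {
		parts = append(parts, fmt.Sprint(index))
	}
	parts = append(parts, strings.ToUpper(strings.Replace(field, "-", "_", -1)))
	return strings.Join(parts, "_")
}

func main() {
	fmt.Println(envKey("udp", 0, "bind-address"))            // INFLUXDB_UDP_0_BIND_ADDRESS
	fmt.Println(envKey("graphite", 1, "protocol"))           // INFLUXDB_GRAPHITE_1_PROTOCOL
	fmt.Println(envKey("data", -1, "cache-max-memory-size")) // INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE
}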

Some files were not shown because too many files have changed in this diff.