Mirror of https://github.com/Oxalide/vsphere-influxdb-go.git (synced 2023-10-10 11:36:51 +00:00)

Compare commits


36 Commits

Author SHA1 Message Date
Andy Cobaugh
dac91ccd2d
Use url.UserPassword() so passwords are escaped (#101)
* add Dockerfile

* dockerfile: switch to multi-stage build to lighten up the final image

* dockerfile: update to latest images, and update the 2nd stage image on build

* Use url.UserPassword() so passwords are escaped
2020-04-01 14:15:04 +02:00
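
For context, a minimal sketch of the escaping this commit relies on, with a hypothetical hostname and credentials:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Hypothetical vCenter URL and credentials; the password contains
    // characters that would corrupt a URL built by string concatenation.
    u, err := url.Parse("https://vcenter.example.local/sdk")
    if err != nil {
        panic(err)
    }
    u.User = url.UserPassword("monitoring", "p@ss/w0rd#1")
    // The userinfo is percent-encoded when the URL is serialized,
    // so govmomi receives a well-formed URL.
    fmt.Println(u.String())
}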
Andy Cobaugh
88b072f48e Add Dockerfile to project (#98)
* add Dockerfile

* dockerfile: switch to multi-stage build to lighten up the final image

* dockerfile: update to latest images, and update the 2nd stage image on build
2019-07-19 18:24:31 +02:00
Adrian Todorov
dc6df54936
Merge pull request #96 from MicKBfr/patch-2
change for RemoveHostDomainName
2019-03-14 15:52:36 +01:00
MicKBfr
e92e578a98
change for RemoveHostDomainName 2018-10-26 11:28:18 +02:00
Adrian Todorov
e0c31fa803 update to use nfpm instead of fpm for packaging 2018-09-16 15:47:55 +02:00
Adrian Todorov
9094700d21 rollback vDisk parsing for now 2018-09-16 15:39:38 +02:00
Adrian Todorov
23505fb8d3
Merge pull request #93 from MicKBfr/patch-1
Update vsphere-influxdb.go
2018-09-16 15:01:46 +02:00
MicKBfr
e9bccef96d
Update vsphere-influxdb.go 2018-09-13 17:02:26 +02:00
Adrian Todorov
639ef28ecf
Merge pull request #72 from MicKBfr/MicKBfr-patch-1
Remove domain from esx name
2018-09-03 22:07:41 +02:00
Adrian Todorov
57703fa0b3
Merge pull request #81 from krische/master
Compute usage percentage of datastore.
2018-08-09 18:32:26 +02:00
Brian Krische
a32178917a Compute usage percentage of datastore. 2018-06-25 07:39:38 -05:00
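
The usage value added here is the used fraction of the datastore, computed from the capacity and free-space summary fields (the formula appears in the datastore hunk further down); a small sketch with made-up numbers:

package main

import "fmt"

func main() {
    // Hypothetical datastore summary values, in bytes.
    var capacity int64 = 2000000000000 // 2 TB
    var freeSpace int64 = 500000000000 // 500 GB

    // Same formula as the "usage" field added to the datastore measurement.
    usage := 1.0 - (float64(freeSpace) / float64(capacity))
    fmt.Printf("usage: %.2f\n", usage) // 0.75
}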
MicKBfr
3ddb5a30da
patch to made RemoveHostDomainName optional 2018-04-12 17:45:57 +02:00
MicKBfr
3eb0f579a9
Update vsphere-influxdb.go 2018-04-12 17:36:51 +02:00
MicKBfr
39330e4d31
Remove domain from esx name
Domain in esx name can be a problem with influxgraph and grafana.
2018-04-06 09:40:32 +02:00
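
This change (and the later RemoveHostDomainName option) boils down to a plain strings.Replace on the configured domain; a minimal sketch with hypothetical values:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical configured domain and a host name as reported by vCenter.
    domain := ".lab"
    hostName := "esx-01.lab"

    // Strip every occurrence of the domain from the host name before it is
    // used as a tag, as the RemoveHostDomainName option does.
    fmt.Println(strings.Replace(hostName, domain, "", -1)) // esx-01
}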
Adrian Todorov
3f8a36f0cf virtual disk to datastore mapping 2018-03-31 15:15:52 +00:00
Adrian Todorov
d04a66083b fix copyright notice year 2018-03-31 15:03:58 +00:00
Adrian Todorov
1467c58c08 merge 2018-03-31 16:40:43 +02:00
Adrian Todorov
71d921b681
Merge pull request #63 from ncareau/patch-2
Typo in readme.
2018-02-24 22:13:32 +01:00
NMC
2091fe3207
Typo in readme.
Sorry, made a typo in my PR.
2018-02-24 15:33:11 -05:00
Adrian Todorov
770c98ecd4
Merge pull request #62 from ncareau/patch-1
Update README.md with note for time (check #31 comments)
2018-02-24 21:05:12 +01:00
NMC
c3dc5c2e75
Update Readme.md with note for time (view #31)
Added a note for the NTP service to be valid since there have been reports (including myself) where an invalid time on the ESXi server caused the script to dump no data at all.
2018-02-24 14:43:56 -05:00
Adrian Todorov
60baa6670b fix typo in README 2018-02-04 13:18:23 +01:00
Adrian Todorov
2656627e47 cleanup 2018-02-03 21:29:32 +01:00
Adrian Todorov
cc748fd2ae print PerformanceManager HistoricalInterval metric collection level if -debug is present 2018-02-03 21:27:12 +01:00
Adrian Todorov
a2b027825c print version if -debug is present 2018-02-03 21:22:10 +01:00
Adrian Todorov
b2da326242 fix version print to fmt.Println instead of stdlog.Println 2018-02-03 19:45:35 +01:00
Adrian Todorov
72f628354b bugfix 2018-01-07 16:17:37 +01:00
Adrian Todorov
076f633377 add a version flag 2018-01-07 16:08:06 +01:00
Adrian Todorov
59e2e1d2df bugfix 2018-01-07 15:47:30 +01:00
Adrian Todorov
a461957082 add a -test flag and test mode to output the data collected from vCenter and not send it to Influx 2018-01-07 15:39:49 +01:00
Adrian Todorov
bf09836f04 Add a default "" value for InfluxDB prefix for retrocompatibility post PR #53 2018-01-07 15:11:32 +01:00
Adrian Todorov
c252b28988
Merge pull request #53 from zooplus/master
Nested Folders and measurement prefix support
2018-01-07 15:07:16 +01:00
Artashes Arabajyan
d00ca9ee6d Merged old .gitignore that was somehow overwritten 2018-01-04 12:32:19 +01:00
Artashes Arabajyan
ba0b81c0ef go fmt 2018-01-04 12:25:10 +01:00
Artashes Arabajyan
8b519e50c8 The tool was not able to gather info from the datacenters in nested folders under the root folder - fixed, added config parameter to specify prefix for measurement, cleaned up logging formatting 2018-01-03 21:06:41 +01:00
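
The measurement prefix introduced here is simply concatenated in front of the measurement name when the InfluxDB point is built (see the vsphere-influxdb.go hunks below); a sketch using the project's InfluxDB client dependency, with hypothetical tag and field values:

package main

import (
    "fmt"
    "time"

    influxclient "github.com/influxdata/influxdb/client/v2"
)

func main() {
    prefix := "vsphere_"           // value of InfluxDB.Prefix in the config
    entityName := "virtualmachine" // measurement name without the prefix

    tags := map[string]string{"name": "vm-01"}
    fields := map[string]interface{}{"cpu_usage_average": 12.5}

    // The prefix is prepended to the measurement name; nothing else changes.
    pt, err := influxclient.NewPoint(prefix+entityName, tags, fields, time.Now())
    if err != nil {
        panic(err)
    }
    fmt.Println(pt.String()) // line protocol starting with "vsphere_virtualmachine,..."
}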
Adrian Todorov
942b49d61e fix crash on VM cloning 2017-12-10 17:22:33 +01:00
8 changed files with 269 additions and 153 deletions

.gitignore vendored

@@ -11,3 +11,6 @@ vsphere-influxdb
  # Configuration file
  vsphere-influxdb.json
+ # Vim swap files
+ *.swp

.travis.yml

@@ -17,6 +17,6 @@ before_script:
  script:
  - git status
  after_success:
- - gem install --no-ri --no-rdoc fpm
+ # - gem install --no-ri --no-rdoc fpm
  - test -n "$TRAVIS_TAG" && curl -sL https://git.io/goreleaser | bash

Dockerfile (new file)

@@ -0,0 +1,18 @@
+ FROM golang:1.12-alpine3.10 as builder
+ WORKDIR /go/src/vsphere-influxdb-go
+ COPY . .
+ RUN apk --update add --virtual build-deps git
+ RUN go get -d -v ./...
+ RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo
+ FROM alpine:3.10
+ RUN apk update \
+     && apk upgrade \
+     && apk add ca-certificates \
+     && addgroup -S spock && adduser -S spock -G spock
+ COPY --from=0 /go/src/vsphere-influxdb-go/vsphere-influxdb-go /vsphere-influxdb-go
+ USER spock
+ CMD ["/vsphere-influxdb-go"]

Gopkg.lock generated

@@ -2,32 +2,65 @@
  [[projects]]
+ digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
+ pruneopts = ""
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"
  [[projects]]
+ digest = "1:ec837623c12abf4f44837163ad32274dd0c9dfa29f0d65068c60897bce34bb75"
  name = "github.com/influxdata/influxdb"
- packages = ["client/v2","models","pkg/escape"]
+ packages = [
+ "client/v2",
+ "models",
+ "pkg/escape",
+ ]
+ pruneopts = ""
  revision = "a7c3d3c0f7b74f71919047adbc4933460967a576"
  version = "v1.3.6"
  [[projects]]
+ digest = "1:c5342f3ddefc1644d1a3ed028e739bfabe5b457978084b9a28d42ed235c8bb4d"
  name = "github.com/vmware/govmomi"
- packages = [".","property","session","vim25","vim25/debug","vim25/methods","vim25/mo","vim25/progress","vim25/soap","vim25/types","vim25/xml"]
+ packages = [
+ ".",
+ "property",
+ "session",
+ "vim25",
+ "vim25/debug",
+ "vim25/methods",
+ "vim25/mo",
+ "vim25/progress",
+ "vim25/soap",
+ "vim25/types",
+ "vim25/xml",
+ ]
+ pruneopts = ""
  revision = "b63044e5f833781eb7b305bc035392480ee06a82"
  version = "v0.15.0"
  [[projects]]
  branch = "master"
+ digest = "1:70ca15641aa31be55859a7f75ddef3ae384ae18068deab8274668a1a77d1e84a"
  name = "golang.org/x/net"
  packages = ["context"]
+ pruneopts = ""
  revision = "4b14673ba32bee7f5ac0f990a48f033919fd418b"
  [solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
- inputs-digest = "0b7718e6a338978c4ea1efca3255565c667967ddd6ff68999a1d1cea5112209e"
+ input-imports = [
+ "github.com/davecgh/go-spew/spew",
+ "github.com/influxdata/influxdb/client/v2",
+ "github.com/vmware/govmomi",
+ "github.com/vmware/govmomi/property",
+ "github.com/vmware/govmomi/vim25/methods",
+ "github.com/vmware/govmomi/vim25/mo",
+ "github.com/vmware/govmomi/vim25/types",
+ "golang.org/x/net/context",
+ ]
  solver-name = "gps-cdcl"
  solver-version = 1

README.md

@@ -49,7 +49,7 @@ For InfluxDB:
  For vSphere:
  * VSPHERE\_HOSTNAME
  * VSPHERE\_USERNAME
- * SPHERE\_PASSWORD
+ * VSPHERE\_PASSWORD
  Keep in mind, that currently only one vCenter/ESXi can be added via environment variable.
@@ -57,6 +57,7 @@ If you set a domain, it will be automaticaly removed from the names of the found
  Metrics collected are defined by associating ObjectType groups with Metric groups.
+ There have been reports of the script not working correctly when the time is incorrect on the vsphere or vcenter. Make sure that the time is valid or activate the NTP service on the machine.
  # Run as a service
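
For reference, the vSphere variables listed in the hunk above are read in main() with plain os.Getenv calls; a minimal sketch of that pattern (variable names as documented, values hypothetical):

package main

import (
    "fmt"
    "os"
)

func main() {
    // Only one vCenter/ESXi can currently be configured this way.
    hostname := os.Getenv("VSPHERE_HOSTNAME")
    username := os.Getenv("VSPHERE_USERNAME")
    password := os.Getenv("VSPHERE_PASSWORD")

    if hostname != "" && username != "" && password != "" {
        fmt.Println("Adding vCenter from environment:", hostname)
    }
}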

.goreleaser.yml

@@ -19,7 +19,7 @@ archive:
  files:
  - LICENSE.txt
  - README.md
- fpm:
+ nfpm:
  # Your app's vendor.
  # Default is empty.
  vendor: Oxalide

vsphere-influxdb.go

@@ -1,17 +1,17 @@
- /* Copyright 2016 Adrian Todorov, Oxalide ato@oxalide.com
+ /* Copyright 2016-2018 Adrian Todorov, Oxalide ato@oxalide.com
  Original project author: https://github.com/cblomart
  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.
  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.
  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
  package main
@@ -47,11 +47,12 @@ const (
  // Configuration is used to store config data
  type Configuration struct {
  VCenters []*VCenter
  Metrics []Metric
  Interval int
  Domain string
- InfluxDB InfluxDB
+ RemoveHostDomainName bool
+ InfluxDB InfluxDB
  }
  // InfluxDB is used for InfluxDB connections
@@ -60,6 +61,7 @@ type InfluxDB struct {
  Username string
  Password string
  Database string
+ Prefix string
  }
  // VCenter for VMware vCenter connections
@@ -98,26 +100,28 @@ type EntityQuery struct {
  Metrics []int32
  }
- var debug bool
+ var getversion, debug, test bool
  var stdlog, errlog *log.Logger
+ var version = "master"
  // Connect to the actual vCenter connection used to query data
  func (vcenter *VCenter) Connect() error {
  ctx, cancel := context.WithCancel(context.Background())
  defer cancel()
- stdlog.Println("connecting to vcenter: " + vcenter.Hostname)
- u, err := url.Parse("https://" + vcenter.Username + ":" + vcenter.Password + "@" + vcenter.Hostname + "/sdk")
+ stdlog.Println("Connecting to vcenter:", vcenter.Hostname)
+ u, err := url.Parse("https://" + vcenter.Hostname + "/sdk")
+ u.User = url.UserPassword(vcenter.Username, vcenter.Password)
  if err != nil {
- errlog.Println("Could not parse vcenter url: ", vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not parse vcenter url:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return err
  }
  client, err := govmomi.NewClient(ctx, u, true)
  if err != nil {
- errlog.Println("Could not connect to vcenter: ", vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not connect to vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return err
  }
@@ -133,7 +137,7 @@ func (vcenter *VCenter) Disconnect() error {
  if vcenter.client != nil {
  if err := vcenter.client.Logout(ctx); err != nil {
- errlog.Println("Could not disconnect properly from vcenter", vcenter.Hostname, err)
+ errlog.Println("Could not disconnect properly from vcenter:", vcenter.Hostname, err)
  return err
  }
  }
@@ -148,14 +152,26 @@ func (vcenter *VCenter) Init(config Configuration) error {
  client := vcenter.client
+ // Print version
+ if debug {
+ aboutInfo := client.Client.ServiceContent.About
+ stdlog.Println("Version:", aboutInfo.FullName)
+ }
  var perfmanager mo.PerformanceManager
  err := client.RetrieveOne(ctx, *client.ServiceContent.PerfManager, nil, &perfmanager)
  if err != nil {
  errlog.Println("Could not get performance manager")
- errlog.Println("Error: ", err)
+ errlog.Println("Error:", err)
  return err
  }
+ // Print PerformanceManager interval collection level
+ if debug {
+ stdlog.Println("PerformanceManager interval collection level")
+ spew.Dump(perfmanager.HistoricalInterval)
+ }
  for _, perf := range perfmanager.PerfCounter {
  groupinfo := perf.GroupInfo.GetElementDescription()
  nameinfo := perf.NameInfo.GetElementDescription()
@@ -187,7 +203,7 @@ func (vcenter *VCenter) Init(config Configuration) error {
  // Query a vcenter
  func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.Client, nowTime time.Time) {
- stdlog.Println("Setting up query inventory of vcenter: ", vcenter.Hostname)
+ stdlog.Println("Setting up query inventory of vcenter:", vcenter.Hostname)
  // Create the contect
  ctx, cancel := context.WithCancel(context.Background())
@@ -200,7 +216,7 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  var viewManager mo.ViewManager
  err := client.RetrieveOne(ctx, *client.ServiceContent.ViewManager, nil, &viewManager)
  if err != nil {
- errlog.Println("Could not get view manager from vcenter: " + vcenter.Hostname)
+ errlog.Println("Could not get view manager from vcenter:", vcenter.Hostname)
  errlog.Println("Error: ", err)
  return
  }
@@ -209,16 +225,16 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  var rootFolder mo.Folder
  err = client.RetrieveOne(ctx, client.ServiceContent.RootFolder, nil, &rootFolder)
  if err != nil {
- errlog.Println("Could not get root folder from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not get root folder from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return
  }
  datacenters := []types.ManagedObjectReference{}
  for _, child := range rootFolder.ChildEntity {
- if child.Type == "Datacenter" {
+ //if child.Type == "Datacenter" {
  datacenters = append(datacenters, child)
- }
+ //}
  }
  // Get intresting object types from specified queries
  objectTypes := []string{}
@@ -236,16 +252,16 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  req := types.CreateContainerView{This: viewManager.Reference(), Container: datacenter, Type: objectTypes, Recursive: true}
  res, err := methods.CreateContainerView(ctx, client.RoundTripper, &req)
  if err != nil {
- errlog.Println("Could not create container view from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not create container view from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  continue
  }
  // Retrieve the created ContentView
  var containerView mo.ContainerView
  err = client.RetrieveOne(ctx, res.Returnval, nil, &containerView)
  if err != nil {
- errlog.Println("Could not get container view from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not get container view from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  continue
  }
  // Add found object to object list
@@ -344,7 +360,7 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  // Retrieve properties for ResourcePools
  if len(respoolRefs) > 0 {
  if debug {
- stdlog.Println("going inside ResourcePools")
+ stdlog.Println("Going inside ResourcePools")
  }
  err = pc.Retrieve(ctx, respoolRefs, []string{"name", "config", "vm"}, &respool)
  if err != nil {
@@ -352,11 +368,11 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  return
  }
  for _, pool := range respool {
- stdlog.Println(pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit)
- stdlog.Println(pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit)
  if debug {
  stdlog.Println("---resourcepool name - you should see every resourcepool here (+VMs inside)----")
  stdlog.Println(pool.Name)
+ stdlog.Println(pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit)
+ stdlog.Println(pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit)
  }
  for _, vm := range pool.Vm {
  if debug {
@@ -374,10 +390,13 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  // Initialize the map that will hold the host MOR to cluster reference
  hostToCluster := make(map[types.ManagedObjectReference]string)
+ // Initialize the map that will hold the vDisk UUID per VM MOR to datastore reference
+ // vDiskToDatastore := make(map[types.ManagedObjectReference]map[string]string)
  // Retrieve properties for clusters, if any
  if len(clusterRefs) > 0 {
  if debug {
- stdlog.Println("going inside clusters")
+ stdlog.Println("Going inside clusters")
  }
  // Step 1 : Get ObjectContents and Host info for VM
@@ -418,24 +437,34 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  respoolSummary[pools.Self]["name"] = pools.Summary.GetResourcePoolSummary().Name
  }
- // Retrieve properties for the hosts
+ // Initialize the maps that will hold the extra tags and metrics for VMs
  hostSummary := make(map[types.ManagedObjectReference]map[string]string)
  hostExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)
  for _, host := range hsmo {
+ // Extra tags per host
  hostSummary[host.Self] = make(map[string]string)
  hostSummary[host.Self]["name"] = host.Summary.Config.Name
+ // Remove Domain Name from Host
+ if config.RemoveHostDomainName {
+ hostSummary[host.Self]["name"] = strings.Replace(host.Summary.Config.Name, config.Domain, "", -1)
+ }
  hostSummary[host.Self]["cluster"] = hostToCluster[host.Self]
+ // Extra metrics per host
  hostExtraMetrics[host.Self] = make(map[string]int64)
+ hostExtraMetrics[host.Self]["uptime"] = int64(host.Summary.QuickStats.Uptime)
  hostExtraMetrics[host.Self]["cpu_corecount_total"] = int64(host.Summary.Hardware.NumCpuThreads)
  }
- // Initialize the map that will hold all extra tags
+ // Initialize the maps that will hold the extra tags and metrics for VMs
  vmSummary := make(map[types.ManagedObjectReference]map[string]string)
+ vmExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)
  // Assign extra details per VM in vmSummary
  for _, vm := range vmmo {
+ // extra tags per VM
  vmSummary[vm.Self] = make(map[string]string)
  // Ugly way to extract datastore value
  re, err := regexp.Compile(`\[(.*?)\]`)
@@ -443,15 +472,34 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  fmt.Println(err)
  }
  vmSummary[vm.Self]["datastore"] = strings.Replace(strings.Replace(re.FindString(fmt.Sprintln(vm.Summary.Config)), "[", "", -1), "]", "", -1)
+ // List all devices to get vDisks
+ // for _, device := range vm.Config.Hardware.Device {
+ // // Hacky way to check if it's a vDisk and if it's datastore is different than the main one for VM
+ // if device.Backing.FileName != nil && device.Backing.Datastore.Name != vmSummary[vm.Self]["datastore"] {
+ // if vDiskToDatastore[vm.Self] == nil {
+ // vDiskToDatastore[vm.Self] = make(map[string]string)
+ // }
+ // vDiskToDatastore[vm.Self][device.diskObjectId] = device.Backing.Datastore.Name
+ // }
+ // }
  if vmToCluster[vm.Self] != "" {
  vmSummary[vm.Self]["cluster"] = vmToCluster[vm.Self]
  }
  if vmToPool[vm.Self] != "" {
  vmSummary[vm.Self]["respool"] = vmToPool[vm.Self]
  }
- vmSummary[vm.Self]["esx"] = hostSummary[*vm.Summary.Runtime.Host]["name"]
- }
+ if vm.Summary.Runtime.Host != nil {
+ vmSummary[vm.Self]["esx"] = hostSummary[*vm.Summary.Runtime.Host]["name"]
+ }
+ // Extra metrics per VM
+ vmExtraMetrics[vm.Self] = make(map[string]int64)
+ vmExtraMetrics[vm.Self]["uptime"] = int64(vm.Summary.QuickStats.UptimeSeconds)
+ }
+ // fmt.Println("vDiskDatastore:")
+ // spew.Dump(vDiskToDatastore)
  // get object names
  objects := []mo.ManagedEntity{}
@@ -466,16 +514,16 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  propreq := types.RetrieveProperties{SpecSet: []types.PropertyFilterSpec{{ObjectSet: objectSet, PropSet: []types.PropertySpec{*propSpec}}}}
  propres, err := client.PropertyCollector().RetrieveProperties(ctx, propreq)
  if err != nil {
- errlog.Println("Could not retrieve object names from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not retrieve object names from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return
  }
  //load retrieved properties
  err = mo.LoadRetrievePropertiesResponse(propres, &objects)
  if err != nil {
- errlog.Println("Could not retrieve object names from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not retrieve object names from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return
  }
@@ -522,8 +570,8 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  perfreq := types.QueryPerf{This: *client.ServiceContent.PerfManager, QuerySpec: queries}
  perfres, err := methods.QueryPerf(ctx, client.RoundTripper, &perfreq)
  if err != nil {
- errlog.Println("Could not request perfs from vcenter: " + vcenter.Hostname)
- errlog.Println("Error: ", err)
+ errlog.Println("Could not request perfs from vcenter:", vcenter.Hostname)
+ errlog.Println("Error:", err)
  return
  }
@@ -625,15 +673,21 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  specialTags[measurementName][tags["name"]][instanceName]["instance"] = instanceName
  }
  }
+ // Create the fields for the hostExtraMetrics
  if metrics, ok := hostExtraMetrics[pem.Entity]; ok {
  for key, value := range metrics {
  fields[key] = value
  }
  }
+ // Create the fields for the vmExtraMetrics
+ if metrics, ok := vmExtraMetrics[pem.Entity]; ok {
+ for key, value := range metrics {
+ fields[key] = value
+ }
+ }
  //create InfluxDB points
- pt, err := influxclient.NewPoint(entityName, tags, fields, nowTime)
+ pt, err := influxclient.NewPoint(config.InfluxDB.Prefix+entityName, tags, fields, nowTime)
  if err != nil {
  errlog.Println(err)
  continue
@@ -643,7 +697,7 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  for measurement, v := range specialFields {
  for name, metric := range v {
  for instance, value := range metric {
- pt2, err := influxclient.NewPoint(measurement, specialTags[measurement][name][instance], value, time.Now())
+ pt2, err := influxclient.NewPoint(config.InfluxDB.Prefix+measurement, specialTags[measurement][name][instance], value, time.Now())
  if err != nil {
  errlog.Println(err)
  continue
@@ -653,20 +707,13 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  }
  }
- // var respool []mo.ResourcePool
- // err = pc.Retrieve(ctx, respoolRefs, []string{"name", "config", "vm"}, &respool)
- // if err != nil {
- // errlog.Println(err)
- // continue
- // }
  for _, pool := range respool {
  respoolFields := map[string]interface{}{
  "cpu_limit": pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit,
  "memory_limit": pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit,
  }
  respoolTags := map[string]string{"pool_name": pool.Name}
- pt3, err := influxclient.NewPoint("resourcepool", respoolTags, respoolFields, time.Now())
+ pt3, err := influxclient.NewPoint(config.InfluxDB.Prefix+"resourcepool", respoolTags, respoolFields, time.Now())
  if err != nil {
  errlog.Println(err)
  continue
@@ -678,9 +725,10 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  datastoreFields := map[string]interface{}{
  "capacity": datastore.Summary.Capacity,
  "free_space": datastore.Summary.FreeSpace,
+ "usage": 1.0 - (float64(datastore.Summary.FreeSpace) / float64(datastore.Summary.Capacity)),
  }
  datastoreTags := map[string]string{"ds_name": datastore.Summary.Name, "host": vcName}
- pt4, err := influxclient.NewPoint("datastore", datastoreTags, datastoreFields, time.Now())
+ pt4, err := influxclient.NewPoint(config.InfluxDB.Prefix+"datastore", datastoreTags, datastoreFields, time.Now())
  if err != nil {
  errlog.Println(err)
  continue
@@ -690,14 +738,17 @@ func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.
  }
- //InfluxDB send
- err = InfluxDBClient.Write(bp)
- if err != nil {
- errlog.Println(err)
- return
+ //InfluxDB send if not in test mode
+ if test != true {
+ err = InfluxDBClient.Write(bp)
+ if err != nil {
+ errlog.Println(err)
+ return
+ }
+ stdlog.Println("Sent data to Influxdb from:", vcenter.Hostname)
+ } else {
+ spew.Dump(bp)
  }
- stdlog.Println("sent data to Influxdb")
  }
  func min(n ...int64) int64 {
@@ -715,7 +766,6 @@ func min(n ...int64) int64 {
  }
  return min
  }
  func max(n ...int64) int64 {
  var max int64 = -1
  for _, i := range n {
@@ -766,7 +816,6 @@ func worker(id int, config Configuration, influxDBClient influxclient.Client, no
  results <- true
  continue
  }
  if err := vcenter.Init(config); err == nil {
  vcenter.Query(config, influxDBClient, nowTime)
  }
@@ -782,10 +831,17 @@ func main() {
  errlog = log.New(os.Stderr, "", log.Ldate|log.Ltime)
  flag.BoolVar(&debug, "debug", false, "Debug mode")
+ flag.BoolVar(&test, "test", false, "Test mode, data will be collected from vCenters, but nothing will be written to InfluxDB, only printed to stdout")
+ flag.BoolVar(&getversion, "version", false, "Get version and exit")
  workerCount := flag.Int("workers", 4, "Number of concurrent workers to query vcenters")
- cfgFile := flag.String("config", "/etc/"+baseName+".json", "Config file to use. Default is /etc/"+baseName+".json")
+ cfgFile := flag.String("config", "/etc/"+baseName+".json", "Config file to use")
  flag.Parse()
+ if getversion {
+ fmt.Println("Version:", version)
+ os.Exit(0)
+ }
  stdlog.Println("Starting", baseName, "with config file", *cfgFile)
  // read the configuration
@@ -810,7 +866,6 @@ func main() {
  config.InfluxDB.Password = os.Getenv("INFLUX_PASSWORD")
  config.InfluxDB.Database = os.Getenv("INFLUX_DATABASE")
  }
  // Support environment variables for VSphere
  // Currently ony one server is supported and added to the list of vSphere servers
  if vhostname := os.Getenv("VSPHERE_HOSTNAME"); vhostname != "" {

Sample JSON configuration

@@ -1,76 +1,82 @@
  {
    "Domain": ".lab",
+   "RemoveHostDomainName": false,
    "Interval": 60,
    "VCenters": [
      { "Username": "monitoring", "Password": "monixx", "Hostname": "vcenter-01.dc-01.lab" },
      { "Username": "monitoring", "Password": "monixx", "Hostname": "vcenter-01.dc-02.lab" },
      { "Username": "monitoring", "Password": "monixx", "Hostname": "vcenter-02.dc-02.lab" },
      { "Username": "monitoring", "Password": "monixx", "Hostname": "vcenter-01.home.lab" }
    ],
    "InfluxDB": {
+     "Prefix": "vsphere_",
      "Hostname": "http://influxdb-01.dc-01.lab:8086",
      "Username": "vm",
      "Password": "vmware334",
      "Database": "vmware_performance"
    },
    "Metrics": [
      {
        "ObjectType": [ "VirtualMachine", "HostSystem" ],
        "Definition": [
          { "Metric": "cpu.usage.average", "Instances": "*" },
          { "Metric": "cpu.usage.maximum", "Instances": "*" },
          { "Metric": "cpu.usagemhz.average", "Instances": "*" },
          { "Metric": "cpu.usagemhz.maximum", "Instances": "*" },
          { "Metric": "cpu.wait.summation", "Instances": "*" },
          { "Metric": "cpu.system.summation", "Instances": "*" },
          { "Metric": "cpu.ready.summation", "Instances": "*" },
          { "Metric": "mem.usage.average", "Instances": "*" },
          { "Metric": "mem.usage.maximum", "Instances": "*" },
          { "Metric": "mem.consumed.average", "Instances": "*" },
          { "Metric": "mem.consumed.maximum", "Instances": "*" },
          { "Metric": "mem.active.average", "Instances": "*" },
          { "Metric": "mem.active.maximum", "Instances": "*" },
          { "Metric": "mem.vmmemctl.average", "Instances": "*" },
          { "Metric": "mem.vmmemctl.maximum", "Instances": "*" },
          { "Metric": "mem.totalCapacity.average", "Instances": "*" },
          { "Metric": "net.packetsRx.summation", "Instances": "*" },
          { "Metric": "net.packetsTx.summation", "Instances": "*" },
          { "Metric": "net.throughput.usage.average", "Instances": "*" },
          { "Metric": "net.received.average", "Instances": "*" },
          { "Metric": "net.transmitted.average", "Instances": "*" },
          { "Metric": "net.throughput.usage.nfs.average", "Instances": "*" },
          { "Metric": "datastore.numberReadAveraged.average", "Instances": "*" },
          { "Metric": "datastore.numberWriteAveraged.average", "Instances": "*" },
          { "Metric": "datastore.read.average", "Instances": "*" },
          { "Metric": "datastore.write.average", "Instances": "*" },
          { "Metric": "datastore.totalReadLatency.average", "Instances": "*" },
          { "Metric": "datastore.totalWriteLatency.average", "Instances": "*" },
          { "Metric": "mem.capacity.provisioned.average", "Instances": "*" },
          { "Metric": "cpu.corecount.provisioned.average", "Instances": "*" }
        ]
      },
      {
        "ObjectType": [ "VirtualMachine" ],
        "Definition": [
          { "Metric": "datastore.datastoreVMObservedLatency.latest", "Instances": "*" }
        ]
      },
      {
        "ObjectType": [ "HostSystem" ],
        "Definition": [
          { "Metric": "disk.maxTotalLatency.latest", "Instances": "" },
          { "Metric": "disk.numberReadAveraged.average", "Instances": "*" },
          { "Metric": "disk.numberWriteAveraged.average", "Instances": "*" },
          { "Metric": "net.throughput.contention.summation", "Instances": "*" }
        ]
      },
      {
        "ObjectType": [ "Datastore" ],
        "Definition": [
          { "Metric": "disk.capacity.latest", "Instances": "*" },
          { "Metric": "disk.used.latest", "Instances": "*" }
        ]
      }
    ]
  }