/* Copyright 2016-2018 Adrian Todorov, Oxalide ato@oxalide.com
Original project author: https://github.com/cblomart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
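// Command vsphere-influxdb collects performance metrics from VMware vSphere and writes them to InfluxDB.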
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"math"
"net/url"
"os"
"path"
"regexp"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
influxclient "github.com/influxdata/influxdb/client/v2"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
const (
// name of the service
name = "vsphere-influxdb"
description = "send vsphere stats to influxdb"
)
// Configuration is used to store config data
type Configuration struct {
VCenters []*VCenter
Metrics []Metric
Interval int
Domain string
InfluxDB InfluxDB
}
// InfluxDB is used for InfluxDB connections
type InfluxDB struct {
Hostname string
Username string
Password string
Database string
Prefix string
}
// VCenter for VMware vCenter connections
type VCenter struct {
Hostname string
Username string
Password string
MetricGroups []*MetricGroup
client *govmomi.Client
}
// MetricDef is a metric definition
type MetricDef struct {
Metric string
Instances string
Key int32
}
// Metric is used for metrics retrieval
type Metric struct {
ObjectType []string
Definition []MetricDef
}
// MetricGroup is used for grouping metrics retrieval
type MetricGroup struct {
ObjectType string
Metrics []MetricDef
Mor []types.ManagedObjectReference
}
// EntityQuery holds the information needed to query an entity
type EntityQuery struct {
Name string
Entity types.ManagedObjectReference
Metrics []int32
}
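// Global command-line flags, loggers, and version string.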
var getversion, debug, test bool
var stdlog, errlog *log.Logger
var version = "master"
// Connect establishes the vCenter connection used to query data
func (vcenter *VCenter) Connect() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stdlog.Println("Connecting to vcenter:", vcenter.Hostname)
u, err := url.Parse("https://" + vcenter.Username + ":" + vcenter.Password + "@" + vcenter.Hostname + "/sdk")
if err != nil {
errlog.Println("Could not parse vcenter url:", vcenter.Hostname)
errlog.Println("Error:", err)
return err
}
client, err := govmomi.NewClient(ctx, u, true)
if err != nil {
errlog.Println("Could not connect to vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
return err
}
vcenter.client = client
return nil
}
// Disconnect from the vCenter
func (vcenter *VCenter) Disconnect() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if vcenter.client != nil {
if err := vcenter.client.Logout(ctx); err != nil {
errlog.Println("Could not disconnect properly from vcenter:", vcenter.Hostname, err)
return err
}
}
return nil
}
// Init retrieves the vCenter performance counters and maps the configured metrics to metric groups
func (vcenter *VCenter) Init(config Configuration) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client := vcenter.client
// Print version
if debug {
aboutInfo := client.Client.ServiceContent.About
stdlog.Println("Version:", aboutInfo.FullName)
}
var perfmanager mo.PerformanceManager
err := client.RetrieveOne(ctx, *client.ServiceContent.PerfManager, nil, &perfmanager)
if err != nil {
errlog.Println("Could not get performance manager")
errlog.Println("Error:", err)
return err
}
// Print PerformanceManager interval collection level
if debug {
stdlog.Println("PerformanceManager interval collection level")
spew.Dump(perfmanager.HistoricalInterval)
}
for _, perf := range perfmanager.PerfCounter {
groupinfo := perf.GroupInfo.GetElementDescription()
nameinfo := perf.NameInfo.GetElementDescription()
identifier := groupinfo.Key + "." + nameinfo.Key + "." + fmt.Sprint(perf.RollupType)
for _, metric := range config.Metrics {
for _, metricdef := range metric.Definition {
if metricdef.Metric == identifier {
metricd := MetricDef{Metric: metricdef.Metric, Instances: metricdef.Instances, Key: perf.Key}
for _, mtype := range metric.ObjectType {
added := false
for _, metricgroup := range vcenter.MetricGroups {
if metricgroup.ObjectType == mtype {
metricgroup.Metrics = append(metricgroup.Metrics, metricd)
added = true
break
}
}
if !added {
metricgroup := MetricGroup{ObjectType: mtype, Metrics: []MetricDef{metricd}}
vcenter.MetricGroups = append(vcenter.MetricGroups, &metricgroup)
}
}
}
}
}
}
return nil
}
// Query collects the configured performance metrics from the vCenter and writes them to InfluxDB
func (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.Client, nowTime time.Time) {
stdlog.Println("Setting up query inventory of vcenter:", vcenter.Hostname)
// Create the context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Get the client
client := vcenter.client
// Create the view manager
var viewManager mo.ViewManager
err := client.RetrieveOne(ctx, *client.ServiceContent.ViewManager, nil, &viewManager)
if err != nil {
errlog.Println("Could not get view manager from vcenter:", vcenter.Hostname)
errlog.Println("Error: ", err)
return
}
// Get the Datacenters from root folder
var rootFolder mo.Folder
err = client.RetrieveOne(ctx, client.ServiceContent.RootFolder, nil, &rootFolder)
if err != nil {
errlog.Println("Could not get root folder from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
return
}
datacenters := []types.ManagedObjectReference{}
for _, child := range rootFolder.ChildEntity {
//if child.Type == "Datacenter" {
datacenters = append(datacenters, child)
//}
}
// Get interesting object types from specified queries
objectTypes := []string{}
for _, group := range vcenter.MetricGroups {
objectTypes = append(objectTypes, group.ObjectType)
}
objectTypes = append(objectTypes, "ClusterComputeResource")
objectTypes = append(objectTypes, "ResourcePool")
objectTypes = append(objectTypes, "Datastore")
// Loop through datacenters and create the interesting object reference list
mors := []types.ManagedObjectReference{}
for _, datacenter := range datacenters {
// Create the CreateContainerView request
req := types.CreateContainerView{This: viewManager.Reference(), Container: datacenter, Type: objectTypes, Recursive: true}
res, err := methods.CreateContainerView(ctx, client.RoundTripper, &req)
if err != nil {
errlog.Println("Could not create container view from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
continue
}
// Retrieve the created ContainerView
var containerView mo.ContainerView
err = client.RetrieveOne(ctx, res.Returnval, nil, &containerView)
if err != nil {
errlog.Println("Could not get container view from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
continue
}
// Add found object to object list
mors = append(mors, containerView.View...)
}
// Create MORS for each object type
vmRefs := []types.ManagedObjectReference{}
hostRefs := []types.ManagedObjectReference{}
clusterRefs := []types.ManagedObjectReference{}
respoolRefs := []types.ManagedObjectReference{}
datastoreRefs := []types.ManagedObjectReference{}
newMors := []types.ManagedObjectReference{}
if debug {
spew.Dump(mors)
}
// Assign each MORS type to a specific array
for _, mor := range mors {
if mor.Type == "VirtualMachine" {
vmRefs = append(vmRefs, mor)
newMors = append(newMors, mor)
} else if mor.Type == "HostSystem" {
hostRefs = append(hostRefs, mor)
newMors = append(newMors, mor)
} else if mor.Type == "ClusterComputeResource" {
clusterRefs = append(clusterRefs, mor)
} else if mor.Type == "ResourcePool" {
respoolRefs = append(respoolRefs, mor)
} else if mor.Type == "Datastore" {
datastoreRefs = append(datastoreRefs, mor)
}
}
// Keep only the VM and host MORs for the performance queries
mors = newMors
pc := property.DefaultCollector(client.Client)
// govmomi panics when the list of objects to retrieve is empty, so check each list first
// Retrieve summary and virtual hardware properties for all VMs
// ("config.hardware.device" is needed below to map vDisks to their datastores)
var vmmo []mo.VirtualMachine
if len(vmRefs) > 0 {
err = pc.Retrieve(ctx, vmRefs, []string{"summary", "config.hardware.device"}, &vmmo)
if err != nil {
fmt.Println(err)
return
}
}
// Retrieve properties for hosts
var hsmo []mo.HostSystem
if len(hostRefs) > 0 {
err = pc.Retrieve(ctx, hostRefs, []string{"parent", "summary"}, &hsmo)
if err != nil {
fmt.Println(err)
return
}
}
//Retrieve properties for Cluster(s)
var clmo []mo.ClusterComputeResource
if len(clusterRefs) > 0 {
err = pc.Retrieve(ctx, clusterRefs, []string{"name", "configuration", "host"}, &clmo)
if err != nil {
fmt.Println(err)
return
}
}
//Retrieve properties for ResourcePool
var rpmo []mo.ResourcePool
if len(respoolRefs) > 0 {
err = pc.Retrieve(ctx, respoolRefs, []string{"summary"}, &rpmo)
if err != nil {
fmt.Println(err)
return
}
}
// Retrieve summary property for all datastores
var dss []mo.Datastore
if len(datastoreRefs) > 0 {
err = pc.Retrieve(ctx, datastoreRefs, []string{"summary"}, &dss)
if err != nil {
log.Fatal(err)
}
}
// Initialize the map that will hold the VM MOR to ResourcePool reference
vmToPool := make(map[types.ManagedObjectReference]string)
var respool []mo.ResourcePool
// Retrieve properties for ResourcePools
if len(respoolRefs) > 0 {
if debug {
stdlog.Println("Going inside ResourcePools")
}
err = pc.Retrieve(ctx, respoolRefs, []string{"name", "config", "vm"}, &respool)
if err != nil {
fmt.Println(err)
return
}
for _, pool := range respool {
if debug {
stdlog.Println("---resourcepool name - you should see every resourcepool here (+VMs inside)----")
stdlog.Println(pool.Name)
stdlog.Println(pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit)
stdlog.Println(pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit)
}
for _, vm := range pool.Vm {
if debug {
stdlog.Println("--VM ID - you should see every VM ID here--")
stdlog.Println(vm)
}
vmToPool[vm] = pool.Name
}
}
}
// Initialize the map that will hold the VM MOR to cluster reference
vmToCluster := make(map[types.ManagedObjectReference]string)
// Initialize the map that will hold the host MOR to cluster reference
hostToCluster := make(map[types.ManagedObjectReference]string)
// Initialize the map that will hold, per VM MOR, the vDisk object ID to datastore name mapping
vDiskToDatastore := make(map[types.ManagedObjectReference]map[string]string)
// Retrieve properties for clusters, if any
if len(clusterRefs) > 0 {
if debug {
stdlog.Println("Going inside clusters")
}
// Step 1: Get the host of each VM; the host is found under the VM's runtime structure.
// Step 2: Get the ManagedObjectReference of that host.
// Step 3: Get the list of all clusters that vCenter knows about and, for each one, also get its hosts.
// Step 4: Loop through all clusters and their hosts; when a host matches the VM's host from step 2,
// record the cluster name for that host and for the VM.
for _, vm := range vmmo {
// check if VM is a clone in progress and skip it
if vm.Summary.Runtime.Host == nil {
continue
}
vmhost := vm.Summary.Runtime.Host
for _, cl := range clmo {
for _, host := range cl.Host {
hostToCluster[host] = cl.Name
if *vmhost == host {
vmToCluster[vm.Self] = cl.Name
}
}
}
}
}
// Build the name summary map for the resource pools
respoolSummary := make(map[types.ManagedObjectReference]map[string]string)
for _, pools := range rpmo {
respoolSummary[pools.Self] = make(map[string]string)
respoolSummary[pools.Self]["name"] = pools.Summary.GetResourcePoolSummary().Name
}
// Initialize the maps that will hold the extra tags and metrics for hosts
hostSummary := make(map[types.ManagedObjectReference]map[string]string)
hostExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)
for _, host := range hsmo {
// Extra tags per host
hostSummary[host.Self] = make(map[string]string)
hostSummary[host.Self]["name"] = host.Summary.Config.Name
hostSummary[host.Self]["cluster"] = hostToCluster[host.Self]
// Extra metrics per host
hostExtraMetrics[host.Self] = make(map[string]int64)
hostExtraMetrics[host.Self]["uptime"] = int64(host.Summary.QuickStats.Uptime)
hostExtraMetrics[host.Self]["cpu_corecount_total"] = int64(host.Summary.Hardware.NumCpuThreads)
}
// Initialize the maps that will hold the extra tags and metrics for VMs
vmSummary := make(map[types.ManagedObjectReference]map[string]string)
vmExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)
// Assign extra details per VM in vmSummary
for _, vm := range vmmo {
// extra tags per VM
vmSummary[vm.Self] = make(map[string]string)
// Ugly way to extract datastore value
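// The VM's config summary contains the VMX path in the form "[datastoreName] path/to/vm.vmx";
// the regexp below captures the bracketed datastore name.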
re, err := regexp.Compile(`\[(.*?)\]`)
if err != nil {
fmt.Println(err)
}
vmSummary[vm.Self]["datastore"] = strings.Replace(strings.Replace(re.FindString(fmt.Sprintln(vm.Summary.Config)), "[", "", -1), "]", "", -1)
// Record flat vDisks that live on a different datastore than the VM's main one (needs "config.hardware.device" retrieved above)
if vm.Config != nil {
for _, device := range vm.Config.Hardware.Device {
disk, ok := device.(*types.VirtualDisk)
if !ok {
continue
}
backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
if !ok {
continue
}
dsName := strings.Trim(re.FindString(backing.FileName), "[]")
if dsName != "" && dsName != vmSummary[vm.Self]["datastore"] {
if vDiskToDatastore[vm.Self] == nil {
vDiskToDatastore[vm.Self] = make(map[string]string)
}
vDiskToDatastore[vm.Self][disk.DiskObjectId] = dsName
}
}
}
if vmToCluster[vm.Self] != "" {
vmSummary[vm.Self]["cluster"] = vmToCluster[vm.Self]
}
if vmToPool[vm.Self] != "" {
vmSummary[vm.Self]["respool"] = vmToPool[vm.Self]
}
if vm.Summary.Runtime.Host != nil {
vmSummary[vm.Self]["esx"] = hostSummary[*vm.Summary.Runtime.Host]["name"]
}
// Extra metrics per VM
vmExtraMetrics[vm.Self] = make(map[string]int64)
vmExtraMetrics[vm.Self]["uptime"] = int64(vm.Summary.QuickStats.UptimeSeconds)
}
fmt.Println("vDiskDatastore:")
spew.Dump(vDiskToDatastore)
// get object names
objects := []mo.ManagedEntity{}
//object for property collection
propSpec := &types.PropertySpec{Type: "ManagedEntity", PathSet: []string{"name"}}
var objectSet []types.ObjectSpec
for _, mor := range mors {
objectSet = append(objectSet, types.ObjectSpec{Obj: mor, Skip: types.NewBool(false)})
}
//retrieve name property
propreq := types.RetrieveProperties{SpecSet: []types.PropertyFilterSpec{{ObjectSet: objectSet, PropSet: []types.PropertySpec{*propSpec}}}}
propres, err := client.PropertyCollector().RetrieveProperties(ctx, propreq)
if err != nil {
errlog.Println("Could not retrieve object names from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
return
}
//load retrieved properties
err = mo.LoadRetrievePropertiesResponse(propres, &objects)
if err != nil {
errlog.Println("Could not retrieve object names from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
return
}
//create a map to resolve object names
morToName := make(map[types.ManagedObjectReference]string)
for _, object := range objects {
morToName[object.Self] = object.Name
}
//create a map to resolve metric names
metricToName := make(map[int32]string)
for _, metricgroup := range vcenter.MetricGroups {
for _, metricdef := range metricgroup.Metrics {
metricToName[metricdef.Key] = metricdef.Metric
}
}
// Create Queries from interesting objects and requested metrics
queries := []types.PerfQuerySpec{}
// Common parameters
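// 20 seconds is the real-time sampling interval exposed by vCenter's performance manager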
intervalID := int32(20)
endTime := time.Now().Add(time.Duration(-1) * time.Second)
startTime := endTime.Add(time.Duration(-config.Interval) * time.Second)
// Build one PerfQuerySpec per managed object, using the metric IDs registered for its type
for _, mor := range mors {
metricIds := []types.PerfMetricId{}
for _, metricgroup := range vcenter.MetricGroups {
if metricgroup.ObjectType == mor.Type {
for _, metricdef := range metricgroup.Metrics {
metricIds = append(metricIds, types.PerfMetricId{CounterId: metricdef.Key, Instance: metricdef.Instances})
}
}
}
queries = append(queries, types.PerfQuerySpec{Entity: mor, StartTime: &startTime, EndTime: &endTime, MetricId: metricIds, IntervalId: intervalID})
}
// Query the performances
perfreq := types.QueryPerf{This: *client.ServiceContent.PerfManager, QuerySpec: queries}
perfres, err := methods.QueryPerf(ctx, client.RoundTripper, &perfreq)
if err != nil {
errlog.Println("Could not request perfs from vcenter:", vcenter.Hostname)
errlog.Println("Error:", err)
return
}
// Strip the configured domain from the vCenter hostname for use as a tag
vcName := strings.Replace(vcenter.Hostname, config.Domain, "", -1)
//Influx batch points
bp, err := influxclient.NewBatchPoints(influxclient.BatchPointsConfig{
Database: config.InfluxDB.Database,
Precision: "s",
})
if err != nil {
errlog.Println(err)
return
}
for _, base := range perfres.Returnval {
pem := base.(*types.PerfEntityMetric)
entityName := strings.ToLower(pem.Entity.Type)
name := strings.ToLower(strings.Replace(morToName[pem.Entity], config.Domain, "", -1))
//Create map for InfluxDB fields
fields := make(map[string]interface{})
// Create map for InfluxDB tags
tags := map[string]string{"host": vcName, "name": name}
// Add extra per VM tags
if summary, ok := vmSummary[pem.Entity]; ok {
for key, tag := range summary {
tags[key] = tag
}
}
if summary, ok := hostSummary[pem.Entity]; ok {
for key, tag := range summary {
tags[key] = tag
}
}
if summary, ok := respoolSummary[pem.Entity]; ok {
for key, tag := range summary {
tags[key] = tag
}
}
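// specialFields/specialTags collect per-instance series (e.g. per-core or per-NIC values),
// keyed by measurement -> object name -> instance -> field, so each instance becomes its own point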
specialFields := make(map[string]map[string]map[string]map[string]interface{})
specialTags := make(map[string]map[string]map[string]map[string]string)
nowTime := time.Now()
for _, baseserie := range pem.Value {
serie := baseserie.(*types.PerfMetricIntSeries)
metricName := strings.ToLower(metricToName[serie.Id.CounterId])
influxMetricName := strings.Replace(metricName, ".", "_", -1)
instanceName := strings.ToLower(strings.Replace(serie.Id.Instance, ".", "_", -1))
measurementName := strings.Split(metricName, ".")[0]
if strings.Contains(influxMetricName, "datastore") {
instanceName = ""
}
var value int64 = -1
if strings.HasSuffix(metricName, ".average") {
value = average(serie.Value...)
} else if strings.HasSuffix(metricName, ".maximum") {
value = max(serie.Value...)
} else if strings.HasSuffix(metricName, ".minimum") {
value = min(serie.Value...)
} else if strings.HasSuffix(metricName, ".latest") {
value = serie.Value[len(serie.Value)-1]
} else if strings.HasSuffix(metricName, ".summation") {
value = sum(serie.Value...)
}
if instanceName == "" {
fields[influxMetricName] = value
} else {
// init maps
if specialFields[measurementName] == nil {
specialFields[measurementName] = make(map[string]map[string]map[string]interface{})
specialTags[measurementName] = make(map[string]map[string]map[string]string)
}
if specialFields[measurementName][tags["name"]] == nil {
specialFields[measurementName][tags["name"]] = make(map[string]map[string]interface{})
specialTags[measurementName][tags["name"]] = make(map[string]map[string]string)
}
if specialFields[measurementName][tags["name"]][instanceName] == nil {
specialFields[measurementName][tags["name"]][instanceName] = make(map[string]interface{})
specialTags[measurementName][tags["name"]][instanceName] = make(map[string]string)
}
specialFields[measurementName][tags["name"]][instanceName][influxMetricName] = value
for k, v := range tags {
specialTags[measurementName][tags["name"]][instanceName][k] = v
}
specialTags[measurementName][tags["name"]][instanceName]["instance"] = instanceName
}
}
// Create the fields for the hostExtraMetrics
if metrics, ok := hostExtraMetrics[pem.Entity]; ok {
for key, value := range metrics {
fields[key] = value
}
}
// Create the fields for the vmExtraMetrics
if metrics, ok := vmExtraMetrics[pem.Entity]; ok {
for key, value := range metrics {
fields[key] = value
}
}
//create InfluxDB points
pt, err := influxclient.NewPoint(config.InfluxDB.Prefix+entityName, tags, fields, nowTime)
if err != nil {
errlog.Println(err)
continue
}
bp.AddPoint(pt)
for measurement, v := range specialFields {
for name, metric := range v {
for instance, value := range metric {
pt2, err := influxclient.NewPoint(config.InfluxDB.Prefix+measurement, specialTags[measurement][name][instance], value, time.Now())
if err != nil {
errlog.Println(err)
continue
}
bp.AddPoint(pt2)
}
}
}
for _, pool := range respool {
respoolFields := map[string]interface{}{
"cpu_limit": pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit,
"memory_limit": pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit,
}
respoolTags := map[string]string{"pool_name": pool.Name}
pt3, err := influxclient.NewPoint(config.InfluxDB.Prefix+"resourcepool", respoolTags, respoolFields, time.Now())
if err != nil {
errlog.Println(err)
continue
}
bp.AddPoint(pt3)
}
for _, datastore := range dss {
datastoreFields := map[string]interface{}{
"capacity": datastore.Summary.Capacity,
"free_space": datastore.Summary.FreeSpace,
"usage": 1.0 - (float64(datastore.Summary.FreeSpace)/float64(datastore.Summary.Capacity)),
}
datastoreTags := map[string]string{"ds_name": datastore.Summary.Name, "host": vcName}
pt4, err := influxclient.NewPoint(config.InfluxDB.Prefix+"datastore", datastoreTags, datastoreFields, time.Now())
if err != nil {
errlog.Println(err)
continue
}
bp.AddPoint(pt4)
}
}
//InfluxDB send if not in test mode
if !test {
err = InfluxDBClient.Write(bp)
if err != nil {
errlog.Println(err)
return
}
stdlog.Println("Sent data to Influxdb from:", vcenter.Hostname)
} else {
spew.Dump(bp)
}
}
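// min returns the smallest non-negative value in n, or -1 if there is none.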
func min(n ...int64) int64 {
var min int64 = -1
for _, i := range n {
if i >= 0 {
if min == -1 {
min = i
} else {
if i < min {
min = i
}
}
}
}
return min
}
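// max returns the largest non-negative value in n, or -1 if there is none.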
func max(n ...int64) int64 {
var max int64 = -1
for _, i := range n {
if i >= 0 {
if max == -1 {
max = i
} else {
if i > max {
max = i
}
}
}
}
return max
}
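// sum returns the total of the positive values in n.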
func sum(n ...int64) int64 {
var total int64
for _, i := range n {
if i > 0 {
total += i
}
}
return total
}
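// average returns the mean of the non-negative values in n, rounded to the nearest integer, or -1 if there is none.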
func average(n ...int64) int64 {
var total int64
var count int64
for _, i := range n {
if i >= 0 {
count++
total += i
}
}
if count == 0 {
return -1
}
return int64(math.Floor(float64(total)/float64(count) + .5))
}
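// worker consumes vCenters from the channel, connects, runs Init and Query, disconnects, and signals completion on results.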
func worker(id int, config Configuration, influxDBClient influxclient.Client, nowTime time.Time, vcenters <-chan *VCenter, results chan<- bool) {
for vcenter := range vcenters {
if debug {
stdlog.Println("Worker", id, "received vcenter", vcenter.Hostname)
}
if err := vcenter.Connect(); err != nil {
errlog.Println("Could not initialize connection to vcenter", vcenter.Hostname, err)
results <- true
continue
}
if err := vcenter.Init(config); err == nil {
vcenter.Query(config, influxDBClient, nowTime)
}
vcenter.Disconnect()
results <- true
}
}
func main() {
baseName := path.Base(os.Args[0])
stdlog = log.New(os.Stdout, "", log.Ldate|log.Ltime)
errlog = log.New(os.Stderr, "", log.Ldate|log.Ltime)
flag.BoolVar(&debug, "debug", false, "Debug mode")
flag.BoolVar(&test, "test", false, "Test mode, data will be collected from vCenters, but nothing will be written to InfluxDB, only printed to stdout")
flag.BoolVar(&getversion, "version", false, "Get version and exit")
workerCount := flag.Int("workers", 4, "Number of concurrent workers to query vcenters")
cfgFile := flag.String("config", "/etc/"+baseName+".json", "Config file to use")
flag.Parse()
if getversion {
fmt.Println("Version:", version)
os.Exit(0)
}
stdlog.Println("Starting", baseName, "with config file", *cfgFile)
// read the configuration
file, err := os.Open(*cfgFile)
if err != nil {
errlog.Println("Could not open configuration file", *cfgFile)
errlog.Fatalln(err)
}
jsondec := json.NewDecoder(file)
config := Configuration{}
err = jsondec.Decode(&config)
if err != nil {
errlog.Println("Could not decode configuration file", *cfgFile)
errlog.Fatalln(err)
}
// Support environment variable overrides for the InfluxDB connection
if ihostname := os.Getenv("INFLUX_HOSTNAME"); ihostname != "" {
config.InfluxDB.Hostname = os.Getenv("INFLUX_HOSTNAME")
config.InfluxDB.Username = os.Getenv("INFLUX_USERNAME")
config.InfluxDB.Password = os.Getenv("INFLUX_PASSWORD")
config.InfluxDB.Database = os.Getenv("INFLUX_DATABASE")
}
// Support environment variables for vSphere
// Currently only one server is supported and added to the list of vSphere servers
if vhostname := os.Getenv("VSPHERE_HOSTNAME"); vhostname != "" {
vc := VCenter{
Hostname: os.Getenv("VSPHERE_HOSTNAME"),
Username: os.Getenv("VSPHERE_USERNAME"),
Password: os.Getenv("VSPHERE_PASSWORD"),
}
config.VCenters = append(config.VCenters, &vc)
}
// Print configuration in debug mode
if debug {
stdlog.Println("---Configuration - you should see the config here---")
spew.Dump(config)
}
// Initialize InfluxDB and connect to database
InfluxDBClient, err := influxclient.NewHTTPClient(influxclient.HTTPConfig{
Addr: config.InfluxDB.Hostname,
Username: config.InfluxDB.Username,
Password: config.InfluxDB.Password,
})
if err != nil {
errlog.Println("Could not initialize InfluxDB client")
errlog.Fatalln(err)
}
if _, _, err := InfluxDBClient.Ping(0); err != nil {
errlog.Println("Could not connect to InfluxDB")
errlog.Fatalln(err)
}
defer InfluxDBClient.Close()
stdlog.Println("Successfully connected to Influx")
// make the channels, get the time, launch the goroutines
vcenterCount := len(config.VCenters)
vcenters := make(chan *VCenter, vcenterCount)
results := make(chan bool, vcenterCount)
nowTime := time.Now()
for i := 0; i < *workerCount; i++ {
go worker(i, config, InfluxDBClient, nowTime, vcenters, results)
}
for _, vcenter := range config.VCenters {
vcenters <- vcenter
}
close(vcenters)
for i := 0; i < vcenterCount; i++ {
<-results
}
}