diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1219f1440f2cc..dee5745cf037a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -91,6 +91,7 @@ following works: - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) - github.com/dgryski/go-rendezvous [MIT License](https://github.com/dgryski/go-rendezvous/blob/master/LICENSE) +- github.com/digitalocean/go-libvirt [Apache License 2.0](https://github.com/digitalocean/go-libvirt/blob/master/LICENSE.md) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) - github.com/djherbis/times [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) @@ -160,6 +161,7 @@ following works: - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) - github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE) - github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) +- github.com/hashicorp/packer-plugin-sdk [Mozilla Public License 2.0](https://github.com/hashicorp/packer-plugin-sdk/blob/main/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/huandu/xstrings [MIT License](https://github.com/huandu/xstrings/blob/master/LICENSE) - github.com/imdario/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE) @@ -279,6 +281,7 @@ following works: - github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) - github.com/stretchr/testify [MIT License](https://github.com/stretchr/testify/blob/master/LICENSE) - github.com/testcontainers/testcontainers-go [MIT License](https://github.com/testcontainers/testcontainers-go/blob/main/LICENSE) +- github.com/thomasklein94/packer-plugin-libvirt [Mozilla Public License 2.0](https://github.com/thomasklein94/packer-plugin-libvirt/blob/main/LICENSE) - github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) diff --git a/go.mod b/go.mod index c89b33d3cda41..e4461f3cea11c 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,7 @@ require ( github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f github.com/couchbase/go-couchbase v0.1.1 github.com/denisenkom/go-mssqldb v0.12.0 + github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086 github.com/dimchansky/utfbom v1.1.1 github.com/djherbis/times v1.5.0 github.com/docker/docker v20.10.17+incompatible @@ -147,6 +148,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.13.0 + github.com/thomasklein94/packer-plugin-libvirt v0.3.4 github.com/tidwall/gjson v1.14.3 github.com/tinylib/msgp v1.1.6 github.com/urfave/cli/v2 v2.16.3 @@ -287,12 +289,13 @@ require ( 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v0.16.2 // indirect - github.com/hashicorp/go-immutable-radix v1.3.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/packer-plugin-sdk v0.3.1 // indirect github.com/hashicorp/serf v0.9.7 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -349,7 +352,6 @@ require ( github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/philhofer/fwd v1.1.1 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/transport v0.13.0 // indirect diff --git a/go.sum b/go.sum index 0e9c835ff1f14..f04e39dcae64a 100644 --- a/go.sum +++ b/go.sum @@ -694,6 +694,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086 h1:FTREXo+EVmU9nOCaQ46PvH0hs1Rt2/diCoTAtxzDxrA= +github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086/go.mod h1:yhKBkgJm/PWVHCFHLlFwqhIzS7FcutIYmS/fmzex5LQ= github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= @@ -1280,8 +1282,9 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -1289,8 +1292,9 @@ github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -1330,6 +1334,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/packer-plugin-sdk v0.3.1 h1:Gr/mnihsdUcPfGiruFL93BQkiFh3EFPwyxxTWkwvRsQ= +github.com/hashicorp/packer-plugin-sdk v0.3.1/go.mod h1:+GzydiXdn0CkueigqXBsX4Poz5gfmFXZ/DkxKt4fmt4= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= @@ -1950,7 +1956,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.1/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -2258,6 +2263,8 @@ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955u github.com/testcontainers/testcontainers-go v0.13.0 h1:OUujSlEGsXVo/ykPVZk3KanBNGN0TYb/7oKIPVn15JA= github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk= github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= +github.com/thomasklein94/packer-plugin-libvirt v0.3.4 h1:K+NkHFcZuiUTp4ZiDdBhWRMZiSMdsXwGuzyg4THKDAU= +github.com/thomasklein94/packer-plugin-libvirt v0.3.4/go.mod h1:FLQTTGhVNak3rFgrZCJ2TZR6Cywz7ef/+z5Pg11EvJg= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -2671,6 +2678,7 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
diff --git a/plugins/inputs/all/libvirt.go b/plugins/inputs/all/libvirt.go
new file mode 100644
index 0000000000000..89622e2c17cee
--- /dev/null
+++ b/plugins/inputs/all/libvirt.go
@@ -0,0 +1,5 @@
+//go:build !custom || inputs || inputs.libvirt

+package all

+import _ "github.com/influxdata/telegraf/plugins/inputs/libvirt" // register plugin
diff --git a/plugins/inputs/libvirt/README.md b/plugins/inputs/libvirt/README.md
new file mode 100644
index 0000000000000..46e5922a38d13
--- /dev/null
+++ b/plugins/inputs/libvirt/README.md
@@ -0,0 +1,263 @@
+# Libvirt Input Plugin
+
+The `libvirt` plugin collects statistics about virtualized
+guests on a system using the libvirt virtualization API,
+created by Red Hat's Emerging Technology group.
+Metrics are gathered directly from the hypervisor on the host
+system, which means that Telegraf doesn't have to be installed
+and configured on each guest system.
+
+## Prerequisites
+
+For proper operation of the libvirt plugin,
+the host system must have:
+
+- virtualization options enabled for the host CPU
+- libvirtd and its dependencies installed and running
+- the qemu hypervisor installed and running
+- at least one virtual machine for statistics monitoring
+
+Useful links:
+
+- [libvirt](https://libvirt.org/)
+- [qemu](https://www.qemu.org/)
+
+## Configuration
+
+```toml
+# The libvirt plugin collects statistics from virtualized guests using the libvirt virtualization API.
+[[inputs.libvirt]]
+  ## Domain names from which libvirt gathers statistics.
+  ## By default (empty or missing array) the plugin gathers statistics from each domain registered in the host system.
+  # domains = []
+
+  ## Libvirt connection URI with hypervisor.
+  ## The plugin supports multiple transport protocols and approaches which are configurable via the URI.
+  ## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters]
+  ## Supported transport protocols: ssh, tcp, tls, unix
+  ## URI examples for each type of transport protocol:
+  ## 1. SSH: qemu+ssh://<USER@IP_OR_HOSTNAME>/system?keyfile=/<PATH_TO_PRIVATE_KEY>&known_hosts=/<PATH_TO_KNOWN_HOSTS>
+  ## 2. TCP: qemu+tcp://<IP_OR_HOSTNAME>/system
+  ## 3. TLS: qemu+tls://<HOSTNAME>/system?pkipath=/certs_dir/<DIR_TO_CERTS>
+  ## 4. UNIX: qemu+unix:///system?socket=/<PATH_TO_SOCKET>
+  ## Default URI is qemu:///system
+  # libvirt_uri = "qemu:///system"
+
+  ## Statistics groups for which libvirt plugin will gather statistics.
+  ## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate
+  ## Empty array means no metrics for statistics groups will be exposed by the plugin.
+  ## By default the plugin will gather all available statistics.
+  # statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"]
+
+  ## A list containing additional statistics to be exposed by libvirt plugin.
+  ## Supported additional statistics: vcpu_mapping
+  ## By default (empty or missing array) the plugin will not collect additional statistics.
+  # additional_statistics = []
+```
+
+Useful links:
+
+- [Libvirt URI docs](https://libvirt.org/uri.html)
+- [TLS setup for libvirt](https://wiki.libvirt.org/page/TLSSetup)
+
+When one or more of the following occur:
+
+- the global Telegraf variable `interval` is set to a low value (e.g. 1s),
+- a significant number of VMs are monitored,
+- the medium connecting the plugin to the hypervisor is inefficient,
+
+the following warning may appear in the logs:
+`Collection took longer than expected`.
+
+In that case, `interval` should be set inside the plugin configuration
+and adjusted to the plugin's runtime environment.
+
+Example:
+
+```toml
+[[inputs.libvirt]]
+  interval = "30s"
+```
+
+### Example configuration
+
+```toml
+[[inputs.libvirt]]
+  domains = ["ubuntu_20"]
+  libvirt_uri = "qemu:///system"
+  statistics_groups = ["state", "interface"]
+  additional_statistics = ["vcpu_mapping"]
+```
+
+## Metrics
+
+See the table below for a list of metrics produced by the plugin.
+
+The exact metric format depends on the statistics libvirt reports,
+which may vary depending on the version of libvirt on your system.
+
+The metrics are divided into the following groups of statistics:
+
+- state
+- cpu_total
+- balloon
+- vcpu
+- interface
+- perf
+- block
+- iothread
+- memory
+- dirtyrate
+- vcpu_mapping - additional statistics
+
+Statistics groups in the plugin correspond to the grouping of
+metrics read directly from libvirtd using the `virsh domstats` command.
+More details about the metrics can be found at the links below:
+
+- [Domain statistics](https://libvirt.org/manpages/virsh.html#domstats)
+- [Performance monitoring events](https://libvirt.org/formatdomain.html#performance-monitoring-events)
+
+| **Statistics group** | **Metric name** | **Exposed Telegraf field** | **Description** |
+|:---|:---|:---|:---|
+| **state** | state.state | state | state of the VM, returned as number from virDomainState enum |
+|| state.reason | reason | reason for entering given state, returned as int from virDomain*Reason enum corresponding to given state |
+| **cpu_total** | cpu.time | time | total cpu time spent for this domain in nanoseconds |
+|| cpu.user | user | user cpu time spent in nanoseconds |
+|| cpu.system | system | system cpu time spent in nanoseconds |
+|| cpu.haltpoll.success.time | haltpoll_success_time | cpu halt polling success time spent in nanoseconds |
+|| cpu.haltpoll.fail.time | haltpoll_fail_time | cpu halt polling fail time spent in nanoseconds |
+|| cpu.cache.monitor.count | count | the number of cache monitors for this domain |
+|| cpu.cache.monitor.\<num\>.name | name | the name of cache monitor \<num\>, not available for kernels from 4.14 upwards |
+|| cpu.cache.monitor.\<num\>.vcpus | vcpus | vcpu list of cache monitor \<num\>, not available for kernels from 4.14 upwards |
+|| cpu.cache.monitor.\<num\>.bank.count | bank_count | the number of cache banks in cache monitor \<num\>, not available for kernels from 4.14 upwards |
+|| cpu.cache.monitor.\<num\>.bank.\<index\>.id | id | host allocated cache id for bank \<index\> in cache monitor \<num\>, not available for kernels from 4.14 upwards |
+|| cpu.cache.monitor.\<num\>.bank.\<index\>.bytes | bytes | the number of bytes of last level cache that the domain is using on cache bank \<index\>, not available for kernels from 4.14 upwards |
+| **balloon** | balloon.current | current | the memory in KiB currently used |
+|| balloon.maximum | maximum | the maximum memory in KiB allowed |
+|| balloon.swap_in | swap_in | the amount of data read from swap space (in KiB) |
+|| balloon.swap_out | swap_out | the amount of memory written out to swap space (in KiB) |
+|| balloon.major_fault | major_fault | the number of page faults when disk IO was required |
+|| balloon.minor_fault | minor_fault | the number of other page faults |
+|| balloon.unused | unused | the amount of memory left unused by the system (in KiB) |
+|| balloon.available | available | the amount of usable memory as seen by the domain (in KiB) |
+|| balloon.rss | rss | Resident Set Size of running domain's process (in KiB) |
+|| balloon.usable | usable | the amount of memory which can be reclaimed by balloon without causing host swapping (in KiB) |
+|| balloon.last-update | last_update | timestamp of the last update of statistics (in seconds) |
+|| balloon.disk_caches | disk_caches | the amount of memory that can be reclaimed without additional I/O, typically disk (in KiB) |
+|| balloon.hugetlb_pgalloc | hugetlb_pgalloc | the number of successful huge page allocations from inside the domain via virtio balloon |
+|| balloon.hugetlb_pgfail | hugetlb_pgfail | the number of failed huge page allocations from inside the domain via virtio balloon |
+| **vcpu** | vcpu.current | current | current number of online virtual CPUs |
+|| vcpu.maximum | maximum | maximum number of online virtual CPUs |
+|| vcpu.\<num\>.state | state | state of the virtual CPU \<num\>, as number from virVcpuState enum |
+|| vcpu.\<num\>.time | time | virtual cpu time spent by virtual CPU \<num\> (in microseconds) |
+|| vcpu.\<num\>.wait | wait | virtual cpu time spent by virtual CPU \<num\> waiting on I/O (in microseconds) |
+|| vcpu.\<num\>.halted | halted | virtual CPU \<num\> is halted: yes or no (may indicate the processor is idle or even disabled, depending on the architecture) |
+|| vcpu.\<num\>.halted | halted_i | virtual CPU \<num\> is halted: 1 (for "yes") or 0 (for other values) (may indicate the processor is idle or even disabled, depending on the architecture) |
+|| vcpu.\<num\>.delay | delay | time the vCPU \<num\> thread was enqueued by the host scheduler, but was waiting in the queue instead of running. Exposed to the VM as a steal time. |
+|| --- | cpu_id | information about mapping vcpu_id to cpu_id (id of the physical cpu). Only exposed when statistics_groups contains vcpu and additional_statistics contains vcpu_mapping (in config) |
+| **interface** | net.count | count | number of network interfaces on this domain |
+|| net.\<num\>.name | name | name of the interface \<num\> |
+|| net.\<num\>.rx.bytes | rx_bytes | number of bytes received |
+|| net.\<num\>.rx.pkts | rx_pkts | number of packets received |
+|| net.\<num\>.rx.errs | rx_errs | number of receive errors |
+|| net.\<num\>.rx.drop | rx_drop | number of receive packets dropped |
+|| net.\<num\>.tx.bytes | tx_bytes | number of bytes transmitted |
+|| net.\<num\>.tx.pkts | tx_pkts | number of packets transmitted |
+|| net.\<num\>.tx.errs | tx_errs | number of transmission errors |
+|| net.\<num\>.tx.drop | tx_drop | number of transmit packets dropped |
+| **perf** | perf.cmt | cmt | the cache usage in Byte currently used, not available for kernels from 4.14 upwards |
+|| perf.mbmt | mbmt | total system bandwidth from one level of cache, not available for kernels from 4.14 upwards |
+|| perf.mbml | mbml | bandwidth of memory traffic for a memory controller, not available for kernels from 4.14 upwards |
+|| perf.cpu_cycles | cpu_cycles | the count of cpu cycles (total/elapsed) |
+|| perf.instructions | instructions | the count of instructions |
+|| perf.cache_references | cache_references | the count of cache hits |
+|| perf.cache_misses | cache_misses | the count of cache misses |
+|| perf.branch_instructions | branch_instructions | the count of branch instructions |
+|| perf.branch_misses | branch_misses | the count of branch misses |
+|| perf.bus_cycles | bus_cycles | the count of bus cycles |
+|| perf.stalled_cycles_frontend | stalled_cycles_frontend | the count of stalled frontend cpu cycles |
+|| perf.stalled_cycles_backend | stalled_cycles_backend | the count of stalled backend cpu cycles |
+|| perf.ref_cpu_cycles | ref_cpu_cycles | the count of ref cpu cycles |
+|| perf.cpu_clock | cpu_clock | the count of cpu clock time |
+|| perf.task_clock | task_clock | the count of task clock time |
+|| perf.page_faults | page_faults | the count of page faults |
+|| perf.context_switches | context_switches | the count of context switches |
+|| perf.cpu_migrations | cpu_migrations | the count of cpu migrations |
+|| perf.page_faults_min | page_faults_min | the count of minor page faults |
+|| perf.page_faults_maj | page_faults_maj | the count of major page faults |
+|| perf.alignment_faults | alignment_faults | the count of alignment faults |
+|| perf.emulation_faults | emulation_faults | the count of emulation faults |
+| **block** | block.count | count | number of block devices being listed |
+|| block.\<num\>.name | name | name of the target of the block device \<num\> (the same name for multiple entries if --backing is present) |
+|| block.\<num\>.backingIndex | backingIndex | when --backing is present, matches up with the \<num\> index listed in domain XML for backing files |
+|| block.\<num\>.path | path | file source of block device \<num\>, if it is a local file or block device |
+|| block.\<num\>.rd.reqs | rd_reqs | number of read requests |
+|| block.\<num\>.rd.bytes | rd_bytes | number of read bytes |
+|| block.\<num\>.rd.times | rd_times | total time (ns) spent on reads |
+|| block.\<num\>.wr.reqs | wr_reqs | number of write requests |
+|| block.\<num\>.wr.bytes | wr_bytes | number of written bytes |
+|| block.\<num\>.wr.times | wr_times | total time (ns) spent on writes |
+|| block.\<num\>.fl.reqs | fl_reqs | total flush requests |
+|| block.\<num\>.fl.times | fl_times | total time (ns) spent on cache flushing |
+|| block.\<num\>.errors | errors | Xen only: the 'oo_req' value |
+|| block.\<num\>.allocation | allocation | offset of the highest written sector in bytes |
+|| block.\<num\>.capacity | capacity | logical size of source file in bytes |
+|| block.\<num\>.physical | physical | physical size of source file in bytes |
+|| block.\<num\>.threshold | threshold | threshold (in bytes) for delivering the VIR_DOMAIN_EVENT_ID_BLOCK_THRESHOLD event. See domblkthreshold |
+| **iothread** | iothread.count | count | maximum number of IOThreads in the subsequent list as unsigned int. Each IOThread in the list will use its iothread_id value as the \<id\>. There may be fewer \<id\> entries than the iothread.count value if the polling values are not supported |
+|| iothread.\<id\>.poll-max-ns | poll_max_ns | maximum polling time in nanoseconds used by the \<id\> IOThread. A value of 0 (zero) indicates polling is disabled |
+|| iothread.\<id\>.poll-grow | poll_grow | polling time grow value. A value of 0 (zero) indicates growth is managed by the hypervisor |
+|| iothread.\<id\>.poll-shrink | poll_shrink | polling time shrink value. A value of 0 (zero) indicates shrink is managed by the hypervisor |
+| **memory** | memory.bandwidth.monitor.count | count | the number of memory bandwidth monitors for this domain, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.name | name | the name of monitor \<num\>, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.vcpus | vcpus | the vcpu list of monitor \<num\>, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.node.count | node_count | the number of memory controllers in monitor \<num\>, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.node.\<index\>.id | id | host allocated memory controller id for controller \<index\> of monitor \<num\>, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.node.\<index\>.bytes.local | bytes_local | the accumulative bytes consumed by \<num\>@vcpus that pass through the memory controller in the same processor that the scheduled host CPU belongs to, not available for kernels from 4.14 upwards |
+|| memory.bandwidth.monitor.\<num\>.node.\<index\>.bytes.total | bytes_total | the total bytes consumed by \<num\>@vcpus that pass through all memory controllers, either local or remote controller, not available for kernels from 4.14 upwards |
+| **dirtyrate** | dirtyrate.calc_status | calc_status | the status of the last memory dirty rate calculation, returned as number from virDomainDirtyRateStatus enum |
+|| dirtyrate.calc_start_time | calc_start_time | the start time of the last memory dirty rate calculation |
+|| dirtyrate.calc_period | calc_period | the period of the last memory dirty rate calculation |
+|| dirtyrate.megabytes_per_second | megabytes_per_second | the calculated memory dirty rate in MiB/s |
+|| dirtyrate.calc_mode | calc_mode | the calculation mode used for the last measurement (page-sampling/dirty-bitmap/dirty-ring) |
+|| dirtyrate.vcpu.\<num\>.megabytes_per_second | megabytes_per_second | the calculated memory dirty rate for a virtual cpu in MiB/s |
+
+### Additional statistics
+
+| **Statistics group** | **Exposed Telegraf tag** | **Exposed Telegraf field** |**Description** |
+|:-------------------------------|:-----------------------------:|:-------------------------------:|:-----------------------|
+| **vcpu_mapping** | vcpu_id | --- | ID of Virtual CPU |
+|| --- | cpu_id | Comma-separated list (exposed as a string) of Physical CPU IDs |
+
+## Example Output
+
+```text
+libvirt_cpu_affinity,domain_name=U22,host=localhost,vcpu_id=0 cpu_id="1,2,3" 1662383707000000000
+libvirt_cpu_affinity,domain_name=U22,host=localhost,vcpu_id=1 cpu_id="1,2,3,4,5,6,7,8,9,10" 1662383707000000000
+libvirt_balloon,domain_name=U22,host=localhost current=4194304i,maximum=4194304i,swap_in=0i,swap_out=0i,major_fault=0i,minor_fault=0i,unused=3928628i,available=4018480i,rss=1036012i,usable=3808724i,last_update=1654611373i,disk_caches=68820i,hugetlb_pgalloc=0i,hugetlb_pgfail=0i 1662383709000000000
+libvirt_vcpu_total,domain_name=U22,host=localhost maximum=2i,current=2i 1662383709000000000
+libvirt_vcpu,domain_name=U22,host=localhost,vcpu_id=0 state=1i,time=17943740000000i,wait=0i,halted="no",halted_i=0i,delay=14246609424i,cpu_id=1i 1662383709000000000
+libvirt_vcpu,domain_name=U22,host=localhost,vcpu_id=1 state=1i,time=18288400000000i,wait=0i,halted="yes",halted_i=1i,delay=12902231142i,cpu_id=3i 1662383709000000000
+libvirt_net_total,domain_name=U22,host=localhost count=1i 1662383709000000000
+libvirt_net,domain_name=U22,host=localhost,interface_id=0 name="vnet0",rx_bytes=110i,rx_pkts=1i,rx_errs=0i,rx_drop=31007i,tx_bytes=0i,tx_pkts=0i,tx_errs=0i,tx_drop=0i 1662383709000000000
+libvirt_block_total,domain_name=U22,host=localhost count=1i 1662383709000000000
+libvirt_block,domain_name=U22,host=localhost,block_id=0 name="vda",backingIndex=1i,path="/tmp/ubuntu_image.img",rd_reqs=11354i,rd_bytes=330314752i,rd_times=6240559566i,wr_reqs=52440i,wr_bytes=1183828480i,wr_times=21887150375i,fl_reqs=32250i,fl_times=23158998353i,errors=0i,allocation=770048000i,capacity=2361393152i,physical=770052096i,threshold=2147483648i 1662383709000000000
+libvirt_perf,domain_name=U22,host=localhost cmt=19087360i,mbmt=77168640i,mbml=67788800i,cpu_cycles=29858995122i,instructions=0i,cache_references=3053301695i,cache_misses=609441024i,branch_instructions=2623890194i,branch_misses=103707961i,bus_cycles=188105628i,stalled_cycles_frontend=0i,stalled_cycles_backend=0i,ref_cpu_cycles=30766094039i,cpu_clock=25166642695i,task_clock=25263578917i,page_faults=2670i,context_switches=294284i,cpu_migrations=17949i,page_faults_min=2670i,page_faults_maj=0i,alignment_faults=0i,emulation_faults=0i 1662383709000000000
+libvirt_dirtyrate,domain_name=U22,host=localhost calc_status=2i,calc_start_time=348414i,calc_period=1i,megabytes_per_second=4i,calc_mode="dirty-ring" 1662383709000000000
+libvirt_dirtyrate_vcpu,domain_name=U22,host=localhost,vcpu_id=0 megabytes_per_second=2i 1662383709000000000
+libvirt_dirtyrate_vcpu,domain_name=U22,host=localhost,vcpu_id=1 megabytes_per_second=2i 1662383709000000000
+libvirt_state,domain_name=U22,host=localhost state=1i,reason=5i 1662383709000000000
+libvirt_cpu,domain_name=U22,host=localhost time=67419144867000i,user=63886161852000i,system=3532983015000i,haltpoll_success_time=516907915i,haltpoll_fail_time=2727253643i 1662383709000000000
+libvirt_cpu_cache_monitor_total,domain_name=U22,host=localhost count=1i 1662383709000000000
+libvirt_cpu_cache_monitor,domain_name=U22,host=localhost,cache_monitor_id=0 name="any_name_vcpus_0-3",vcpus="0-3",bank_count=1i 1662383709000000000
+libvirt_cpu_cache_monitor_bank,domain_name=U22,host=localhost,cache_monitor_id=0,bank_index=0 id=0i,bytes=5406720i 1662383709000000000
+libvirt_iothread_total,domain_name=U22,host=localhost count=1i 1662383709000000000
+libvirt_iothread,domain_name=U22,host=localhost,iothread_id=0 poll_max_ns=32768i,poll_grow=0i,poll_shrink=0i 1662383709000000000
+libvirt_memory_bandwidth_monitor_total,domain_name=U22,host=localhost count=2i 1662383709000000000
+libvirt_memory_bandwidth_monitor,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0 name="any_name_vcpus_0-4",vcpus="0-4",node_count=2i 1662383709000000000 +libvirt_memory_bandwidth_monitor,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1 name="vcpus_7",vcpus="7",node_count=2i 1662383709000000000 +libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0,controller_index=0 id=0i,bytes_total=10208067584i,bytes_local=4807114752i 1662383709000000000 +libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=0,controller_index=1 id=1i,bytes_total=8693735424i,bytes_local=5850161152i 1662383709000000000 +libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1,controller_index=0 id=0i,bytes_total=853811200i,bytes_local=290701312i 1662383709000000000 +libvirt_memory_bandwidth_monitor_node,domain_name=U22,host=localhost,memory_bandwidth_monitor_id=1,controller_index=1 id=1i,bytes_total=406044672i,bytes_local=229425152i 1662383709000000000 +``` diff --git a/plugins/inputs/libvirt/libvirt.go b/plugins/inputs/libvirt/libvirt.go new file mode 100644 index 0000000000000..bf945554c1ba3 --- /dev/null +++ b/plugins/inputs/libvirt/libvirt.go @@ -0,0 +1,264 @@ +package libvirt + +import ( + _ "embed" + "fmt" + "golang.org/x/sync/errgroup" + "sync" + + golibvirt "github.com/digitalocean/go-libvirt" + libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +//go:embed sample.conf +var sampleConfig string + +const ( + domainStatsState uint32 = 1 + domainStatsCPUTotal uint32 = 2 + domainStatsBalloon uint32 = 4 + domainStatsVCPU uint32 = 8 + domainStatsInterface uint32 = 16 + domainStatsBlock uint32 = 32 + domainStatsPerf uint32 = 64 + domainStatsIothread uint32 = 128 + domainStatsMemory uint32 = 256 + domainStatsDirtyrate uint32 = 512 + domainStatsAll uint32 = 1023 + defaultLibvirtURI = "qemu:///system" + pluginName = "libvirt" +) + +type Libvirt struct { + LibvirtURI string `toml:"libvirt_uri"` + Domains []string `toml:"domains"` + StatisticsGroups []string `toml:"statistics_groups"` + AdditionalStatistics []string `toml:"additional_statistics"` + Log telegraf.Logger `toml:"-"` + + utils utils + metricNumber uint32 + vcpuMappingEnabled bool + domainsMap map[string]struct{} +} + +func (l *Libvirt) SampleConfig() string { + return sampleConfig +} + +func (l *Libvirt) Init() error { + if len(l.Domains) == 0 { + l.Log.Debugf("No domains given. Collecting metrics from all available domains.") + } + l.domainsMap = make(map[string]struct{}, len(l.Domains)) + for _, domain := range l.Domains { + l.domainsMap[domain] = struct{}{} + } + + if l.LibvirtURI == "" { + l.Log.Debugf("Using default libvirt url - %q", defaultLibvirtURI) + l.LibvirtURI = defaultLibvirtURI + } + + if err := l.validateLibvirtURI(); err != nil { + return err + } + + // setting to defaults only when statistics_groups is missing in config + if l.StatisticsGroups == nil { + l.Log.Debugf("Setting libvirt to gather all metrics.") + l.metricNumber = domainStatsAll + } else { + if err := l.calculateMetricNumber(); err != nil { + return err + } + } + + if err := l.validateAdditionalStatistics(); err != nil { + return err + } + + if !l.isThereAnythingToGather() { + return fmt.Errorf("all configuration options are empty or invalid. 
Did not find anything to gather") + } + + return nil +} + +func (l *Libvirt) validateLibvirtURI() error { + uri := libvirtutils.LibvirtUri{} + err := uri.Unmarshal(l.LibvirtURI) + if err != nil { + return err + } + + // dialer not needed, calling this just for validating libvirt URI as soon as possible: + _, err = libvirtutils.NewDialerFromLibvirtUri(uri) + return err +} + +func (l *Libvirt) calculateMetricNumber() error { + var libvirtMetricNumber = map[string]uint32{ + "state": domainStatsState, + "cpu_total": domainStatsCPUTotal, + "balloon": domainStatsBalloon, + "vcpu": domainStatsVCPU, + "interface": domainStatsInterface, + "block": domainStatsBlock, + "perf": domainStatsPerf, + "iothread": domainStatsIothread, + "memory": domainStatsMemory, + "dirtyrate": domainStatsDirtyrate} + + metricIsSet := make(map[string]bool) + for _, metricName := range l.StatisticsGroups { + metricNumber, exists := libvirtMetricNumber[metricName] + if !exists { + return fmt.Errorf("unrecognized metrics name %q", metricName) + } + if _, ok := metricIsSet[metricName]; ok { + return fmt.Errorf("duplicated statistics group in config: %q", metricName) + } + l.metricNumber += metricNumber + metricIsSet[metricName] = true + } + + return nil +} + +func (l *Libvirt) validateAdditionalStatistics() error { + for _, stat := range l.AdditionalStatistics { + switch stat { + case "vcpu_mapping": + if l.vcpuMappingEnabled { + return fmt.Errorf("duplicated additional statistic in config: %q", stat) + } + l.vcpuMappingEnabled = true + default: + return fmt.Errorf("additional statistics: %v is not supported by this plugin", stat) + } + } + return nil +} + +func (l *Libvirt) isThereAnythingToGather() bool { + return l.metricNumber > 0 || len(l.AdditionalStatistics) > 0 +} + +func (l *Libvirt) Gather(acc telegraf.Accumulator) error { + var err error + if err = l.utils.EnsureConnected(l.LibvirtURI); err != nil { + return err + } + + // Get all available domains + gatheredDomains, err := l.utils.GatherAllDomains() + if handledErr := handleError(err, "error occurred while gathering all domains", l.utils); handledErr != nil { + return handledErr + } else if len(gatheredDomains) == 0 { + l.Log.Debug("Couldn't find any domains on system") + return nil + } + + // Exclude domain. 
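+	// (domains that are not listed in the "domains" config option are dropped;
+	// an empty list keeps every domain reported by the hypervisor)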
+ domains := l.filterDomains(gatheredDomains) + if len(domains) == 0 { + l.Log.Debug("Configured domains are not available on system") + return nil + } + + var vcpuInfos map[string][]vcpuAffinity + if l.vcpuMappingEnabled { + vcpuInfos, err = l.getVcpuMapping(domains) + if handledErr := handleError(err, "error occurred while gathering vcpu mapping", l.utils); handledErr != nil { + return handledErr + } + } + + err = l.gatherMetrics(domains, vcpuInfos, acc) + return handleError(err, "error occurred while gathering metrics", l.utils) +} + +func handleError(err error, errMessage string, utils utils) error { + if err != nil { + if chanErr := utils.Disconnect(); chanErr != nil { + return fmt.Errorf("%s: %v; error occurred when disconnecting: %v", errMessage, err, chanErr) + } + return fmt.Errorf("%s: %v", errMessage, err) + } + return nil +} + +func (l *Libvirt) filterDomains(availableDomains []golibvirt.Domain) []golibvirt.Domain { + if len(l.domainsMap) == 0 { + return availableDomains + } + + var filteredDomains []golibvirt.Domain + for _, domain := range availableDomains { + if _, ok := l.domainsMap[domain.Name]; ok { + filteredDomains = append(filteredDomains, domain) + } + } + + return filteredDomains +} + +func (l *Libvirt) gatherMetrics(domains []golibvirt.Domain, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) error { + stats, err := l.utils.GatherStatsForDomains(domains, l.metricNumber) + if err != nil { + return err + } + + l.addMetrics(stats, vcpuInfos, acc) + return nil +} + +func (l *Libvirt) getVcpuMapping(domains []golibvirt.Domain) (map[string][]vcpuAffinity, error) { + pCPUs, err := l.utils.GatherNumberOfPCPUs() + if err != nil { + return nil, err + } + + var vcpuInfos = make(map[string][]vcpuAffinity) + group := errgroup.Group{} + mutex := &sync.RWMutex{} + for i := range domains { + domain := domains[i] + + // Executing GatherVcpuMapping can take some time, it is worth to call it in parallel + group.Go(func() error { + vcpuInfo, err := l.utils.GatherVcpuMapping(domain, pCPUs, l.shouldGetCurrentPCPU()) + if err != nil { + return err + } + + mutex.Lock() + vcpuInfos[domain.Name] = vcpuInfo + mutex.Unlock() + return nil + }) + } + + err = group.Wait() + if err != nil { + return nil, err + } + + return vcpuInfos, nil +} + +func (l *Libvirt) shouldGetCurrentPCPU() bool { + return l.vcpuMappingEnabled && (l.metricNumber&domainStatsVCPU) != 0 +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + return &Libvirt{ + utils: &utilsImpl{}, + } + }) +} diff --git a/plugins/inputs/libvirt/libvirt_metric_format.go b/plugins/inputs/libvirt/libvirt_metric_format.go new file mode 100644 index 0000000000000..91946ed2cc66e --- /dev/null +++ b/plugins/inputs/libvirt/libvirt_metric_format.go @@ -0,0 +1,571 @@ +package libvirt + +import ( + "regexp" + "strings" + + golibvirt "github.com/digitalocean/go-libvirt" + + "github.com/influxdata/telegraf" +) + +var ( + cpuCacheMonitorRegexp = regexp.MustCompile(`^cache\.monitor\..+?\.(name|vcpus|bank_count)$`) + cpuCacheMonitorBankRegexp = regexp.MustCompile(`^cache\.monitor\..+?\.bank\..+?\.(id|bytes)$`) + memoryBandwidthMonitorRegexp = regexp.MustCompile(`^bandwidth\.monitor\..+?\.(name|vcpus|node_count)$`) + memoryBandwidthMonitorNodeRegexp = regexp.MustCompile(`^bandwidth\.monitor\..+?\.node\..+?\.(id|bytes_local|bytes_total)$`) +) + +func (l *Libvirt) addMetrics(stats []golibvirt.DomainStatsRecord, vcpuInfos map[string][]vcpuAffinity, acc telegraf.Accumulator) { + domainsMetrics := l.translateMetrics(stats) + + for 
domainName, metrics := range domainsMetrics { + for metricType, values := range metrics { + switch metricType { + case "state": + l.addStateMetrics(values, domainName, acc) + case "cpu": + l.addCPUMetrics(values, domainName, acc) + case "balloon": + l.addBalloonMetrics(values, domainName, acc) + case "vcpu": + l.addVcpuMetrics(values, domainName, vcpuInfos[domainName], acc) + case "net": + l.addInterfaceMetrics(values, domainName, acc) + case "perf": + l.addPerfMetrics(values, domainName, acc) + case "block": + l.addBlockMetrics(values, domainName, acc) + case "iothread": + l.addIothreadMetrics(values, domainName, acc) + case "memory": + l.addMemoryMetrics(values, domainName, acc) + case "dirtyrate": + l.addDirtyrateMetrics(values, domainName, acc) + } + } + } + + if l.vcpuMappingEnabled { + for domainName, vcpuInfo := range vcpuInfos { + var tags = make(map[string]string) + var fields = make(map[string]interface{}) + + for _, vcpu := range vcpuInfo { + tags["domain_name"] = domainName + tags["vcpu_id"] = vcpu.vcpuID + fields["cpu_id"] = vcpu.coresAffinity + acc.AddFields("libvirt_cpu_affinity", fields, tags) + } + } + } +} + +func (l *Libvirt) translateMetrics(stats []golibvirt.DomainStatsRecord) map[string]map[string]map[string]golibvirt.TypedParamValue { + metrics := make(map[string]map[string]map[string]golibvirt.TypedParamValue) + for _, stat := range stats { + if stat.Params != nil { + if metrics[stat.Dom.Name] == nil { + metrics[stat.Dom.Name] = make(map[string]map[string]golibvirt.TypedParamValue) + } + + for _, params := range stat.Params { + statGroup := strings.Split(params.Field, ".")[0] + if metrics[stat.Dom.Name][statGroup] == nil { + metrics[stat.Dom.Name][statGroup] = make(map[string]golibvirt.TypedParamValue) + } + + metrics[stat.Dom.Name][statGroup][strings.TrimPrefix(params.Field, statGroup+".")] = params.Value + } + } + } + + return metrics +} + +func (l *Libvirt) addStateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var stateFields = make(map[string]interface{}) + var stateTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "state", "reason": + stateFields[key] = metric.I + } + } + + if len(stateFields) > 0 { + acc.AddFields("libvirt_state", stateFields, stateTags) + } +} + +func (l *Libvirt) addCPUMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var cpuFields = make(map[string]interface{}) + var cpuCacheMonitorTotalFields = make(map[string]interface{}) + + var cpuCacheMonitorData = make(map[string]map[string]interface{}) + var cpuCacheMonitorBankData = make(map[string]map[string]map[string]interface{}) + + var cpuTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "time", "user", "system": + cpuFields[key] = metric.I + case "haltpoll.success.time", "haltpoll.fail.time": + cpuFields[strings.ReplaceAll(key, ".", "_")] = metric.I + case "cache.monitor.count": + cpuCacheMonitorTotalFields["count"] = metric.I + default: + if strings.Contains(key, "bank.count") { + key = strings.ReplaceAll(key, "bank.count", "bank_count") + } + + cpuStat := strings.Split(key, ".") + if len(cpuStat) == 4 && cpuCacheMonitorRegexp.MatchString(key) { + cacheMonitorID := cpuStat[2] + cpuCacheMonitorFields, ok := cpuCacheMonitorData[cacheMonitorID] + if !ok { + cpuCacheMonitorFields = make(map[string]interface{}) + cpuCacheMonitorData[cacheMonitorID] = 
cpuCacheMonitorFields + } + + cpuCacheMonitorFields[cpuStat[3]] = metric.I + } else if len(cpuStat) == 6 && cpuCacheMonitorBankRegexp.MatchString(key) { + cacheMonitorID := cpuStat[2] + bankIndex := cpuStat[4] + + bankData, ok := cpuCacheMonitorBankData[cacheMonitorID] + if !ok { + bankData = make(map[string]map[string]interface{}) + cpuCacheMonitorBankData[cacheMonitorID] = bankData + } + + bankFields, ok := cpuCacheMonitorBankData[cacheMonitorID][bankIndex] + if !ok { + bankFields = make(map[string]interface{}) + bankData[bankIndex] = bankFields + } + + bankFields[cpuStat[5]] = metric.I + } + } + } + + if len(cpuFields) > 0 { + acc.AddFields("libvirt_cpu", cpuFields, cpuTags) + } + + if len(cpuCacheMonitorTotalFields) > 0 { + acc.AddFields("libvirt_cpu_cache_monitor_total", cpuCacheMonitorTotalFields, cpuTags) + } + + for cpuID, cpuCacheMonitorFields := range cpuCacheMonitorData { + if len(cpuCacheMonitorFields) > 0 { + cpuCacheMonitorTags := map[string]string{ + "domain_name": domainName, + "cache_monitor_id": cpuID, + } + acc.AddFields("libvirt_cpu_cache_monitor", cpuCacheMonitorFields, cpuCacheMonitorTags) + } + } + + for cacheMonitorID, bankData := range cpuCacheMonitorBankData { + for bankIndex, bankFields := range bankData { + if len(bankFields) > 0 { + bankTags := map[string]string{ + "domain_name": domainName, + "cache_monitor_id": cacheMonitorID, + "bank_index": bankIndex, + } + acc.AddFields("libvirt_cpu_cache_monitor_bank", bankFields, bankTags) + } + } + } +} + +func (l *Libvirt) addBalloonMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var balloonFields = make(map[string]interface{}) + var balloonTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "current", "maximum", "swap_in", "swap_out", "major_fault", "minor_fault", "unused", "available", + "rss", "usable", "disk_caches", "hugetlb_pgalloc", "hugetlb_pgfail": + balloonFields[key] = metric.I + case "last-update": + balloonFields["last_update"] = metric.I + } + } + + if len(balloonFields) > 0 { + acc.AddFields("libvirt_balloon", balloonFields, balloonTags) + } +} + +func (l *Libvirt) addVcpuMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, vcpuInfos []vcpuAffinity, acc telegraf.Accumulator) { + var vcpuTotalFields = make(map[string]interface{}) + var vcpuData = make(map[string]map[string]interface{}) + + var vcpuTotalTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "current", "maximum": + vcpuTotalFields[key] = metric.I + default: + vcpuStat := strings.Split(key, ".") + if len(vcpuStat) != 2 { + continue + } + vcpuID := vcpuStat[0] + fieldName := vcpuStat[1] + vcpuFields, ok := vcpuData[vcpuID] + if !ok { + vcpuFields = make(map[string]interface{}) + vcpuData[vcpuID] = vcpuFields + } + + switch fieldName { + case "halted": + haltedIntegerValue := 0 + if metric.I == "yes" { + haltedIntegerValue = 1 + } + + vcpuFields["halted_i"] = haltedIntegerValue + fallthrough + case "state", "time", "wait", "delay": + vcpuFields[fieldName] = metric.I + } + } + } + + if len(vcpuTotalFields) > 0 { + acc.AddFields("libvirt_vcpu_total", vcpuTotalFields, vcpuTotalTags) + } + + for vcpuID, vcpuFields := range vcpuData { + if len(vcpuFields) > 0 { + vcpuTags := map[string]string{ + "domain_name": domainName, + "vcpu_id": vcpuID, + } + + if pCPUID := l.getCurrentPCPUForVCPU(vcpuID, vcpuInfos); pCPUID >= 0 { + vcpuFields["cpu_id"] = 
pCPUID + } + + acc.AddFields("libvirt_vcpu", vcpuFields, vcpuTags) + } + } +} + +func (l *Libvirt) getCurrentPCPUForVCPU(vcpuID string, vcpuInfos []vcpuAffinity) int32 { + if !l.shouldGetCurrentPCPU() { + return -1 + } + + for _, vcpuInfo := range vcpuInfos { + if vcpuInfo.vcpuID == vcpuID { + return vcpuInfo.currentPCPUID + } + } + + return -1 +} + +func (l *Libvirt) addInterfaceMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var netTotalFields = make(map[string]interface{}) + var netData = make(map[string]map[string]interface{}) + + var netTotalTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + if key == "count" { + netTotalFields[key] = metric.I + } else { + netStat := strings.SplitN(key, ".", 2) + if len(netStat) < 2 { + continue + } + + netID := netStat[0] + netFields, ok := netData[netID] + if !ok { + netFields = make(map[string]interface{}) + netData[netID] = netFields + } + + fieldName := strings.ReplaceAll(netStat[1], ".", "_") + switch fieldName { + case "name", "rx_bytes", "rx_pkts", "rx_errs", "rx_drop", "tx_bytes", "tx_pkts", "tx_errs", "tx_drop": + netFields[fieldName] = metric.I + } + } + } + + if len(netTotalFields) > 0 { + acc.AddFields("libvirt_net_total", netTotalFields, netTotalTags) + } + + for netID, netFields := range netData { + if len(netFields) > 0 { + netTags := map[string]string{ + "domain_name": domainName, + "interface_id": netID, + } + acc.AddFields("libvirt_net", netFields, netTags) + } + } +} + +func (l *Libvirt) addPerfMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var perfFields = make(map[string]interface{}) + var perfTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "cmt", "mbmt", "mbml", "cpu_cycles", "instructions", "cache_references", "cache_misses", + "branch_instructions", "branch_misses", "bus_cycles", "stalled_cycles_frontend", "stalled_cycles_backend", + "ref_cpu_cycles", "cpu_clock", "task_clock", "page_faults", "context_switches", + "cpu_migrations", "page_faults_min", "page_faults_maj", "alignment_faults", "emulation_faults": + perfFields[key] = metric.I + } + } + + if len(perfFields) > 0 { + acc.AddFields("libvirt_perf", perfFields, perfTags) + } +} + +func (l *Libvirt) addBlockMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var blockTotalFields = make(map[string]interface{}) + var blockData = make(map[string]map[string]interface{}) + + var blockTotalTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + if key == "count" { + blockTotalFields["count"] = metric.I + } else { + blockStat := strings.SplitN(key, ".", 2) + if len(blockStat) < 2 { + continue + } + + blockID := blockStat[0] + blockFields, ok := blockData[blockID] + if !ok { + blockFields = make(map[string]interface{}) + blockData[blockID] = blockFields + } + + fieldName := strings.ReplaceAll(blockStat[1], ".", "_") + switch fieldName { + case "name", "backingIndex", "path", "rd_reqs", "rd_bytes", "rd_times", "wr_reqs", "wr_bytes", "wr_times", + "fl_reqs", "fl_times", "errors", "allocation", "capacity", "physical", "threshold": + blockFields[fieldName] = metric.I + } + } + } + + if len(blockTotalFields) > 0 { + acc.AddFields("libvirt_block_total", blockTotalFields, blockTotalTags) + } + + for blockID, blockFields := range blockData { + if len(blockFields) > 0 
{ + blockTags := map[string]string{ + "domain_name": domainName, + "block_id": blockID, + } + acc.AddFields("libvirt_block", blockFields, blockTags) + } + } +} + +func (l *Libvirt) addIothreadMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var iothreadTotalFields = make(map[string]interface{}) + var iothreadData = make(map[string]map[string]interface{}) + + var iothreadTotalTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + if key == "count" { + iothreadTotalFields["count"] = metric.I + } else { + iothreadStat := strings.Split(key, ".") + if len(iothreadStat) != 2 { + continue + } + + iothreadID := iothreadStat[0] + iothreadFields, ok := iothreadData[iothreadID] + if !ok { + iothreadFields = make(map[string]interface{}) + iothreadData[iothreadID] = iothreadFields + } + + fieldName := strings.ReplaceAll(iothreadStat[1], "-", "_") + switch fieldName { + case "poll_max_ns", "poll_grow", "poll_shrink": + iothreadFields[fieldName] = metric.I + } + } + } + + if len(iothreadTotalFields) > 0 { + acc.AddFields("libvirt_iothread_total", iothreadTotalFields, iothreadTotalTags) + } + + for iothreadID, iothreadFields := range iothreadData { + if len(iothreadFields) > 0 { + iothreadTags := map[string]string{ + "domain_name": domainName, + "iothread_id": iothreadID, + } + acc.AddFields("libvirt_iothread", iothreadFields, iothreadTags) + } + } +} + +func (l *Libvirt) addMemoryMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var memoryBandwidthMonitorTotalFields = make(map[string]interface{}) + + var memoryBandwidthMonitorData = make(map[string]map[string]interface{}) + var memoryBandwidthMonitorNodeData = make(map[string]map[string]map[string]interface{}) + + var memoryBandwidthMonitorTotalTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "bandwidth.monitor.count": + memoryBandwidthMonitorTotalFields["count"] = metric.I + default: + if strings.Contains(key, "node.count") { + key = strings.ReplaceAll(key, "node.count", "node_count") + } else if strings.Contains(key, "bytes.local") { + key = strings.ReplaceAll(key, "bytes.local", "bytes_local") + } else if strings.Contains(key, "bytes.total") { + key = strings.ReplaceAll(key, "bytes.total", "bytes_total") + } + + memoryStat := strings.Split(key, ".") + if len(memoryStat) == 4 && memoryBandwidthMonitorRegexp.MatchString(key) { + memoryBandwidthMonitorID := memoryStat[2] + memoryBandwidthMonitorFields, ok := memoryBandwidthMonitorData[memoryBandwidthMonitorID] + if !ok { + memoryBandwidthMonitorFields = make(map[string]interface{}) + memoryBandwidthMonitorData[memoryBandwidthMonitorID] = memoryBandwidthMonitorFields + } + + memoryBandwidthMonitorFields[memoryStat[3]] = metric.I + } else if len(memoryStat) == 6 && memoryBandwidthMonitorNodeRegexp.MatchString(key) { + memoryBandwidthMonitorID := memoryStat[2] + controllerIndex := memoryStat[4] + + nodeData, ok := memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID] + if !ok { + nodeData = make(map[string]map[string]interface{}) + memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID] = nodeData + } + + nodeFields, ok := memoryBandwidthMonitorNodeData[memoryBandwidthMonitorID][controllerIndex] + if !ok { + nodeFields = make(map[string]interface{}) + nodeData[controllerIndex] = nodeFields + } + + nodeFields[memoryStat[5]] = metric.I + } + } + } + + if 
len(memoryBandwidthMonitorTotalFields) > 0 { + acc.AddFields("libvirt_memory_bandwidth_monitor_total", memoryBandwidthMonitorTotalFields, memoryBandwidthMonitorTotalTags) + } + + for memoryBandwidthMonitorID, memoryFields := range memoryBandwidthMonitorData { + if len(memoryFields) > 0 { + tags := map[string]string{ + "domain_name": domainName, + "memory_bandwidth_monitor_id": memoryBandwidthMonitorID, + } + acc.AddFields("libvirt_memory_bandwidth_monitor", memoryFields, tags) + } + } + + for memoryBandwidthMonitorID, nodeData := range memoryBandwidthMonitorNodeData { + for controllerIndex, nodeFields := range nodeData { + if len(nodeFields) > 0 { + tags := map[string]string{ + "domain_name": domainName, + "memory_bandwidth_monitor_id": memoryBandwidthMonitorID, + "controller_index": controllerIndex, + } + acc.AddFields("libvirt_memory_bandwidth_monitor_node", nodeFields, tags) + } + } + } +} + +func (l *Libvirt) addDirtyrateMetrics(metrics map[string]golibvirt.TypedParamValue, domainName string, acc telegraf.Accumulator) { + var dirtyrateFields = make(map[string]interface{}) + var dirtyrateVcpuData = make(map[string]map[string]interface{}) + + var dirtyrateTags = map[string]string{ + "domain_name": domainName, + } + + for key, metric := range metrics { + switch key { + case "calc_status", "calc_start_time", "calc_period", + "megabytes_per_second", "calc_mode": + dirtyrateFields[key] = metric.I + default: + dirtyrateStat := strings.Split(key, ".") + if len(dirtyrateStat) == 3 && dirtyrateStat[0] == "vcpu" && dirtyrateStat[2] == "megabytes_per_second" { + vcpuID := dirtyrateStat[1] + dirtyRateFields, ok := dirtyrateVcpuData[vcpuID] + if !ok { + dirtyRateFields = make(map[string]interface{}) + dirtyrateVcpuData[vcpuID] = dirtyRateFields + } + dirtyRateFields[dirtyrateStat[2]] = metric.I + } + } + } + + if len(dirtyrateFields) > 0 { + acc.AddFields("libvirt_dirtyrate", dirtyrateFields, dirtyrateTags) + } + + for vcpuID, dirtyRateFields := range dirtyrateVcpuData { + if len(dirtyRateFields) > 0 { + dirtyRateTags := map[string]string{ + "domain_name": domainName, + "vcpu_id": vcpuID, + } + acc.AddFields("libvirt_dirtyrate_vcpu", dirtyRateFields, dirtyRateTags) + } + } +} diff --git a/plugins/inputs/libvirt/libvirt_test.go b/plugins/inputs/libvirt/libvirt_test.go new file mode 100644 index 0000000000000..3039c64d6402b --- /dev/null +++ b/plugins/inputs/libvirt/libvirt_test.go @@ -0,0 +1,1066 @@ +package libvirt + +import ( + "fmt" + "testing" + "time" + + golibvirt "github.com/digitalocean/go-libvirt" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func TestLibvirt_Init(t *testing.T) { + t.Run("throw error when user provided duplicated state metric name", func(t *testing.T) { + l := Libvirt{ + StatisticsGroups: []string{"state", "state"}, + Log: testutil.Logger{}, + } + err := l.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "duplicated statistics group in config") + }) + + t.Run("throw error when user provided wrong metric name", func(t *testing.T) { + l := Libvirt{ + StatisticsGroups: []string{"statusQvo"}, + Log: testutil.Logger{}, + } + err := l.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "unrecognized metrics name") + }) + + t.Run("throw error when user provided invalid uri", func(t *testing.T) { + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + LibvirtURI: "this/is/wrong/uri", + utils: &mockLibvirtUtils, + Log: 
testutil.Logger{}, + } + err := l.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "can't parse") + }) + + t.Run("successfully initialize libvirt on correct user input", func(t *testing.T) { + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + StatisticsGroups: []string{"state", "cpu_total", "vcpu", "interface"}, + utils: &mockLibvirtUtils, + LibvirtURI: defaultLibvirtURI, + Log: testutil.Logger{}, + } + err := l.Init() + require.NoError(t, err) + }) +} + +func TestLibvirt_Gather(t *testing.T) { + t.Run("wrong uri throws error", func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + LibvirtURI: "this/is/wrong/uri", + Log: testutil.Logger{}, + utils: &mockLibvirtUtils, + } + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(fmt.Errorf("failed to connect")).Once() + err := l.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to connect") + mockLibvirtUtils.AssertExpectations(t) + }) + + t.Run("error when gathering domains fails", func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + utils: &mockLibvirtUtils, + Log: testutil.Logger{}, + StatisticsGroups: []string{"state"}, + } + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once(). + On("GatherAllDomains", mock.Anything).Return(nil, fmt.Errorf("gather domain error")).Once(). + On("Disconnect").Return(nil).Once() + + err := l.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "gather domain error") + mockLibvirtUtils.AssertExpectations(t) + }) + + t.Run("no error when empty list of domains is returned", func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + utils: &mockLibvirtUtils, + Log: testutil.Logger{}, + StatisticsGroups: []string{"state"}, + } + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once(). + On("GatherAllDomains", mock.Anything).Return([]golibvirt.Domain{}, nil).Once() + + err := l.Gather(&acc) + require.NoError(t, err) + mockLibvirtUtils.AssertExpectations(t) + }) + + t.Run("error when gathering metrics by number", func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + utils: &mockLibvirtUtils, + Log: testutil.Logger{}, + StatisticsGroups: []string{"state"}, + } + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once(). + On("GatherAllDomains", mock.Anything).Return(domains, nil).Once(). + On("GatherStatsForDomains", mock.Anything, mock.Anything). + Return(nil, fmt.Errorf("gathering metric by number error")).Once().
+ On("Disconnect").Return(nil).Once() + + err := l.Init() + require.NoError(t, err) + + err = l.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "gathering metric by number error") + mockLibvirtUtils.AssertExpectations(t) + }) + + var successfulTests = []struct { + testName string + allDomains interface{} + excludeDomains []string + statsForDomains interface{} + expectedMetrics []telegraf.Metric + vcpuMapping []vcpuAffinity + }{ + {"successfully gather from host that has domains", domains, nil, domainStats, append(expectedMetrics, expectedVcpuAffinityMetrics...), vcpusMapping}, + {"successfully gather from host for excluded domain", domains, []string{"Droplet-33436"}, domainStats[1:], append(expectedMetrics[1:], expectedVcpuAffinityMetrics[2:]...), vcpusMapping}, + } + for _, test := range successfulTests { + t.Run(test.testName, func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + utils: &mockLibvirtUtils, + Log: testutil.Logger{}, + StatisticsGroups: []string{"state"}, + Domains: test.excludeDomains, + AdditionalStatistics: []string{"vcpu_mapping"}, + } + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once(). + On("GatherAllDomains", mock.Anything).Return(test.allDomains, nil).Once(). + On("GatherVcpuMapping", domains[0], mock.Anything, mock.Anything).Return(test.vcpuMapping, nil).Maybe(). + On("GatherVcpuMapping", domains[1], mock.Anything, mock.Anything).Return(test.vcpuMapping, nil).Once(). + On("GatherNumberOfPCPUs").Return(4, nil).Once(). + On("GatherStatsForDomains", mock.Anything, mock.Anything).Return(test.statsForDomains, nil).Once() + + err := l.Init() + require.NoError(t, err) + + err = l.Gather(&acc) + require.NoError(t, err) + + actual := acc.GetTelegrafMetrics() + expected := test.expectedMetrics + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics(), testutil.IgnoreTime()) + mockLibvirtUtils.AssertExpectations(t) + }) + } +} + +func TestLibvirt_GatherMetrics(t *testing.T) { + var successfulTests = []struct { + testName string + allDomains interface{} + excludeDomains []string + statsForDomains interface{} + expectedMetrics []telegraf.Metric + vcpuMapping []vcpuAffinity + }{ + {"successfully gather memory metrics from host that has domains", domains, nil, memoryStats, expectedMemoryMetrics, nil}, + {"successfully gather balloon metrics from host that has domains", domains, nil, balloonStats, expectedBalloonMetrics, nil}, + {"successfully gather perf metrics from host that has domains", domains, nil, perfStats, expectedPerfMetrics, nil}, + {"successfully gather cpu metrics from host that has domains", domains, nil, cpuStats, expectedCPUMetrics, nil}, + {"successfully gather interface metrics from host that has domains", domains, nil, interfaceStats, expectedInterfaceMetrics, nil}, + {"successfully gather block metrics from host that has domains", domains, nil, blockStats, expectedBlockMetrics, nil}, + {"successfully gather iothread metrics from host that has domains", domains, nil, iothreadStats, expectedIOThreadMetrics, nil}, + {"successfully gather dirtyrate metrics from host that has domains", domains, nil, dirtyrateStats, expectedDirtyrateMetrics, nil}, + {"successfully gather vcpu metrics from host that has domains", domains, nil, vcpuStats, expectedVCPUMetrics, nil}, + {"successfully gather vcpu metrics with vCPU from host that has domains", domains, nil, vcpuStats, expectedExtendedVCPUMetrics, vcpusMapping}, + } + for _, test := range successfulTests 
{ + t.Run(test.testName, func(t *testing.T) { + var acc testutil.Accumulator + mockLibvirtUtils := MockLibvirtUtils{} + l := Libvirt{ + utils: &mockLibvirtUtils, + Log: testutil.Logger{}, + StatisticsGroups: []string{}, + Domains: test.excludeDomains, + AdditionalStatistics: []string{}, + } + + mockLibvirtUtils.On("EnsureConnected", mock.Anything).Return(nil).Once(). + On("GatherAllDomains", mock.Anything).Return(test.allDomains, nil).Once(). + On("GatherStatsForDomains", mock.Anything, mock.Anything).Return(test.statsForDomains, nil).Once() + + if test.vcpuMapping != nil { + l.vcpuMappingEnabled = true + l.metricNumber = domainStatsVCPU + mockLibvirtUtils.On("GatherNumberOfPCPUs").Return(4, nil).Once(). + On("GatherVcpuMapping", domains[0], mock.Anything, mock.Anything).Return(test.vcpuMapping, nil).Once(). + On("GatherVcpuMapping", domains[1], mock.Anything, mock.Anything).Return([]vcpuAffinity{}, nil).Once() + } + + err := l.Gather(&acc) + require.NoError(t, err) + + actual := acc.GetTelegrafMetrics() + expected := test.expectedMetrics + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics(), testutil.IgnoreTime()) + mockLibvirtUtils.AssertExpectations(t) + }) + } +} + +func TestLibvirt_validateLibvirtUri(t *testing.T) { + t.Run("no error on good uri provided", func(t *testing.T) { + l := Libvirt{ + LibvirtURI: defaultLibvirtURI, + Log: testutil.Logger{}, + } + err := l.validateLibvirtURI() + require.NoError(t, err) + }) + + t.Run("unmarshal error on bad uri provided", func(t *testing.T) { + l := Libvirt{ + LibvirtURI: "this/is/invalid/uri", + Log: testutil.Logger{}, + } + err := l.validateLibvirtURI() + require.Error(t, err) + require.Contains(t, err.Error(), "can't parse '"+l.LibvirtURI+"' as a libvirt uri") + }) + + t.Run("dialer error on bad ssh uri provided", func(t *testing.T) { + l := Libvirt{ + LibvirtURI: "qemu+ssh://invalid@host:666/system", + Log: testutil.Logger{}, + } + err := l.validateLibvirtURI() + require.Error(t, err) + require.Contains(t, err.Error(), "ssh transport requires keyfile parameter") + }) +} + +func TestLibvirt_calculateMetricNumber(t *testing.T) { + t.Run("error on duplicated metric name", func(t *testing.T) { + l := Libvirt{ + StatisticsGroups: []string{"state", "state"}, + Log: testutil.Logger{}, + } + err := l.calculateMetricNumber() + require.Error(t, err) + require.Contains(t, err.Error(), "duplicated statistics group in config") + }) + + t.Run("error on unrecognized metric name", func(t *testing.T) { + l := Libvirt{ + StatisticsGroups: []string{"invalidName"}, + Log: testutil.Logger{}, + } + err := l.calculateMetricNumber() + require.Error(t, err) + require.Contains(t, err.Error(), "unrecognized metrics name") + }) + + t.Run("correctly calculates metrics number provided", func(t *testing.T) { + metrics := []string{"state", "cpu_total", "vcpu", "interface", "block", "balloon", + "memory", "perf", "iothread", "dirtyrate"} + + l := Libvirt{ + StatisticsGroups: metrics, + Log: testutil.Logger{}, + } + err := l.calculateMetricNumber() + require.NoError(t, err) + require.Equal(t, domainStatsAll, l.metricNumber) + }) +} + +func TestLibvirt_filterDomains(t *testing.T) { + t.Run("successfully filters domains", func(t *testing.T) { + l := Libvirt{ + Domains: []string{"Droplet-844329", "Droplet-33436"}, + Log: testutil.Logger{}, + } + + result := l.filterDomains(domains) + require.NotEmpty(t, result) + }) + + t.Run("empty result when no domain matches the filter", func(t *testing.T) { + l := Libvirt{ + Domains: []string{"Not-existing-domain"}, + Log: testutil.Logger{}, + } + + result := l.filterDomains(domains) + require.Empty(t, result) + }) +} + +var ( + domains = []golibvirt.Domain{ + {Name: "Droplet-844329", UUID: 
golibvirt.UUID{}, ID: 0}, + {Name: "Droplet-33436", UUID: golibvirt.UUID{}, ID: 0}, + } + + domainStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "state.reason", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "state.state", Value: *golibvirt.NewTypedParamValueLlong(1)}, + }, + }, + { + Dom: domains[1], + Params: []golibvirt.TypedParam{ + {Field: "state.reason", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "state.state", Value: *golibvirt.NewTypedParamValueLlong(1)}, + }, + }, + } + + memoryStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "memory.bandwidth.monitor.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "memory.bandwidth.monitor.0.name", Value: *golibvirt.NewTypedParamValueString("any_name_vcpus_0-4")}, + {Field: "memory.bandwidth.monitor.0.vcpus", Value: *golibvirt.NewTypedParamValueString("0-4")}, + {Field: "memory.bandwidth.monitor.0.node.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "memory.bandwidth.monitor.1.name", Value: *golibvirt.NewTypedParamValueString("vcpus_7")}, + {Field: "memory.bandwidth.monitor.1.vcpus", Value: *golibvirt.NewTypedParamValueString("7")}, + {Field: "memory.bandwidth.monitor.1.node.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "memory.bandwidth.monitor.0.node.0.id", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "memory.bandwidth.monitor.0.node.0.bytes.total", Value: *golibvirt.NewTypedParamValueLlong(10208067584)}, + {Field: "memory.bandwidth.monitor.0.node.0.bytes.local", Value: *golibvirt.NewTypedParamValueLlong(4807114752)}, + {Field: "memory.bandwidth.monitor.0.node.1.id", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "memory.bandwidth.monitor.0.node.1.bytes.total", Value: *golibvirt.NewTypedParamValueLlong(8693735424)}, + {Field: "memory.bandwidth.monitor.0.node.1.bytes.local", Value: *golibvirt.NewTypedParamValueLlong(5850161152)}, + {Field: "memory.bandwidth.monitor.1.node.0.id", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "memory.bandwidth.monitor.1.node.0.bytes.total", Value: *golibvirt.NewTypedParamValueLlong(853811200)}, + {Field: "memory.bandwidth.monitor.1.node.0.bytes.local", Value: *golibvirt.NewTypedParamValueLlong(290701312)}, + {Field: "memory.bandwidth.monitor.1.node.1.id", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "memory.bandwidth.monitor.1.node.1.bytes.total", Value: *golibvirt.NewTypedParamValueLlong(406044672)}, + {Field: "memory.bandwidth.monitor.1.node.1.bytes.local", Value: *golibvirt.NewTypedParamValueLlong(229425152)}, + }, + }, + } + + cpuStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "cpu.time", Value: *golibvirt.NewTypedParamValueLlong(67419144867000)}, + {Field: "cpu.user", Value: *golibvirt.NewTypedParamValueLlong(63886161852000)}, + {Field: "cpu.system", Value: *golibvirt.NewTypedParamValueLlong(3532983015000)}, + {Field: "cpu.haltpoll.success.time", Value: *golibvirt.NewTypedParamValueLlong(516907915)}, + {Field: "cpu.haltpoll.fail.time", Value: *golibvirt.NewTypedParamValueLlong(2727253643)}, + {Field: "cpu.cache.monitor.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "cpu.cache.monitor.0.name", Value: *golibvirt.NewTypedParamValueString("any_name_vcpus_0-3")}, + {Field: "cpu.cache.monitor.0.vcpus", Value: *golibvirt.NewTypedParamValueString("0-3")}, + {Field: "cpu.cache.monitor.0.bank.count", Value: 
*golibvirt.NewTypedParamValueLlong(2)}, + {Field: "cpu.cache.monitor.1.name", Value: *golibvirt.NewTypedParamValueString("vcpus_4-9")}, + {Field: "cpu.cache.monitor.1.vcpus", Value: *golibvirt.NewTypedParamValueString("4-9")}, + {Field: "cpu.cache.monitor.1.bank.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "cpu.cache.monitor.0.bank.0.id", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "cpu.cache.monitor.0.bank.0.bytes", Value: *golibvirt.NewTypedParamValueLlong(5406720)}, + {Field: "cpu.cache.monitor.0.bank.1.id", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "cpu.cache.monitor.0.bank.1.bytes", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "cpu.cache.monitor.1.bank.0.id", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "cpu.cache.monitor.1.bank.0.bytes", Value: *golibvirt.NewTypedParamValueLlong(720896)}, + {Field: "cpu.cache.monitor.1.bank.1.id", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "cpu.cache.monitor.1.bank.1.bytes", Value: *golibvirt.NewTypedParamValueLlong(8200192)}, + }, + }, + } + + balloonStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "balloon.current", Value: *golibvirt.NewTypedParamValueLlong(4194304)}, + {Field: "balloon.maximum", Value: *golibvirt.NewTypedParamValueLlong(4194304)}, + {Field: "balloon.swap_in", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "balloon.swap_out", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "balloon.major_fault", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "balloon.minor_fault", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "balloon.unused", Value: *golibvirt.NewTypedParamValueLlong(3928628)}, + {Field: "balloon.available", Value: *golibvirt.NewTypedParamValueLlong(4018480)}, + {Field: "balloon.rss", Value: *golibvirt.NewTypedParamValueLlong(1036012)}, + {Field: "balloon.usable", Value: *golibvirt.NewTypedParamValueLlong(3808724)}, + {Field: "balloon.last-update", Value: *golibvirt.NewTypedParamValueLlong(1654611373)}, + {Field: "balloon.disk_caches", Value: *golibvirt.NewTypedParamValueLlong(68820)}, + {Field: "balloon.hugetlb_pgalloc", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "balloon.hugetlb_pgfail", Value: *golibvirt.NewTypedParamValueLlong(0)}, + }, + }, + } + + perfStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "perf.cmt", Value: *golibvirt.NewTypedParamValueLlong(19087360)}, + {Field: "perf.mbmt", Value: *golibvirt.NewTypedParamValueLlong(77168640)}, + {Field: "perf.mbml", Value: *golibvirt.NewTypedParamValueLlong(67788800)}, + {Field: "perf.cpu_cycles", Value: *golibvirt.NewTypedParamValueLlong(29858995122)}, + {Field: "perf.instructions", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "perf.cache_references", Value: *golibvirt.NewTypedParamValueLlong(3053301695)}, + {Field: "perf.cache_misses", Value: *golibvirt.NewTypedParamValueLlong(609441024)}, + {Field: "perf.branch_instructions", Value: *golibvirt.NewTypedParamValueLlong(2623890194)}, + {Field: "perf.branch_misses", Value: *golibvirt.NewTypedParamValueLlong(103707961)}, + {Field: "perf.bus_cycles", Value: *golibvirt.NewTypedParamValueLlong(188105628)}, + {Field: "perf.stalled_cycles_frontend", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "perf.stalled_cycles_backend", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "perf.ref_cpu_cycles", Value: *golibvirt.NewTypedParamValueLlong(30766094039)}, + 
{Field: "perf.cpu_clock", Value: *golibvirt.NewTypedParamValueLlong(25166642695)}, + {Field: "perf.task_clock", Value: *golibvirt.NewTypedParamValueLlong(25263578917)}, + {Field: "perf.page_faults", Value: *golibvirt.NewTypedParamValueLlong(2670)}, + {Field: "perf.context_switches", Value: *golibvirt.NewTypedParamValueLlong(294284)}, + {Field: "perf.cpu_migrations", Value: *golibvirt.NewTypedParamValueLlong(17949)}, + {Field: "perf.page_faults_min", Value: *golibvirt.NewTypedParamValueLlong(2670)}, + {Field: "perf.page_faults_maj", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "perf.alignment_faults", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "perf.emulation_faults", Value: *golibvirt.NewTypedParamValueLlong(0)}, + }, + }, + } + + interfaceStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "net.count", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "net.0.name", Value: *golibvirt.NewTypedParamValueString("vnet0")}, + {Field: "net.0.rx.bytes", Value: *golibvirt.NewTypedParamValueLlong(110)}, + {Field: "net.0.rx.pkts", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "net.0.rx.errs", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "net.0.rx.drop", Value: *golibvirt.NewTypedParamValueLlong(31007)}, + {Field: "net.0.tx.bytes", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "net.0.tx.pkts", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "net.0.tx.errs", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "net.0.tx.drop", Value: *golibvirt.NewTypedParamValueLlong(0)}, + }, + }, + } + + blockStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "block.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "block.0.name", Value: *golibvirt.NewTypedParamValueString("vda")}, + {Field: "block.0.backingIndex", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "block.0.path", Value: *golibvirt.NewTypedParamValueString("/tmp/ubuntu_image.img")}, + {Field: "block.0.rd.reqs", Value: *golibvirt.NewTypedParamValueLlong(11354)}, + {Field: "block.0.rd.bytes", Value: *golibvirt.NewTypedParamValueLlong(330314752)}, + {Field: "block.0.rd.times", Value: *golibvirt.NewTypedParamValueLlong(6240559566)}, + {Field: "block.0.wr.reqs", Value: *golibvirt.NewTypedParamValueLlong(52440)}, + {Field: "block.0.wr.bytes", Value: *golibvirt.NewTypedParamValueLlong(1183828480)}, + {Field: "block.0.wr.times", Value: *golibvirt.NewTypedParamValueLlong(21887150375)}, + {Field: "block.0.fl.reqs", Value: *golibvirt.NewTypedParamValueLlong(32250)}, + {Field: "block.0.fl.times", Value: *golibvirt.NewTypedParamValueLlong(23158998353)}, + {Field: "block.0.errors", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "block.0.allocation", Value: *golibvirt.NewTypedParamValueLlong(770048000)}, + {Field: "block.0.capacity", Value: *golibvirt.NewTypedParamValueLlong(2361393152)}, + {Field: "block.0.physical", Value: *golibvirt.NewTypedParamValueLlong(770052096)}, + {Field: "block.0.threshold", Value: *golibvirt.NewTypedParamValueLlong(2147483648)}, + {Field: "block.1.name", Value: *golibvirt.NewTypedParamValueString("vda1")}, + {Field: "block.1.backingIndex", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "block.1.path", Value: *golibvirt.NewTypedParamValueString("/tmp/ubuntu_image1.img")}, + {Field: "block.1.rd.reqs", Value: *golibvirt.NewTypedParamValueLlong(11354)}, + {Field: "block.1.rd.bytes", Value: 
*golibvirt.NewTypedParamValueLlong(330314752)}, + {Field: "block.1.rd.times", Value: *golibvirt.NewTypedParamValueLlong(6240559566)}, + {Field: "block.1.wr.reqs", Value: *golibvirt.NewTypedParamValueLlong(52440)}, + {Field: "block.1.wr.bytes", Value: *golibvirt.NewTypedParamValueLlong(1183828480)}, + {Field: "block.1.wr.times", Value: *golibvirt.NewTypedParamValueLlong(21887150375)}, + {Field: "block.1.fl.reqs", Value: *golibvirt.NewTypedParamValueLlong(32250)}, + {Field: "block.1.fl.times", Value: *golibvirt.NewTypedParamValueLlong(23158998353)}, + {Field: "block.1.errors", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "block.1.allocation", Value: *golibvirt.NewTypedParamValueLlong(770048000)}, + {Field: "block.1.capacity", Value: *golibvirt.NewTypedParamValueLlong(2361393152)}, + {Field: "block.1.physical", Value: *golibvirt.NewTypedParamValueLlong(770052096)}, + {Field: "block.1.threshold", Value: *golibvirt.NewTypedParamValueLlong(2147483648)}, + }, + }, + } + + iothreadStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "iothread.count", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "iothread.0.poll-max-ns", Value: *golibvirt.NewTypedParamValueLlong(32768)}, + {Field: "iothread.0.poll-grow", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "iothread.0.poll-shrink", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "iothread.1.poll-max-ns", Value: *golibvirt.NewTypedParamValueLlong(32769)}, + {Field: "iothread.1.poll-grow", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "iothread.1.poll-shrink", Value: *golibvirt.NewTypedParamValueLlong(0)}, + }, + }, + } + + dirtyrateStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "dirtyrate.calc_status", Value: *golibvirt.NewTypedParamValueLlong(2)}, + {Field: "dirtyrate.calc_start_time", Value: *golibvirt.NewTypedParamValueLlong(348414)}, + {Field: "dirtyrate.calc_period", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "dirtyrate.megabytes_per_second", Value: *golibvirt.NewTypedParamValueLlong(4)}, + {Field: "dirtyrate.calc_mode", Value: *golibvirt.NewTypedParamValueString("dirty-ring")}, + {Field: "dirtyrate.vcpu.0.megabytes_per_second", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "dirtyrate.vcpu.1.megabytes_per_second", Value: *golibvirt.NewTypedParamValueLlong(2)}, + }, + }, + } + + vcpuStats = []golibvirt.DomainStatsRecord{ + { + Dom: domains[0], + Params: []golibvirt.TypedParam{ + {Field: "vcpu.current", Value: *golibvirt.NewTypedParamValueLlong(3)}, + {Field: "vcpu.maximum", Value: *golibvirt.NewTypedParamValueLlong(3)}, + {Field: "vcpu.0.state", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "vcpu.0.time", Value: *golibvirt.NewTypedParamValueLlong(17943740000000)}, + {Field: "vcpu.0.wait", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "vcpu.0.halted", Value: *golibvirt.NewTypedParamValueString("no")}, + {Field: "vcpu.0.delay", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "vcpu.1.state", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "vcpu.1.time", Value: *golibvirt.NewTypedParamValueLlong(17943740000000)}, + {Field: "vcpu.1.wait", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "vcpu.1.halted", Value: *golibvirt.NewTypedParamValueString("yes")}, + {Field: "vcpu.1.delay", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "vcpu.2.state", Value: *golibvirt.NewTypedParamValueLlong(1)}, + {Field: "vcpu.2.time", 
Value: *golibvirt.NewTypedParamValueLlong(17943740000000)}, + {Field: "vcpu.2.wait", Value: *golibvirt.NewTypedParamValueLlong(0)}, + {Field: "vcpu.2.delay", Value: *golibvirt.NewTypedParamValueLlong(0)}, + }, + }, + } + + vcpusMapping = []vcpuAffinity{ + {"0", "0,1,2,3", 0}, + {"1", "1,2,3,4", 1}, + } + + expectedMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_state", + map[string]string{"domain_name": "Droplet-844329"}, + map[string]interface{}{ + "reason": 2, + "state": 1, + }, + time.Now()), + testutil.MustMetric("libvirt_state", + map[string]string{"domain_name": "Droplet-33436"}, + map[string]interface{}{ + "reason": 1, + "state": 1, + }, + time.Now()), + } + + expectedMemoryMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_memory_bandwidth_monitor_total", + map[string]string{"domain_name": "Droplet-844329"}, + map[string]interface{}{ + "count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "0"}, + map[string]interface{}{ + "name": "any_name_vcpus_0-4", + "vcpus": "0-4", + "node_count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "1"}, + map[string]interface{}{ + "name": "vcpus_7", + "vcpus": "7", + "node_count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor_node", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "0", "controller_index": "0"}, + map[string]interface{}{ + "id": 0, + "bytes_total": int64(10208067584), + "bytes_local": int64(4807114752), + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor_node", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "0", "controller_index": "1"}, + map[string]interface{}{ + "id": 1, + "bytes_total": int64(8693735424), + "bytes_local": int64(5850161152), + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor_node", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "1", "controller_index": "0"}, + map[string]interface{}{ + "id": 0, + "bytes_total": 853811200, + "bytes_local": 290701312, + }, + time.Now()), + testutil.MustMetric("libvirt_memory_bandwidth_monitor_node", + map[string]string{"domain_name": "Droplet-844329", "memory_bandwidth_monitor_id": "1", "controller_index": "1"}, + map[string]interface{}{ + "id": 1, + "bytes_total": 406044672, + "bytes_local": 229425152, + }, + time.Now()), + } + + expectedCPUMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_cpu", + map[string]string{"domain_name": "Droplet-844329"}, + map[string]interface{}{ + "time": int64(67419144867000), + "user": int64(63886161852000), + "system": int64(3532983015000), + "haltpoll_success_time": 516907915, + "haltpoll_fail_time": int64(2727253643), + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor_total", + map[string]string{"domain_name": "Droplet-844329"}, + map[string]interface{}{ + "count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "0"}, + map[string]interface{}{ + "name": "any_name_vcpus_0-3", + "vcpus": "0-3", + "bank_count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "1"}, + 
map[string]interface{}{ + "name": "vcpus_4-9", + "vcpus": "4-9", + "bank_count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor_bank", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "0", "bank_index": "0"}, + map[string]interface{}{ + "id": 0, + "bytes": 5406720, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor_bank", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "0", "bank_index": "1"}, + map[string]interface{}{ + "id": 1, + "bytes": 0, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor_bank", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "1", "bank_index": "0"}, + map[string]interface{}{ + "id": 0, + "bytes": 720896, + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_cache_monitor_bank", + map[string]string{"domain_name": "Droplet-844329", "cache_monitor_id": "1", "bank_index": "1"}, + map[string]interface{}{ + "id": 1, + "bytes": 8200192, + }, + time.Now()), + } + + expectedVcpuAffinityMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "0"}, + map[string]interface{}{ + "cpu_id": "0,1,2,3", + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "1"}, + map[string]interface{}{ + "cpu_id": "1,2,3,4", + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-33436", + "vcpu_id": "0"}, + map[string]interface{}{ + "cpu_id": "0,1,2,3", + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-33436", + "vcpu_id": "1"}, + map[string]interface{}{ + "cpu_id": "1,2,3,4", + }, + time.Now()), + } + + expectedBalloonMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_balloon", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "current": 4194304, + "maximum": 4194304, + "swap_in": 0, + "swap_out": 0, + "major_fault": 0, + "minor_fault": 0, + "unused": 3928628, + "available": 4018480, + "rss": 1036012, + "usable": 3808724, + "last_update": 1654611373, + "disk_caches": 68820, + "hugetlb_pgalloc": 0, + "hugetlb_pgfail": 0, + }, + time.Now()), + } + + expectedPerfMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_perf", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "cmt": 19087360, + "mbmt": 77168640, + "mbml": 67788800, + "cpu_cycles": int64(29858995122), + "instructions": 0, + "cache_references": int64(3053301695), + "cache_misses": 609441024, + "branch_instructions": int64(2623890194), + "branch_misses": 103707961, + "bus_cycles": 188105628, + "stalled_cycles_frontend": 0, + "stalled_cycles_backend": 0, + "ref_cpu_cycles": int64(30766094039), + "cpu_clock": int64(25166642695), + "task_clock": int64(25263578917), + "page_faults": 2670, + "context_switches": 294284, + "cpu_migrations": 17949, + "page_faults_min": 2670, + "page_faults_maj": 0, + "alignment_faults": 0, + "emulation_faults": 0, + }, + time.Now()), + } + + expectedInterfaceMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_net_total", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "count": 1, + }, + time.Now()), + testutil.MustMetric("libvirt_net", + map[string]string{ + "domain_name": "Droplet-844329", + "interface_id": "0", + }, + map[string]interface{}{ + "name": 
"vnet0", + "rx_bytes": 110, + "rx_pkts": 1, + "rx_errs": 0, + "rx_drop": 31007, + "tx_bytes": 0, + "tx_pkts": 0, + "tx_errs": 0, + "tx_drop": 0, + }, + time.Now()), + } + + expectedBlockMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_block_total", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_block", + map[string]string{ + "domain_name": "Droplet-844329", + "block_id": "0", + }, + map[string]interface{}{ + "name": "vda", + "backingIndex": 1, + "path": "/tmp/ubuntu_image.img", + "rd_reqs": 11354, + "rd_bytes": 330314752, + "rd_times": int64(6240559566), + "wr_reqs": 52440, + "wr_bytes": 1183828480, + "wr_times": int64(21887150375), + "fl_reqs": 32250, + "fl_times": int64(23158998353), + "errors": 0, + "allocation": 770048000, + "capacity": int64(2361393152), + "physical": 770052096, + "threshold": int64(2147483648), + }, + time.Now()), + testutil.MustMetric("libvirt_block", + map[string]string{ + "domain_name": "Droplet-844329", + "block_id": "1", + }, + map[string]interface{}{ + "name": "vda1", + "backingIndex": 1, + "path": "/tmp/ubuntu_image1.img", + "rd_reqs": 11354, + "rd_bytes": 330314752, + "rd_times": int64(6240559566), + "wr_reqs": 52440, + "wr_bytes": 1183828480, + "wr_times": int64(21887150375), + "fl_reqs": 32250, + "fl_times": int64(23158998353), + "errors": 0, + "allocation": 770048000, + "capacity": int64(2361393152), + "physical": 770052096, + "threshold": int64(2147483648), + }, + time.Now()), + } + + expectedIOThreadMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_iothread_total", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "count": 2, + }, + time.Now()), + testutil.MustMetric("libvirt_iothread", + map[string]string{ + "domain_name": "Droplet-844329", + "iothread_id": "0", + }, + map[string]interface{}{ + "poll_max_ns": 32768, + "poll_grow": 0, + "poll_shrink": 0, + }, + time.Now()), + testutil.MustMetric("libvirt_iothread", + map[string]string{ + "domain_name": "Droplet-844329", + "iothread_id": "1", + }, + map[string]interface{}{ + "poll_max_ns": 32769, + "poll_grow": 0, + "poll_shrink": 0, + }, + time.Now()), + } + + expectedDirtyrateMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_dirtyrate", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "calc_status": 2, + "calc_start_time": 348414, + "calc_period": 1, + "megabytes_per_second": 4, + "calc_mode": "dirty-ring", + }, + time.Now()), + testutil.MustMetric("libvirt_dirtyrate_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "0", + }, + map[string]interface{}{ + "megabytes_per_second": 1, + }, + time.Now()), + testutil.MustMetric("libvirt_dirtyrate_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "1", + }, + map[string]interface{}{ + "megabytes_per_second": 2, + }, + time.Now()), + } + + expectedVCPUMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_vcpu_total", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "current": 3, + "maximum": 3, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "0", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "halted": "no", + "halted_i": 0, + "delay": 0, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": 
"Droplet-844329", + "vcpu_id": "1", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "halted": "yes", + "halted_i": 1, + "delay": 0, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "2", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "delay": 0, + }, + time.Now()), + } + + expectedExtendedVCPUMetrics = []telegraf.Metric{ + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "0"}, + map[string]interface{}{ + "cpu_id": "0,1,2,3", + }, + time.Now()), + testutil.MustMetric("libvirt_cpu_affinity", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "1"}, + map[string]interface{}{ + "cpu_id": "1,2,3,4", + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu_total", + map[string]string{ + "domain_name": "Droplet-844329", + }, + map[string]interface{}{ + "current": 3, + "maximum": 3, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "0", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "halted": "no", + "halted_i": 0, + "delay": 0, + "cpu_id": 0, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "1", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "halted": "yes", + "halted_i": 1, + "delay": 0, + "cpu_id": 1, + }, + time.Now()), + testutil.MustMetric("libvirt_vcpu", + map[string]string{ + "domain_name": "Droplet-844329", + "vcpu_id": "2", + }, + map[string]interface{}{ + "state": 1, + "time": int64(17943740000000), + "wait": 0, + "delay": 0, + }, + time.Now()), + } +) diff --git a/plugins/inputs/libvirt/libvirt_utils.go b/plugins/inputs/libvirt/libvirt_utils.go new file mode 100644 index 0000000000000..1da6a9dd9f1ef --- /dev/null +++ b/plugins/inputs/libvirt/libvirt_utils.go @@ -0,0 +1,146 @@ +package libvirt + +import ( + "strconv" + "strings" + + golibvirt "github.com/digitalocean/go-libvirt" + libvirtutils "github.com/thomasklein94/packer-plugin-libvirt/libvirt-utils" +) + +type utils interface { + GatherAllDomains() (domains []golibvirt.Domain, err error) + GatherStatsForDomains(domains []golibvirt.Domain, metricNumber uint32) ([]golibvirt.DomainStatsRecord, error) + GatherNumberOfPCPUs() (int, error) + GatherVcpuMapping(domain golibvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error) + EnsureConnected(libvirtURI string) error + Disconnect() error +} + +type utilsImpl struct { + libvirt *golibvirt.Libvirt +} + +type vcpuAffinity struct { + vcpuID string + coresAffinity string + currentPCPUID int32 +} + +// GatherAllDomains gathers all domains on system +func (l *utilsImpl) GatherAllDomains() (domains []golibvirt.Domain, err error) { + allDomainStatesFlag := golibvirt.ConnectListDomainsRunning + golibvirt.ConnectListDomainsPaused + + golibvirt.ConnectListDomainsShutoff + golibvirt.ConnectListDomainsOther + + domains, _, err = l.libvirt.ConnectListAllDomains(1, allDomainStatesFlag) + return domains, err +} + +// GatherStatsForDomains gathers stats for given domains based on number that was previously calculated +func (l *utilsImpl) GatherStatsForDomains(domains []golibvirt.Domain, metricNumber uint32) ([]golibvirt.DomainStatsRecord, error) { + if metricNumber == 0 { + // do not need to do 
an expensive call when no statistics were requested + return []golibvirt.DomainStatsRecord{}, nil + } + + allDomainStatesFlag := golibvirt.ConnectGetAllDomainsStatsRunning + golibvirt.ConnectGetAllDomainsStatsPaused + + golibvirt.ConnectGetAllDomainsStatsShutoff + golibvirt.ConnectGetAllDomainsStatsOther + + return l.libvirt.ConnectGetAllDomainStats(domains, metricNumber, allDomainStatesFlag) +} + +func (l *utilsImpl) GatherNumberOfPCPUs() (int, error) { + //nolint:dogsled //Using only needed values from library function + _, _, _, _, nodes, sockets, cores, threads, err := l.libvirt.NodeGetInfo() + if err != nil { + return 0, err + } + + return int(nodes * sockets * cores * threads), nil +} + +// GatherVcpuMapping is based on the official libvirt Go module: +// https://github.com/libvirt/libvirt-go-module/blob/268a5d02e00cc9b3d5d7fa6c08d753071e7d14b8/domain.go#L4516 +// (that library cannot be used here because of its C bindings) +func (l *utilsImpl) GatherVcpuMapping(domain golibvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error) { + //nolint:dogsled //Using only needed values from library function + _, _, _, vCPUs, _, err := l.libvirt.DomainGetInfo(domain) + if err != nil { + return nil, err + } + + // one bit per physical CPU, rounded up to whole bytes + bytesToHoldPCPUs := (pCPUs + 7) / 8 + + cpuInfo, vcpuPinInfo, err := l.libvirt.DomainGetVcpus(domain, int32(vCPUs), int32(bytesToHoldPCPUs)) + if err != nil { + // DomainGetVcpus returns not only the affinity (1:N mapping from vCPU to pCPU) + // but also the realtime 1:1 mapping from vCPU to pCPU. + // Unfortunately it returns nothing (only an error) for inactive domains; in that case + // fall back to DomainGetVcpuPinInfo, which reports only the affinity but works for inactive domains too. + + vcpuPinInfo, _, err = l.libvirt.DomainGetVcpuPinInfo(domain, int32(vCPUs), int32(bytesToHoldPCPUs), uint32(golibvirt.DomainAffectCurrent)) + if err != nil { + return nil, err + } + } + + var vcpuAffinities []vcpuAffinity + for i := 0; i < int(vCPUs); i++ { + // each vCPU owns bytesToHoldPCPUs consecutive bytes of vcpuPinInfo; + // bit j set within that chunk means vCPU i may run on physical CPU j + var coresAffinity []string + for j := 0; j < pCPUs; j++ { + aByte := (i * bytesToHoldPCPUs) + (j / 8) + aBit := j % 8 + + if (vcpuPinInfo[aByte] & (1 << uint(aBit))) != 0 { + coresAffinity = append(coresAffinity, strconv.Itoa(j)) + } + } + + vcpu := vcpuAffinity{ + vcpuID: strconv.FormatInt(int64(i), 10), + coresAffinity: strings.Join(coresAffinity, ","), + currentPCPUID: -1, + } + + if shouldGetCurrentPCPU && i < len(cpuInfo) { + vcpu.currentPCPUID = cpuInfo[i].CPU + } + + if len(coresAffinity) > 0 { + vcpuAffinities = append(vcpuAffinities, vcpu) + } + } + + return vcpuAffinities, nil +} + +func (l *utilsImpl) EnsureConnected(libvirtURI string) error { + if isConnected(l.libvirt) { + return nil + } + + driver, err := libvirtutils.ConnectByUriString(libvirtURI) + if err != nil { + return err + } + l.libvirt = driver + return nil +} + +func (l *utilsImpl) Disconnect() error { + l.libvirt = nil + return nil +} + +func isConnected(driver *golibvirt.Libvirt) bool { + if driver == nil { + return false + } + + select { + case <-driver.Disconnected(): + return false + default: + } + return true +} diff --git a/plugins/inputs/libvirt/libvirt_utils_mock.go b/plugins/inputs/libvirt/libvirt_utils_mock.go new file mode 100644 index 0000000000000..3ebdf322c1301 --- /dev/null +++ b/plugins/inputs/libvirt/libvirt_utils_mock.go @@ -0,0 +1,146 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT.
+ +package libvirt + +import ( + go_libvirt "github.com/digitalocean/go-libvirt" + mock "github.com/stretchr/testify/mock" +) + +// MockLibvirtUtils is an autogenerated mock type for the utils type +type MockLibvirtUtils struct { + mock.Mock +} + +// Disconnect provides a mock function with given fields: +func (_m *MockLibvirtUtils) Disconnect() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnsureConnected provides a mock function with given fields: libvirtURI +func (_m *MockLibvirtUtils) EnsureConnected(libvirtURI string) error { + ret := _m.Called(libvirtURI) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(libvirtURI) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GatherAllDomains provides a mock function with given fields: +func (_m *MockLibvirtUtils) GatherAllDomains() ([]go_libvirt.Domain, error) { + ret := _m.Called() + + var r0 []go_libvirt.Domain + if rf, ok := ret.Get(0).(func() []go_libvirt.Domain); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]go_libvirt.Domain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GatherNumberOfPCPUs provides a mock function with given fields: +func (_m *MockLibvirtUtils) GatherNumberOfPCPUs() (int, error) { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GatherStatsForDomains provides a mock function with given fields: domains, metricNumber +func (_m *MockLibvirtUtils) GatherStatsForDomains(domains []go_libvirt.Domain, metricNumber uint32) ([]go_libvirt.DomainStatsRecord, error) { + ret := _m.Called(domains, metricNumber) + + var r0 []go_libvirt.DomainStatsRecord + if rf, ok := ret.Get(0).(func([]go_libvirt.Domain, uint32) []go_libvirt.DomainStatsRecord); ok { + r0 = rf(domains, metricNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]go_libvirt.DomainStatsRecord) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]go_libvirt.Domain, uint32) error); ok { + r1 = rf(domains, metricNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GatherVcpuMapping provides a mock function with given fields: domain, pCPUs, shouldGetCurrentPCPU +func (_m *MockLibvirtUtils) GatherVcpuMapping(domain go_libvirt.Domain, pCPUs int, shouldGetCurrentPCPU bool) ([]vcpuAffinity, error) { + ret := _m.Called(domain, pCPUs, shouldGetCurrentPCPU) + + var r0 []vcpuAffinity + if rf, ok := ret.Get(0).(func(go_libvirt.Domain, int, bool) []vcpuAffinity); ok { + r0 = rf(domain, pCPUs, shouldGetCurrentPCPU) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]vcpuAffinity) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(go_libvirt.Domain, int, bool) error); ok { + r1 = rf(domain, pCPUs, shouldGetCurrentPCPU) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewMockLibvirtUtils interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockLibvirtUtils creates a new instance of MockLibvirtUtils. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMockLibvirtUtils(t mockConstructorTestingTNewMockLibvirtUtils) *MockLibvirtUtils { + mock := &MockLibvirtUtils{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/plugins/inputs/libvirt/sample.conf b/plugins/inputs/libvirt/sample.conf new file mode 100644 index 0000000000000..561294c76550a --- /dev/null +++ b/plugins/inputs/libvirt/sample.conf @@ -0,0 +1,29 @@ +# The libvirt plugin collects statistics from virtualized guests using the libvirt API. +[[inputs.libvirt]] + ## Domain names from which libvirt gathers statistics. + ## By default (empty or missing array) the plugin gathers statistics from every domain registered in the host system. + # domains = [] + + ## Libvirt connection URI to the hypervisor. + ## The plugin supports multiple transport protocols, which are configured via the URI. + ## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters] + ## Supported transport protocols: ssh, tcp, tls, unix + ## URI examples for each type of transport protocol: + ## 1. SSH: qemu+ssh:///system?keyfile=/&known_hosts=/ + ## 2. TCP: qemu+tcp:///system + ## 3. TLS: qemu+tls:///system?pkipath=/certs_dir/ + ## 4. UNIX: qemu+unix:///system?socket=/ + ## Default URI is qemu:///system + # libvirt_uri = "qemu:///system" + + ## Statistics groups for which the plugin will gather statistics. + ## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate + ## An empty array means no statistics-group metrics will be exposed by the plugin. + ## By default the plugin will gather all available statistics. + # statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"] + + ## A list containing additional statistics to be exposed by the plugin. + ## Supported additional statistics: vcpu_mapping + ## By default (empty or missing array) the plugin will not collect additional statistics. + # additional_statistics = []
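For reference, the pCPU-affinity decoding used by GatherVcpuMapping in plugins/inputs/libvirt/libvirt_utils.go walks a packed bitmap: each vCPU owns (pCPUs + 7) / 8 consecutive bytes of the pin map, and bit j set within that chunk means the vCPU may run on physical CPU j. Below is a minimal, self-contained sketch of that walk; the helper name and fixture bytes are illustrative only and not part of the plugin:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// decodeAffinity mirrors the bitmap walk in GatherVcpuMapping: each vCPU
// owns bytesToHoldPCPUs consecutive bytes of cpuMap, and bit j set within
// that chunk means the vCPU is allowed to run on physical CPU j.
func decodeAffinity(cpuMap []byte, vCPUs, pCPUs int) []string {
	bytesToHoldPCPUs := (pCPUs + 7) / 8 // one bit per pCPU, rounded up to whole bytes
	affinities := make([]string, 0, vCPUs)
	for i := 0; i < vCPUs; i++ {
		var cores []string
		for j := 0; j < pCPUs; j++ {
			aByte := (i * bytesToHoldPCPUs) + (j / 8)
			if cpuMap[aByte]&(1<<uint(j%8)) != 0 {
				cores = append(cores, strconv.Itoa(j))
			}
		}
		affinities = append(affinities, strings.Join(cores, ","))
	}
	return affinities
}

func main() {
	// Hypothetical pin map for a 2-vCPU guest on a 4-pCPU host:
	// 0x0F = 0b00001111 -> vCPU 0 may run on pCPUs 0-3,
	// 0x0E = 0b00001110 -> vCPU 1 may run on pCPUs 1-3.
	fmt.Println(decodeAffinity([]byte{0x0F, 0x0E}, 2, 4))
	// Output: [0,1,2,3 1,2,3]
}

The comma-joined strings match the coresAffinity values the plugin emits as the cpu_id field of the libvirt_cpu_affinity measurement in the tests above.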