# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
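#
# Example (illustrative, assuming INFLUX_TOKEN and BATCH_SIZE are set in
# Telegraf's environment):
#   token = "${INFLUX_TOKEN}"         # string value: quoted
#   metric_batch_size = ${BATCH_SIZE} # numeric value: unquoted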


# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"


# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
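## ex (illustrative): with interval = "10s" and roughly 1000 metrics gathered
## per interval, a 10000-metric buffer covers about 100s of output downtime
## before the oldest metrics are dropped.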

## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"

## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"

## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
##     when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""

## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false

## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"

## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""

## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to; if there is no log activity, rotation may be delayed.
# logfile_rotation_interval = "0d"

## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"

## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5

## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


# Configuration for sending metrics to InfluxDB
#[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]

## The target database for metrics; will be created as needed.
## For a UDP URL endpoint, the database needs to be configured on the server side.
# database = "telegraf"

## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""

## If true, the 'database_tag' will not be included in the written metric.
# exclude_database_tag = false

## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
# skip_database_creation = false

## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""

## The value of this tag will be used to determine the retention policy. If this
## tag is not set the 'retention_policy' option is used as the default.
# retention_policy_tag = ""

## If true, the 'retention_policy_tag' will not be included in the written metric.
# exclude_retention_policy_tag = false

## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"

## Timeout for HTTP messages.
# timeout = "5s"

## HTTP Basic Auth
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

## HTTP User-Agent
# user_agent = "telegraf"

## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"

## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## HTTP Proxy override. If unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"

## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}

## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"

## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false

# # Send metrics to nowhere at all
# [[outputs.discard]]
# # no configuration


# # Send metrics to command as input over stdin
# [[outputs.exec]]
# ## Command to ingest metrics via stdin.
# command = ["tee", "-a", "/dev/null"]
#
# ## Timeout for command to complete.
# # timeout = "5s"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"


# # Run executable as long-running output plugin
# [[outputs.execd]]
# ## Program to run as daemon
# command = ["my-telegraf-output", "--some-flag", "value"]
#
# ## Delay before the process is restarted after an unexpected termination
# restart_delay = "10s"
#
# ## Data format to export.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"


# Send telegraf metrics to file(s)
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]

## Use batch serialization format instead of line based delimiting. The
## batch format allows for the production of non line based output formats and
## may more efficiently encode metric groups.
# use_batch_format = false

## The file will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed.
# rotation_interval = "0d"

## The file will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# rotation_max_size = "0MB"

## Maximum number of rotated archives to keep, any older files are deleted.
## If set to -1, no archives are removed.
# rotation_max_archives = 5

## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
# ## TCP endpoint for your graphite instance.
# ## If multiple endpoints are configured, output will be load balanced.
# ## Only one of the endpoints will be written to with each iteration.
# servers = ["localhost:2003"]
# ## Prefix for metric names
# prefix = ""
# ## Graphite output template
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
#
# ## Enable Graphite tags support
# # graphite_tag_support = false
#
# ## Character for separating metric name and field for Graphite tags
# # graphite_separator = "."
#
# ## Graphite templates patterns
# ## 1. Template for cpu
# ## 2. Template for disk*
# ## 3. Default template
# # templates = [
# # "cpu tags.measurement.host.field",
# # "disk* measurement.field",
# # "host.measurement.tags.field"
# #]
#
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Send telegraf metrics to graylog
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
# servers = ["127.0.0.1:12201"]
#
# ## The field to use as the GELF short_message, if unset the static string
# ## "telegraf" will be used.
# ## example: short_message_field = "message"
# # short_message_field = ""


# # Configurable HTTP health check resource based on metrics
# [[outputs.health]]
# ## Address and port to listen on.
# ## ex: service_address = "http://localhost:8080"
# ## service_address = "unix:///var/run/telegraf-health.sock"
# # service_address = "http://:8080"
#
# ## The maximum duration for reading the entire request.
# # read_timeout = "5s"
# ## The maximum duration for writing the entire response.
# # write_timeout = "5s"
#
# ## Username and password to accept for HTTP basic authentication.
# # basic_username = "user1"
# # basic_password = "secret"
#
# ## Allowed CA certificates for client certificates.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## TLS server certificate and private key.
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## One or more check sub-tables should be defined; it is also recommended to
# ## use metric filtering to limit the metrics that flow into this output.
# ##
# ## When using the default buffer sizes, this example will fail when the
# ## metric buffer is half full.
# ##
# ## namepass = ["internal_write"]
# ## tagpass = { output = ["influxdb"] }
# ##
# ## [[outputs.health.compares]]
# ## field = "buffer_size"
# ## lt = 5000.0
# ##
# ## [[outputs.health.contains]]
# ## field = "buffer_size"


# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
# ## URL is the address to send metrics to
# url = "http://127.0.0.1:8080/telegraf"
#
# ## Timeout for HTTP message
# # timeout = "5s"
#
# ## HTTP method, one of: "POST" or "PUT"
# # method = "POST"
#
# ## HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## OAuth2 Client Credentials Grant
# # client_id = "clientid"
# # client_secret = "secret"
# # token_url = "https://identityprovider/oauth2/v1/token"
# # scopes = ["urn:opc:idm:__myscopes__"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Additional HTTP headers
# # [outputs.http.headers]
# # # Should be set manually to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
#
# ## Idle (keep-alive) connection timeout.
# ## Maximum amount of time before idle connection is closed.
# ## Zero means no limit.
# # idle_conn_timeout = 0


# Configuration for sending metrics to InfluxDB v2
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["{{influx_url}}"]

## Token for authentication.
token = "{{influx_token}}"

## Organization is the name of the organization you wish to write to; must exist.
organization = "{{influx_org}}"

## Destination bucket to write into.
bucket = "{{influx_bucket}}"

## The value of this tag will be used to determine the bucket. If this
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""

## If true, the bucket tag will not be added to the metric.
# exclude_bucket_tag = false

## Timeout for HTTP messages.
# timeout = "5s"

## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}

## HTTP Proxy override. If unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"

## HTTP User-Agent
# user_agent = "telegraf"

## Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"

## Enable or disable uint support for writing uints to InfluxDB 2.0.
# influx_uint_support = false

## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
# ## URLs of kafka brokers
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
#
# ## The value of this tag will be used as the topic. If not set the 'topic'
# ## option is used.
# # topic_tag = ""
#
# ## If true, the 'topic_tag' will be removed from the metric.
# # exclude_topic_tag = false
#
# ## Optional Client id
# # client_id = "Telegraf"
#
# ## Set the minimal supported Kafka version. Setting this enables the use of new
# ## Kafka features and APIs. Of particular interest, lz4 compression
# ## requires at least version 0.10.0.0.
# ## ex: version = "1.1.0"
# # version = ""
#
# ## Optional topic suffix configuration.
# ## If the section is omitted, no suffix is used.
# ## Following topic suffix methods are supported:
# ## measurement - suffix equals to separator + measurement's name
# ## tags - suffix equals to separator + specified tags' values
# ## interleaved with separator
#
# ## Suffix equals to "_" + measurement name
# # [outputs.kafka.topic_suffix]
# # method = "measurement"
# # separator = "_"
#
# ## Suffix equals to "__" + measurement's "foo" tag value.
# ## If there is no such tag, suffix equals to an empty string
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo"]
# # separator = "__"
#
# ## Suffix equals to "_" + measurement's "foo" and "bar"
# ## tag values, separated by "_". If there are no such tags,
# ## their values are treated as empty strings.
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo", "bar"]
# # separator = "_"
#
# ## The routing tag specifies a tagkey on the metric whose value is used as
# ## the message key. The message key is used to determine which partition to
# ## send the message to. This tag is preferred over the routing_key option.
# routing_tag = "host"
#
# ## The routing key is set as the message key and used to determine which
# ## partition to send the message to. This value is only used when no
# ## routing_tag is set or as a fallback when the tag specified in routing tag
# ## is not found.
# ##
# ## If set to "random", a random value will be generated for each message.
# ##
# ## When unset, no message key is added and each message is routed to a random
# ## partition.
# ##
# ## ex: routing_key = "random"
# ## routing_key = "telegraf"
# # routing_key = ""
#
# ## Compression codec represents the various compression codecs recognized by
# ## Kafka in messages.
# ## 0 : None
# ## 1 : Gzip
# ## 2 : Snappy
# ## 3 : LZ4
# ## 4 : ZSTD
# # compression_codec = 0
#
# ## Idempotent Writes
# ## If enabled, exactly one copy of each message is written.
# # idempotent_writes = false
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
# ## 0 : the producer never waits for an acknowledgement from the broker.
# ## This option provides the lowest latency but the weakest durability
# ## guarantees (some data will be lost when a server fails).
# ## 1 : the producer gets an acknowledgement after the leader replica has
# ## received the data. This option provides better durability as the
# ## client waits until the server acknowledges the request as successful
# ## (only messages that were written to the now-dead leader but not yet
# ## replicated will be lost).
# ## -1: the producer gets an acknowledgement after all in-sync replicas have
# ## received the data. This option provides the best durability; we
# ## guarantee that no messages will be lost as long as at least one in-sync
# ## replica remains.
# # required_acks = -1
#
# ## The maximum number of times to retry sending a metric before failing
# ## until the next flush.
# # max_retry = 3
#
# ## The maximum permitted size of a message. Should be set equal to or
# ## smaller than the broker's 'message.max.bytes'.
# # max_message_bytes = 1000000
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## Optional SASL:
# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
# ## (defaults to PLAIN)
# # sasl_mechanism = ""
#
# ## used if sasl_mechanism is GSSAPI (experimental)
# # sasl_gssapi_service_name = ""
# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# # sasl_gssapi_kerberos_config_path = "/"
# # sasl_gssapi_realm = "realm"
# # sasl_gssapi_key_tab_path = ""
# # sasl_gssapi_disable_pafxfast = false
#
# ## used if sasl_mechanism is OAUTHBEARER (experimental)
# # sasl_access_token = ""
#
# ## SASL protocol version. When connecting to Azure EventHub set to 0.
# # sasl_version = 1
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# listen = ":9273"
#
# ## Metric version controls the mapping from Telegraf metrics into
# ## Prometheus format. When using the prometheus input, use the same value in
# ## both plugins to ensure metrics are round-tripped without modification.
# ##
# ## example: metric_version = 1;
# ## metric_version = 2; recommended version
# # metric_version = 1
#
# ## Use HTTP Basic Authentication.
# # basic_username = "Foo"
# # basic_password = "Bar"
#
# ## If set, the IP Ranges which are allowed to access metrics.
# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
# # ip_range = []
#
# ## Path to publish the metrics on.
# # path = "/metrics"
#
# ## Expiration interval for each metric. 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to exclude, valid entries are "gocollector" and "process".
# ## If unset, both collectors are enabled.
# # collectors_exclude = ["gocollector", "process"]
#
# ## Send string metrics as Prometheus labels.
# ## Unless set to false all string metrics will be sent as labels.
# # string_as_label = true
#
# ## If set, enable TLS with the given certificate.
# # tls_cert = "/etc/ssl/telegraf.crt"
# # tls_key = "/etc/ssl/telegraf.key"
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Export metric collection time.
# # export_timestamp = false


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
# ## The full TCP or UDP URL of the Riemann server
# url = "tcp://localhost:5555"
#
# ## Riemann event TTL, floating-point time in seconds.
# ## Defines how long an event is considered valid in Riemann
# # ttl = 30.0
#
# ## Separator to use between measurement and field name in Riemann service name
# ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
# separator = "/"
#
# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
# # measurement_as_attribute = false
#
# ## Send string metrics as Riemann event states.
# ## Unless enabled, all string metrics will be ignored
# # string_as_state = false
#
# ## A list of tag keys whose values get sent as Riemann tags.
# ## If empty, all Telegraf tag values will be sent as tags
# # tag_keys = ["telegraf","custom_tag"]
#
# ## Additional Riemann tags to send.
# # tags = ["telegraf-output"]
#
# ## Description for Riemann event
# # description_text = "metrics collected from telegraf"
#
# ## Riemann client write timeout, defaults to "5s" if not set.
# # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
# ## URL of server
# url = "localhost:5555"
# ## Transport protocol to use, either tcp or udp
# transport = "tcp"
# ## Separator to use between input name and field name in Riemann service name
# separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
# # address = "tcp://127.0.0.1:8094"
# # address = "tcp://example.com:http"
# # address = "tcp4://127.0.0.1:8094"
# # address = "tcp6://127.0.0.1:8094"
# # address = "tcp6://[2001:db8::1]:8094"
# # address = "udp://127.0.0.1:8094"
# # address = "udp4://127.0.0.1:8094"
# # address = "udp6://127.0.0.1:8094"
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Content encoding for packet-based connections (i.e. UDP, unixgram).
# ## Can be set to "gzip" or to "identity" to apply no encoding.
# ##
# # content_encoding = "identity"
#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"


# # Configuration for Syslog server to send metrics to
# [[outputs.syslog]]
# ## URL to connect to
# ## ex: address = "tcp://127.0.0.1:8094"
# ## ex: address = "tcp4://127.0.0.1:8094"
# ## ex: address = "tcp6://127.0.0.1:8094"
# ## ex: address = "tcp6://[2001:db8::1]:8094"
# ## ex: address = "udp://127.0.0.1:8094"
# ## ex: address = "udp4://127.0.0.1:8094"
# ## ex: address = "udp6://127.0.0.1:8094"
# address = "tcp://127.0.0.1:8094"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## The framing technique with which it is expected that messages are
# ## transported (default = "octet-counting"). Whether the messages come
# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
# ## be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
# ## Must be one of "LF", or "NUL".
# # trailer = "LF"
#
# ## SD-PARAMs settings
# ## Syslog messages can contain key/value pairs within zero or more
# ## structured data sections. For each unrecognized metric tag/field a
# ## SD-PARAMS is created.
# ##
# ## Example:
# ## [[outputs.syslog]]
# ## sdparam_separator = "_"
# ## default_sdid = "default@32473"
# ## sdids = ["foo@123", "bar@456"]
# ##
# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
#
# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
# # sdparam_separator = "_"
#
# ## Default sdid used for tags/fields that don't contain a prefix defined in
# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
# ## will be used for unrecognized fields.
# # default_sdid = "default@32473"
#
# ## List of explicit prefixes to extract from tag/field keys and use as the
# ## SDID, if they match (see above example for more details):
# # sdids = ["foo@123", "bar@456"]
#
# ## Default severity value. Severity and Facility are used to calculate the
# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
# # default_severity_code = 5
#
# ## Default facility value. Facility and Severity are used to calculate the
# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
# # default_facility_code = 1
#
# ## Default APP-NAME value (RFC5424#section-6.2.5)
# ## Used when no metric tag with key "appname" is defined.
# ## If unset, "Telegraf" is the default
# # default_appname = "Telegraf"


###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################


# # Attach AWS EC2 metadata to metrics
# [[processors.aws_ec2]]
# ## Instance identity document tags to attach to metrics.
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
# ##
# ## Available tags:
# ## * accountId
# ## * architecture
# ## * availabilityZone
# ## * billingProducts
# ## * imageId
# ## * instanceId
# ## * instanceType
# ## * kernelId
# ## * pendingTime
# ## * privateIp
# ## * ramdiskId
# ## * region
# ## * version
# imds_tags = []
#
# ## EC2 instance tags retrieved with DescribeTags action.
# ## If a tag is empty upon retrieval, it is omitted when tagging metrics.
# ## Note that in order for this to work, the role attached to the EC2 instance
# ## or the AWS credentials available from the environment must have a policy
# ## attached that allows ec2:DescribeTags.
# ##
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
# ec2_tags = []
#
# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
# timeout = "10s"
#
# ## ordered controls whether or not the metrics need to stay in the same order
# ## this plugin received them in. If false, this plugin will change the order
# ## with requests hitting cached results moving through immediately and not
# ## waiting on slower lookups. This may cause issues for you if you are
# ## depending on the order of metrics staying the same. If so, set this to true.
# ## Keeping the metrics ordered may be slightly slower.
# ordered = false
#
# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
# ## at the same time.
# ## It's probably best to keep this number fairly low.
# max_parallel_calls = 10


# # Clone metrics and apply modifications.
# [[processors.clone]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.clone.tags]
# # additional_tag = "tag_value"


# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## selects the keys to convert. The array may contain globs.
# ## <target-type> = [<tag-key>...]
# [processors.converter.tags]
# measurement = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Fields to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## selects the keys to convert. The array may contain globs.
# ## <target-type> = [<field-key>...]
# [processors.converter.fields]
# measurement = []
# tag = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []


# # Dates measurements, tags, and fields that pass through this filter.
# [[processors.date]]
# ## New tag to create
# tag_key = "month"
#
# ## New field to create (cannot set both field_key and tag_key)
# # field_key = "month"
#
# ## Date format string, must be a representation of the Go "reference time"
# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
# date_format = "Jan"
#
# ## If destination is a field, date format can also be one of
# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# # date_format = "unix"
#
# ## Offset duration added to the date string when writing the new tag.
# # date_offset = "0s"
#
# ## Timezone to use when creating the tag or field using a reference time
# ## string. This can be set to one of "UTC", "Local", or to a location name
# ## in the IANA Time Zone database.
# ## example: timezone = "America/Los_Angeles"
# # timezone = "UTC"


# # Filter metrics with repeating field values
# [[processors.dedup]]
# ## Maximum time to suppress output
# dedup_interval = "600s"


# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
# [[processors.defaults]]
# ## Ensures a set of fields always exists on your metric(s) with their
# ## respective default value.
# ## For any given field pair (key = default), if it's not set, a field
# ## is set on the metric with the specified default.
# ##
# ## A field is considered not set if it is nil on the incoming metric;
# ## or it is not nil but its value is an empty string or is a string
# ## of one or more spaces.
# ## <target-field> = <value>
# # [processors.defaults.fields]
# # field_1 = "bar"
# # time_idle = 0
# # is_error = true


# # Map enum values according to given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
# ## Name of the field to map. Globs accepted.
# field = "status"
#
# ## Name of the tag to map. Globs accepted.
# # tag = "status"
#
# ## Destination tag or field to be used for the mapped value. By default the
# ## source tag or field is used, overwriting the original value.
# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset, the unmodified value for the field will be used if no
# ## match is found.
# # default = 0
#
# ## Table of mappings
# [processors.enum.mapping.value_mappings]
# green = 1
# amber = 2
# red = 3


# # Run executable as long-running processor plugin
# [[processors.execd]]
# ## Program to run as daemon
# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
# command = ["cat"]
#
# ## Delay before the process is restarted after an unexpected termination
# restart_delay = "10s"


# # Performs file path manipulations on tags and fields
# [[processors.filepath]]
# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# # [[processors.filepath.basename]]
# # tag = "path"
# # dest = "basepath"
#
# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# # [[processors.filepath.dirname]]
# # field = "path"
#
# ## Treat the tag value as a path, converting it to its last element without its suffix
# # [[processors.filepath.stem]]
# # tag = "path"
#
# ## Treat the tag value as a path, converting it to the shortest path name equivalent
# ## to path by purely lexical processing
# # [[processors.filepath.clean]]
# # tag = "path"
#
# ## Treat the tag value as a path, converting it to a relative path that is lexically
# ## equivalent to the source path when joined to 'base_path'
# # [[processors.filepath.rel]]
# # tag = "path"
# # base_path = "/var/log"
#
# ## Treat the tag value as a path, replacing each separator character in path with a '/'
# ## character. Only has an effect on Windows
# # [[processors.filepath.toslash]]
# # tag = "path"


# # Add a tag of the network interface name looked up over SNMP by interface number
# [[processors.ifname]]
# ## Name of tag holding the interface number
# # tag = "ifIndex"
#
# ## Name of output tag where the interface name will be added
# # dest = "ifName"
#
# ## Name of tag of the SNMP agent to request the interface name from
# # agent = "agent"
#
# ## Timeout for each request.
# # timeout = "5s"
#
# ## SNMP version; can be 1, 2, or 3.
# # version = 2
#
# ## SNMP community string.
# # community = "public"
#
# ## Number of retries to attempt.
# # retries = 3
#
# ## The GETBULK max-repetitions parameter.
# # max_repetitions = 10
#
# ## SNMPv3 authentication and encryption options.
# ##
# ## Security Name.
# # sec_name = "myuser"
# ## Authentication protocol; one of "MD5", "SHA", or "".
# # auth_protocol = "MD5"
# ## Authentication password.
# # auth_password = "pass"
# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# # sec_level = "authNoPriv"
# ## Context Name.
# # context_name = ""
# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# # priv_protocol = ""
# ## Privacy password used for encrypted messages.
# # priv_password = ""
#
# ## max_parallel_lookups is the maximum number of SNMP requests to
# ## make at the same time.
# # max_parallel_lookups = 100
#
# ## ordered controls whether or not the metrics need to stay in the
# ## same order this plugin received them in. If false, this plugin
# ## may change the order when data is cached. If you need metrics to
# ## stay in order set this to true. Keeping the metrics ordered may
# ## be slightly slower.
# # ordered = false
#
# ## cache_ttl is the amount of time interface names are cached for a
# ## given agent. After this period elapses if names are needed they
# ## will be retrieved again.
# # cache_ttl = "8h"


# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.override.tags]
# # additional_tag = "tag_value"


# # Parse a value in a specified field/tag(s) and add the result as a new metric
# [[processors.parser]]
# ## The name of the fields whose value will be parsed.
# parse_fields = []
#
# ## If true, incoming metrics are not emitted.
# drop_original = false
#
# ## If set to override, emitted metrics will be merged by overriding the
# ## original metric using the newly parsed metrics.
# merge = "override"
#
# ## The data format to be read from files
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Rotate a single valued metric into a multi field metric
# [[processors.pivot]]
# ## Tag to use for naming the new field.
# tag_key = "name"
# ## Field to use as the value of the new field.
# value_key = "value"


# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
# [[processors.port_name]]
# ## Name of tag holding the port number
# # tag = "port"
# ## Or name of the field holding the port number
# # field = "port"
#
# ## Name of output tag or field (depending on the source) where service name will be added
# # dest = "service"
#
# ## Default tcp or udp
# # default_protocol = "tcp"
#
# ## Tag containing the protocol (tcp or udp, case-insensitive)
# # protocol_tag = "proto"
#
# ## Field containing the protocol (tcp or udp, case-insensitive)
# # protocol_field = "proto"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
# ## Tag and field conversions defined in a separate sub-tables
# # [[processors.regex.tags]]
# # ## Tag to change
# # key = "resp_code"
# # ## Regular expression to match on a tag value
# # pattern = "^(\\d)\\d\\d$"
# # ## Matches of the pattern will be replaced with this string. Use ${1}
# # ## notation to use the text of the first submatch.
# # replacement = "${1}xx"
#
# # [[processors.regex.fields]]
# # ## Field to change
# # key = "request"
# # ## All the power of the Go regular expressions available here
# # ## For example, named subgroups
# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# # replacement = "${method}"
# # ## If result_key is present, a new field will be created
# # ## instead of changing existing field
# # result_key = "method"
#
# ## Multiple conversions may be applied for one field sequentially
# ## Let's extract one more value
# # [[processors.regex.fields]]
# # key = "request"
# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
# # result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]


# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
# [[processors.reverse_dns]]
# ## For optimal performance, you may want to limit which metrics are passed to this
# ## processor. eg:
# ## namepass = ["my_metric_*"]
#
# ## cache_ttl is how long the dns entries should stay cached for.
# ## Generally longer is better, but if you expect a large number of diverse lookups
# ## you'll want to consider memory use.
# cache_ttl = "24h"
#
# ## lookup_timeout is how long to wait for a single dns request to respond.
# ## This is also the maximum acceptable latency for a metric travelling through
# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
# ## be passed on unaltered.
# ## Multiple simultaneous resolution requests for the same IP will only make a
# ## single rDNS request, and they will all wait for the answer for this long.
# lookup_timeout = "3s"
#
# ## max_parallel_lookups is the maximum number of dns requests to be in flight
# ## at the same time. Requests hitting cached values do not count against this
# ## total, and neither do multiple requests for the same IP.
# ## It's probably best to keep this number fairly low.
# max_parallel_lookups = 10
#
# ## ordered controls whether or not the metrics need to stay in the same order
# ## this plugin received them in. If false, this plugin will change the order
# ## with requests hitting cached results moving through immediately and not
# ## waiting on slower lookups. This may cause issues for you if you are
# ## depending on the order of metrics staying the same. If so, set this to true.
# ## Keeping the metrics ordered may be slightly slower.
# ordered = false
#
# [[processors.reverse_dns.lookup]]
# ## get the ip from the field "source_ip", and put the result in the field "source_name"
# field = "source_ip"
# dest = "source_name"
#
# [[processors.reverse_dns.lookup]]
# ## get the ip from the tag "destination_ip", and put the result in the tag
# ## "destination_name".
# tag = "destination_ip"
# dest = "destination_name"
#
# ## If you would prefer destination_name to be a field instead, you can use a
# ## processors.converter after this one, specifying the order attribute.


# # Add the S2 Cell ID as a tag based on latitude and longitude fields
# [[processors.s2geo]]
# ## The name of the lat and lon fields containing WGS-84 latitude and
# ## longitude in decimal degrees.
# # lat_field = "lat"
# # lon_field = "lon"
#
# ## New tag to create
# # tag_key = "s2_cell_id"
#
# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
# # cell_level = 9


# # Process metrics using a Starlark script
# [[processors.starlark]]
# ## The Starlark source can be set as a string in this configuration file, or
# ## by referencing a file containing the script. Only one source or script
# ## should be set at once.
# ##
# ## Source of the Starlark script.
# source = '''
# def apply(metric):
#   return metric
# '''
#
# ## File containing a Starlark script.
# # script = "/usr/local/bin/myscript.star"
#
# ## The constants of the Starlark script.
# # [processors.starlark.constants]
# # max_size = 10
# # threshold = 0.75
# # default_name = "Julia"
# # debug_mode = true
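#
# ## Example (hypothetical): an inline script that tags every metric could be
# ## written as:
# # source = '''
# # def apply(metric):
# #   metric.tags["processed"] = "true"
# #   return metric
# # '''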


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
# ## Convert a tag value to uppercase
# # [[processors.strings.uppercase]]
# # tag = "method"
#
# ## Convert a field value to lowercase and store in a new field
# # [[processors.strings.lowercase]]
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
# ## Convert a field value to titlecase
# # [[processors.strings.titlecase]]
# # field = "status"
#
# ## Trim leading and trailing whitespace using the default cutset
# # [[processors.strings.trim]]
# # field = "message"
#
# ## Trim leading characters in cutset
# # [[processors.strings.trim_left]]
# # field = "message"
# # cutset = "\t"
#
# ## Trim trailing characters in cutset
# # [[processors.strings.trim_right]]
# # field = "message"
# # cutset = "\r\n"
#
# ## Trim the given prefix from the field
# # [[processors.strings.trim_prefix]]
# # field = "my_value"
# # prefix = "my_"
#
# ## Trim the given suffix from the field
# # [[processors.strings.trim_suffix]]
# # field = "read_count"
# # suffix = "_count"
#
# ## Replace all non-overlapping instances of old with new
# # [[processors.strings.replace]]
# # measurement = "*"
# # old = ":"
# # new = "_"
#
# ## Trims strings based on width
# # [[processors.strings.left]]
# # field = "message"
# # width = 10
#
# ## Decode a base64 encoded utf-8 string
# # [[processors.strings.base64decode]]
# # field = "message"


# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
# [[processors.tag_limit]]
# ## Maximum number of tags to preserve
# limit = 10
#
# ## List of tags to preferentially preserve
# keep = ["foo", "bar", "baz"]


# # Uses a Go template to create a new tag
# [[processors.template]]
# ## Tag to set with the output of the template.
# tag = "topic"
#
# ## Go template used to create the tag value. In order to ease TOML
# ## escaping requirements, you may wish to use single quotes around the
# ## template string.
# template = '{ { .Tag "hostname" }}.{ { .Tag "level" }}'


# # Filter metrics, passing through only the top k series over a period
# [[processors.topk]]
# ## How many seconds between aggregations
# # period = 10
#
# ## How many top metrics to return
# # k = 10
#
# ## Over which tags should the aggregation be done. Globs can be specified, in
# ## which case any tag matching the glob will be aggregated over. If set to an
# ## empty list, no aggregation over tags is done
# # group_by = ['*']
#
# ## Over which fields the top k are calculated
# # fields = ["value"]
#
# ## What aggregation to use. Options: sum, mean, min, max
# # aggregation = "mean"
#
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
# # bottomk = false
#
# ## The plugin assigns each metric a GroupBy tag generated from its name and
# ## tags. If this setting is different than "" the plugin will add a
# ## tag (whose name will be the value of this setting) to each metric with
# ## the value of the calculated GroupBy tag. Useful for debugging
# # add_groupby_tag = ""
#
# ## These settings provide a way to know the position of each metric in
# ## the top k. The 'add_rank_fields' setting allows you to specify for which
# ## fields the position is required. If the list is non-empty, then a field
# ## will be added to each and every metric for each string present in this
# ## setting. This field will contain the ranking of the group that
# ## the metric belonged to when aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_rank'
# # add_rank_fields = []
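# ## ex (hypothetical): add_rank_fields = ["value"] would add a field named
# ## "value_topk_rank" holding each metric's position in the top k.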
#
# ## These settings provide a way to know what values the plugin is generating
# ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
# ## specify for which fields the final aggregation value is required. If the
# ## list is non-empty, then a field will be added to each and every metric for
# ## each field present in this setting. This field will contain
# ## the computed aggregation for the group that the metric belonged to when
# ## aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_aggregate'
# # add_aggregate_fields = []
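# ## ex (hypothetical): add_aggregate_fields = ["value"] would add a field
# ## named "value_topk_aggregate" holding the group's computed aggregation.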


# # Rotate multi field metric into several single field metrics
# [[processors.unpivot]]
# ## Tag to use for the name.
# tag_key = "name"
# ## Field to use for the name of the value.
# value_key = "value"


###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################


# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Configures which basic stats to push as fields
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


# # Calculates a derivative for every field.
# [[aggregators.derivative]]
# ## The period in which to flush the aggregator.
# period = "30s"
# ##
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# ##
# ## This aggregator will estimate a derivative for each field, which is
# ## contained in both the first and last metric of the aggregation interval.
# ## Without further configuration the derivative will be calculated with
# ## respect to the time difference between these two measurements in seconds.
# ## The formula applied for every field is:
# ##
# ##                value_last - value_first
# ## derivative = --------------------------
# ##              time_difference_in_seconds
# ##
# ## The resulting derivative will be named *fieldname_rate*. The suffix
# ## "_rate" can be configured by the *suffix* parameter. When using a
# ## derivation variable you can include its name for more clarity.
# # suffix = "_rate"
# ##
# ## As an abstraction the derivative can be calculated not only by the time
# ## difference but by the difference of a field, which is contained in the
# ## measurement. This field is assumed to be monotonically increasing. This
# ## feature is used by specifying a *variable*.
# ## Make sure the specified variable is not filtered and exists in the metrics
# ## passed to this aggregator!
# # variable = ""
# ##
# ## When using a field as the derivation parameter the name of that field will
# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
# ##
# ## Note, that the calculation is based on the actual timestamp of the
# ## measurements. When there is only one measurement during that period, the
# ## measurement will be rolled over to the next period. The maximum number of
# ## such roll-overs can be configured with a default of 10.
# # max_roll_over = 10
# ##


# # Report the final metric of a series
# [[aggregators.final]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## The time after which a series that has not been updated is considered final.
# series_timeout = "5m"


# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## If true, the histogram will be reset on flush instead
# ## of accumulating the results.
# reset = false
#
# ## Whether bucket values should be accumulated. If set to false, a "gt" tag
# ## will be added instead. Defaults to true.
# cumulative = true
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of the metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of the metric.
# # measurement_name = "diskio"
# # ## The fields of the metric to aggregate.
# # fields = ["io_time", "read_time", "write_time"]


# # Merge metrics into multifield metrics by series key
# [[aggregators.merge]]
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = true
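# ## Illustration (assumed inputs): two points sharing a series key and
# ## timestamp, "cpu,host=a user=10" and "cpu,host=a system=5", are merged
# ## into the single metric "cpu,host=a user=10,system=5".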


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
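# ## For reference (per the plugin's README): each aggregated field gains
# ## "_min" and "_max" variants, e.g. load1 becomes load1_min and load1_max.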


# # Keep the aggregate quantiles of each metric passing through.
# [[aggregators.quantile]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Quantiles to output in the range [0,1]
# # quantiles = [0.25, 0.5, 0.75]
#
# ## Type of aggregation algorithm
# ## Supported are:
# ## "t-digest" -- approximation using centroids; copes with a large number of samples
# ## "exact R7" -- exact computation, also used by Excel and NumPy (Hyndman & Fan 1996 R7)
# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
# ## NOTE: Do not use the "exact" algorithms with a large number of samples,
# ## as they impair performance and increase memory consumption!
# # algorithm = "t-digest"
#
# ## Compression for approximation (t-digest). The value needs to be
# ## greater than or equal to 1.0. Smaller values result in better
# ## performance but less accuracy.
# # compression = 100.0
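# ## Worked example (illustrative): with quantiles = [0.5] and the samples
# ## 1, 3 and 9 collected for a field during one period, the aggregator
# ## reports the median, i.e. 3, for that field.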


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# ## The fields for which the values will be counted
# fields = []
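# ## For reference (per the plugin's README): counts are emitted as fields
# ## named fieldname_value, e.g. counting a "status" field over the values
# ## 200, 200, 404 yields status_200=2i and status_404=1i. Only count fields
# ## with a limited number of distinct values.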


###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = [
#   "/tmp/test.sh",
#   "/usr/bin/mycollector --foo=bar",
#   "/tmp/collect_*.sh"
# ]
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## Measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box,
# ## but other services can use this format too.
# ## See the influxdb plugin's README for more details.
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# ## Default is "http://localhost:8086/debug/vars".
# urls = [
#   "http://localhost:8086/debug/vars"
# ]
#
# ## Username and password to send using HTTP Basic Authentication.
# # username = ""
# # password = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true


# # Retrieve data from MODBUS slave devices
# [[inputs.modbus]]
# ## Connection Configuration
# ##
# ## The plugin supports connections to PLCs via MODBUS/TCP or
# ## via serial line communication in binary (RTU) or readable (ASCII) encoding
# ##
# ## Device name
# name = "Device"
#
# ## Slave ID - addresses a MODBUS device on the bus
# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
# slave_id = 1
#
# ## Timeout for each request
# timeout = "1s"
#
# ## Maximum number of retries and the time to wait between retries
# ## when a slave-device is busy.
# # busy_retries = 0
# # busy_retries_wait = "100ms"
#
# # TCP - connect via Modbus/TCP
# controller = "tcp://localhost:502"
#
# ## Serial (RS485; RS232)
# # controller = "file:///dev/ttyUSB0"
# # baud_rate = 9600
# # data_bits = 8
# # parity = "N"
# # stop_bits = 1
# # transmission_mode = "RTU"
#
# ## Measurements
# ##
#
# ## Digital Variables, Discrete Inputs and Coils
# ## measurement - the (optional) measurement name, defaults to "modbus"
# ## name        - the variable name
# ## address     - variable address
#
# discrete_inputs = [
#   { name = "start",          address = [0]},
#   { name = "stop",           address = [1]},
#   { name = "reset",          address = [2]},
#   { name = "emergency_stop", address = [3]},
# ]
# coils = [
#   { name = "motor1_run",  address = [0]},
#   { name = "motor1_jog",  address = [1]},
#   { name = "motor1_stop", address = [2]},
# ]
#
# ## Analog Variables, Input Registers and Holding Registers
# ## measurement - the (optional) measurement name, defaults to "modbus"
# ## name        - the variable name
# ## byte_order  - the ordering of bytes
# ##  |---AB, ABCD - Big Endian
# ##  |---BA, DCBA - Little Endian
# ##  |---BADC     - Mid-Big Endian
# ##  |---CDAB     - Mid-Little Endian
# ## data_type   - INT16, UINT16, INT32, UINT32, INT64, UINT64,
# ##               FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation)
# ##               FLOAT32, FIXED, UFIXED (fixed-point representation on input)
# ## scale       - the final numeric variable representation
# ## address     - variable address
#
# holding_registers = [
#   { name = "power_factor", byte_order = "AB",   data_type = "FIXED",  scale=0.01,  address = [8]},
#   { name = "voltage",      byte_order = "AB",   data_type = "FIXED",  scale=0.1,   address = [0]},
#   { name = "energy",       byte_order = "ABCD", data_type = "FIXED",  scale=0.001, address = [5,6]},
#   { name = "current",      byte_order = "ABCD", data_type = "FIXED",  scale=0.001, address = [1,2]},
#   { name = "frequency",    byte_order = "AB",   data_type = "UFIXED", scale=0.1,   address = [7]},
#   { name = "power",        byte_order = "ABCD", data_type = "UFIXED", scale=0.1,   address = [3,4]},
# ]
# input_registers = [
#   { name = "tank_level",  byte_order = "AB",   data_type = "INT16", scale=1.0, address = [0]},
#   { name = "tank_ph",     byte_order = "AB",   data_type = "INT16", scale=1.0, address = [1]},
#   { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
# ]
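# ## Worked example (illustrative): with data_type = "FIXED" and scale = 0.1,
# ## a raw 16-bit register value of 2305 is reported as 2305 * 0.1 = 230.5,
# ## e.g. volts for the "voltage" entry above.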


# # Read current weather and forecast data from openweathermap.org
# [[inputs.openweathermap]]
# ## OpenWeatherMap API key.
# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
#
# ## City IDs to collect weather data from.
# city_id = ["5391959"]
#
# ## Language of the description field. Can be one of "ar", "bg",
# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
# # lang = "en"
#
# ## APIs to fetch; can contain "weather" or "forecast".
# fetch = ["weather", "forecast"]
#
# ## OpenWeatherMap base URL
# # base_url = "https://api.openweathermap.org/"
#
# ## Timeout for HTTP response.
# # response_timeout = "5s"
#
# ## Preferred unit system for temperature and wind speed. Can be one of
# ## "metric", "imperial", or "standard".
# # units = "metric"
#
# ## Query interval; OpenWeatherMap updates their weather data every 10
# ## minutes.
# interval = "10m"


###############################################################################
#                          SERVICE INPUT PLUGINS                              #
###############################################################################

# Generic socket listener capable of handling multiple socket types.
[[inputs.socket_listener]]
## URL to listen on
# service_address = "tcp://:8094"
# service_address = "tcp://127.0.0.1:http"
# service_address = "tcp4://:8094"
# service_address = "tcp6://:8094"
# service_address = "tcp6://[2001:db8::1]:8094"
service_address = "udp://:8094"
# service_address = "udp4://:8094"
# service_address = "udp6://:8094"
# service_address = "unix:///tmp/telegraf.sock"
# service_address = "unixgram:///tmp/telegraf.sock"

## Change the file mode bits on unix sockets. These permissions may not be
## respected by some platforms; to safely restrict write permissions it is
## best to place the socket into a directory that has previously been created
## with the desired permissions.
## ex: socket_mode = "777"
# socket_mode = ""

## Maximum number of concurrent connections.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
# max_connections = 1024

## Read timeout.
## Only applies to stream sockets (e.g. TCP).
## 0 (default) is unlimited.
# read_timeout = "30s"

## Optional TLS configuration.
## Only applies to stream sockets (e.g. TCP).
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enables client authentication if set.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

## Maximum socket buffer size (in bytes when no unit specified).
## For stream sockets, once the buffer fills up, the sender will start backing up.
## For datagram sockets, once the buffer fills up, metrics will start dropping.
## Defaults to the OS default.
# read_buffer_size = "64KiB"

## Period between keep alive probes.
## Only applies to TCP sockets.
## 0 disables keep alive probes.
## Defaults to the OS configuration.
# keep_alive_period = "5m"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

## Content encoding for message payloads. Can be set to "gzip" to decode
## gzip-encoded payloads, or "identity" to apply no decoding.
# content_encoding = "identity"
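## To smoke-test the UDP listener configured above (illustrative; assumes a
## common netcat build is installed):
##   echo "example_metric,tag1=a value=1i" | nc -u -w1 127.0.0.1 8094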


# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
# ## Protocol, address and port to host the syslog receiver.
# ## Specify an IP or hostname with port, e.g. tcp://localhost:6514 or tcp://10.0.0.1:6514.
# ## If no host is specified, then localhost is used.
# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
# server = "tcp://:6514"
#
# ## TLS Config
# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Period between keep alive probes.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# ## Only applies to stream sockets (e.g. TCP).
# # keep_alive_period = "5m"
#
# ## Maximum number of concurrent connections (default = 0).
# ## 0 means unlimited.
# ## Only applies to stream sockets (e.g. TCP).
# # max_connections = 1024
#
# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
# ## 0 means unlimited.
# # read_timeout = "5s"
#
# ## The framing technique with which messages are expected to be transported (default = "octet-counting").
# ## Messages may arrive using either the octet-counting technique (RFC5425#section-4.3.1,
# ## RFC6587#section-3.4.1) or the non-transparent framing technique (RFC6587#section-3.4.2).
# ## Must be one of "octet-counting" or "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
# ## Must be one of "LF" or "NUL".
# # trailer = "LF"
#
# ## Whether to parse in best-effort mode (default = false).
# ## When enabled, the parser extracts partial but valid info from malformed
# ## messages instead of discarding them.
# # best_effort = false
#
# ## Character to prepend to SD-PARAMs (default = "_").
# ## A syslog message can contain multiple parameters and multiple identifiers
# ## within its structured data section.
# ## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
# ## For each combination a field is created whose name concatenates the
# ## identifier, sdparam_separator, and the parameter name.
# # sdparam_separator = "_"
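# ## To send a test message (illustrative; assumes the util-linux logger
# ## utility, whose --rfc5424 and --octet-count options match this config):
# ##   logger --rfc5424 --octet-count --tcp --server 127.0.0.1 --port 6514 "hello telegraf"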