package metrics

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"time"
)

// InitPushProcessMetrics sets up periodic push for 'process_*' metrics to the given pushURL with the given interval.
//
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK to call InitPushProcessMetrics multiple times with different pushURL -
// in this case metrics are pushed to all the provided URLs.
func InitPushProcessMetrics(pushURL string, interval time.Duration, extraLabels string) error {
	writeMetrics := func(w io.Writer) {
		WriteProcessMetrics(w)
	}
	return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
}
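
// A minimal usage sketch (the URL, interval and label below are illustrative
// assumptions, not values required by this package): push 'process_*' metrics
// to a hypothetical VictoriaMetrics instance every 10 seconds.
//
//	if err := InitPushProcessMetrics("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `instance="host-1"`); err != nil {
//		log.Fatalf("cannot initialize process metrics push: %s", err)
//	}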

// InitPush sets up periodic push for globally registered metrics to the given pushURL with the given interval.
//
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// If pushProcessMetrics is set to true, then 'process_*' metrics are also pushed to pushURL.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK to call InitPush multiple times with different pushURL -
// in this case metrics are pushed to all the provided URLs.
func InitPush(pushURL string, interval time.Duration, extraLabels string, pushProcessMetrics bool) error {
	writeMetrics := func(w io.Writer) {
		WritePrometheus(w, pushProcessMetrics)
	}
	return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
}
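
// A minimal usage sketch (endpoint, interval and label are illustrative
// assumptions): push all globally registered metrics together with
// 'process_*' metrics every 10 seconds.
//
//	if err := InitPush("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `job="my_app"`, true); err != nil {
//		log.Fatalf("cannot initialize metrics push: %s", err)
//	}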

// InitPush sets up periodic push for metrics from s to the given pushURL with the given interval.
//
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK to call InitPush multiple times with different pushURL -
// in this case metrics are pushed to all the provided URLs.
func (s *Set) InitPush(pushURL string, interval time.Duration, extraLabels string) error {
	writeMetrics := func(w io.Writer) {
		s.WritePrometheus(w)
	}
	return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
}
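
// A minimal usage sketch (metric name and values are illustrative
// assumptions): push metrics from a dedicated Set every 10 seconds.
//
//	s := NewSet()
//	s.GetOrCreateCounter("my_requests_total").Inc()
//	if err := s.InitPush("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `job="my_app"`); err != nil {
//		log.Fatalf("cannot initialize metrics push: %s", err)
//	}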

// InitPushExt sets up periodic push for metrics obtained by calling writeMetrics with the given interval.
//
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK to call InitPushExt multiple times with different pushURL -
// in this case metrics are pushed to all the provided URLs.
//
// It is OK to call InitPushExt multiple times with different writeMetrics -
// in this case all the metrics generated by the writeMetrics callbacks are written to pushURL.
func InitPushExt(pushURL string, interval time.Duration, extraLabels string, writeMetrics func(w io.Writer)) error {
	if interval <= 0 {
		return fmt.Errorf("interval must be positive; got %s", interval)
	}
	if err := validateTags(extraLabels); err != nil {
		return fmt.Errorf("invalid extraLabels=%q: %w", extraLabels, err)
	}
	pu, err := url.Parse(pushURL)
	if err != nil {
		return fmt.Errorf("cannot parse pushURL=%q: %w", pushURL, err)
	}
	if pu.Scheme != "http" && pu.Scheme != "https" {
		return fmt.Errorf("unsupported scheme in pushURL=%q; expecting 'http' or 'https'", pushURL)
	}
	if pu.Host == "" {
		return fmt.Errorf("missing host in pushURL=%q", pushURL)
	}
	pushURLRedacted := pu.Redacted()
	c := &http.Client{
		Timeout: interval,
	}
	// Register metrics for monitoring the push process itself.
	pushesTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_total{url=%q}`, pushURLRedacted))
	pushErrorsTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_errors_total{url=%q}`, pushURLRedacted))
	bytesPushedTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_bytes_pushed_total{url=%q}`, pushURLRedacted))
	pushDuration := pushMetrics.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_duration_seconds{url=%q}`, pushURLRedacted))
	pushBlockSize := pushMetrics.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_block_size_bytes{url=%q}`, pushURLRedacted))
	pushMetrics.GetOrCreateFloatCounter(fmt.Sprintf(`metrics_push_interval_seconds{url=%q}`, pushURLRedacted)).Set(interval.Seconds())
	// Push the metrics to pushURL in a background goroutine with the given interval.
	go func() {
		ticker := time.NewTicker(interval)
		var bb bytes.Buffer
		var tmpBuf []byte
		zw := gzip.NewWriter(&bb)
		for range ticker.C {
			bb.Reset()
			writeMetrics(&bb)
			if len(extraLabels) > 0 {
				tmpBuf = addExtraLabels(tmpBuf[:0], bb.Bytes(), extraLabels)
				bb.Reset()
				if _, err := bb.Write(tmpBuf); err != nil {
					panic(fmt.Errorf("BUG: cannot write %d bytes to bytes.Buffer: %s", len(tmpBuf), err))
				}
			}
			// Compress the collected metrics with gzip before pushing them to pushURL.
			tmpBuf = append(tmpBuf[:0], bb.Bytes()...)
			bb.Reset()
			zw.Reset(&bb)
			if _, err := zw.Write(tmpBuf); err != nil {
				panic(fmt.Errorf("BUG: cannot write %d bytes to gzip writer: %s", len(tmpBuf), err))
			}
			if err := zw.Close(); err != nil {
				panic(fmt.Errorf("BUG: cannot flush metrics to gzip writer: %s", err))
			}
			pushesTotal.Inc()
			blockLen := bb.Len()
			bytesPushedTotal.Add(blockLen)
			pushBlockSize.Update(float64(blockLen))
			req, err := http.NewRequest("GET", pushURL, &bb)
			if err != nil {
				panic(fmt.Errorf("BUG: metrics.push: cannot initialize request for metrics push to %q: %w", pushURLRedacted, err))
			}
			req.Header.Set("Content-Type", "text/plain")
			req.Header.Set("Content-Encoding", "gzip")
			startTime := time.Now()
			resp, err := c.Do(req)
			pushDuration.UpdateDuration(startTime)
			if err != nil {
				// Log the error and keep pushing on the next tick.
				log.Printf("ERROR: metrics.push: cannot push metrics to %q: %s", pushURLRedacted, err)
				pushErrorsTotal.Inc()
				continue
			}
			if resp.StatusCode/100 != 2 {
				body, _ := ioutil.ReadAll(resp.Body)
				_ = resp.Body.Close()
				log.Printf("ERROR: metrics.push: unexpected status code in response from %q: %d; expecting 2xx; response body: %q",
					pushURLRedacted, resp.StatusCode, body)
				pushErrorsTotal.Inc()
				continue
			}
			_ = resp.Body.Close()
		}
	}()
	return nil
}
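
// A minimal usage sketch (the callback body and values are illustrative
// assumptions): push metrics produced by an arbitrary writeMetrics callback,
// for example metrics collected from several Sets at once.
//
//	writeMetrics := func(w io.Writer) {
//		setA.WritePrometheus(w)
//		setB.WritePrometheus(w)
//	}
//	if err := InitPushExt("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `job="my_app"`, writeMetrics); err != nil {
//		log.Fatalf("cannot initialize metrics push: %s", err)
//	}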

// pushMetrics holds 'metrics_push_*' metrics describing the state of the pushes
// initiated by InitPushExt.
var pushMetrics = NewSet()

func writePushMetrics(w io.Writer) {
	pushMetrics.WritePrometheus(w)
}
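
// A minimal sketch (assumes code living in the same package, since
// writePushMetrics is unexported): expose the 'metrics_push_*' metrics on a
// /metrics endpoint together with the regular metrics.
//
//	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
//		WritePrometheus(w, true)
//		writePushMetrics(w)
//	})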
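
// addExtraLabels injects extraLabels into each metric line from src and appends
// the result to dst. For illustration (the metric names below are assumptions,
// not taken from this package), with extraLabels set to `job="app"`:
//
//	requests_total 123        ->  requests_total{job="app"} 123
//	errors_total{path="/"} 5  ->  errors_total{job="app",path="/"} 5
//
// Comment lines starting with '#' are copied as is.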
func addExtraLabels(dst, src []byte, extraLabels string) []byte {
	for len(src) > 0 {
		var line []byte
		n := bytes.IndexByte(src, '\n')
		if n >= 0 {
			line = src[:n]
			src = src[n+1:]
		} else {
			line = src
			src = nil
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			// Skip empty lines
			continue
		}
		if bytes.HasPrefix(line, bashBytes) {
			// Copy comments as is
			dst = append(dst, line...)
			dst = append(dst, '\n')
			continue
		}
		n = bytes.IndexByte(line, '{')
		if n >= 0 {
			// The line already contains labels - insert extraLabels at the start of the label list.
			dst = append(dst, line[:n+1]...)
			dst = append(dst, extraLabels...)
			dst = append(dst, ',')
			dst = append(dst, line[n+1:]...)
		} else {
			// The line contains no labels - add extraLabels as the only label set.
			n = bytes.LastIndexByte(line, ' ')
			if n < 0 {
				panic(fmt.Errorf("BUG: missing whitespace between metric name and metric value in Prometheus text exposition line %q", line))
			}
			dst = append(dst, line[:n]...)
			dst = append(dst, '{')
			dst = append(dst, extraLabels...)
			dst = append(dst, '}')
			dst = append(dst, line[n:]...)
		}
		dst = append(dst, '\n')
	}
	return dst
}

// bashBytes contains the prefix of comment lines ('# HELP', '# TYPE', etc.)
// in Prometheus text exposition format.
var bashBytes = []byte("#")