tracing updates

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 14:32:27 +03:00
parent 4b3ba3f76a
commit 34c80e29bc
8 changed files with 162 additions and 143 deletions
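
The common thread in the diff below: the per-method span names ("ExecContext", "QueryContext", ...) become a single "sdk.database" client span, the "method"/"query"/"args" labels become "db.method"/"db.statement"/"db.args", an "x-request-id" label is copied from the context when present, and errors are reported through span.SetStatus instead of the old error/err labels. Condensed into one place, the pattern looks roughly like the sketch below; the tracer and requestid identifiers are the ones used in the diff, while the traceDBCall helper, its signature, and the package name are illustrative and not part of this repository.

package sqlwrapper // hypothetical package name, for illustration only

import (
	"context"

	requestid "go.unistack.org/micro-wrapper-requestid/v3"
	"go.unistack.org/micro/v3/tracer"
)

// traceDBCall condenses the span handling this commit applies around each
// statement method. fn stands in for the actual driver call.
func traceDBCall(ctx context.Context, tr tracer.Tracer, method, statement string, fn func(context.Context) error) error {
	// one generic client span instead of per-method span names
	nctx, span := tr.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
	defer span.Finish()

	// db.* labels replace the old "method"/"query" labels
	span.AddLabels("db.method", method)
	span.AddLabels("db.statement", statement)

	// propagate the request id into the span when it is present in the context
	if id, ok := ctx.Value(requestid.XRequestIDKey).(string); ok {
		span.AddLabels("x-request-id", id)
	}

	err := fn(nctx)
	if err != nil {
		// SetStatus replaces the old error=true / err=<msg> labels
		span.SetStatus(tracer.SpanStatusError, err.Error())
	}
	return err
}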

stmt.go

@@ -6,6 +6,7 @@ import (
"fmt"
"time"
requestid "go.unistack.org/micro-wrapper-requestid/v3"
"go.unistack.org/micro/v3/tracer"
)
@@ -46,7 +47,7 @@ func (w *wrapperStmt) Close() error {
	w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
	if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Close", labelUnknown, td, err)...).Log(ctx, w.opts.LoggerLevel)
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)).Log(ctx, w.opts.LoggerLevel)
	}
	return err
}
@@ -87,7 +88,7 @@ func (w *wrapperStmt) Exec(args []driver.Value) (driver.Result, error) {
	w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
	if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Exec", labelUnknown, td, err)...).Log(ctx, w.opts.LoggerLevel)
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)).Log(ctx, w.opts.LoggerLevel)
	}
	return res, err
@@ -103,8 +104,7 @@ func (w *wrapperStmt) Query(args []driver.Value) (driver.Rows, error) {
	}
	labels := []string{labelMethod, "Query"}
	ts := time.Now()
	// nolint:staticcheck
	rows, err := w.stmt.Query(args)
	rows, err := w.stmt.Query(args) // nolint:staticcheck
	td := time.Since(ts)
	te := td.Seconds()
	if err != nil {
@@ -116,31 +116,40 @@ func (w *wrapperStmt) Query(args []driver.Value) (driver.Rows, error) {
	w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
	if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Query", labelUnknown, td, err)...).Log(ctx, w.opts.LoggerLevel)
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)).Log(ctx, w.opts.LoggerLevel)
	}
	return rows, err
}

// ColumnConverter implements driver.ColumnConverter
func (w *wrapperStmt) ColumnConverter(idx int) driver.ValueConverter {
	s, ok := w.stmt.(driver.ColumnConverter) // nolint:staticcheck
	if !ok {
		return nil
	}
	return s.ColumnConverter(idx)
}

// ExecContext implements driver.StmtExecContext ExecContext
func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	var nctx context.Context
	var span tracer.Span
	if w.ctx != nil {
		nctx, span = w.opts.Tracer.Start(w.ctx, "ExecContext")
		nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
	} else {
		nctx, span = w.opts.Tracer.Start(ctx, "ExecContext")
		nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
	}
	span.AddLabels("method", "ExecContext")
	span.AddLabels("db.method", "ExecContext")
	name := getQueryName(ctx)
	if name != "" {
		span.AddLabels("query", name)
	} else {
		name = labelUnknown
	}
	span.AddLabels("db.statement", name)
	defer span.Finish()
	if len(args) > 0 {
		span.AddLabels("args", fmt.Sprintf("%v", namedValueToLabels(args)))
		span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
	}
	if id, ok := ctx.Value(requestid.XRequestIDKey).(string); ok {
		span.AddLabels("x-request-id", id)
	}
	labels := []string{labelMethod, "ExecContext", labelQuery, name}
@@ -151,8 +160,7 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
		te := td.Seconds()
		if err != nil {
			w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
			span.AddLabels("error", true)
			span.AddLabels("err", err.Error())
			span.SetStatus(tracer.SpanStatusError, err.Error())
		} else {
			w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
		}
@@ -160,7 +168,7 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
		w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
		if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...).Log(ctx, w.opts.LoggerLevel)
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)).Log(ctx, w.opts.LoggerLevel)
		}
		return res, err
	}
@@ -168,11 +176,10 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
	values, err := namedValueToValue(args)
	if err != nil {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
		span.AddLabels("error", true)
		span.AddLabels("err", err.Error())
		span.SetStatus(tracer.SpanStatusError, err.Error())
		if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, 0, err)...).Log(ctx, w.opts.LoggerLevel)
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, 0, err)).Log(ctx, w.opts.LoggerLevel)
		}
		return nil, err
	}
@@ -182,8 +189,7 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
	te := td.Seconds()
	if err != nil {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
		span.AddLabels("error", true)
		span.AddLabels("err", err.Error())
		span.SetStatus(tracer.SpanStatusError, err.Error())
	} else {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
	}
@@ -192,7 +198,7 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
	w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
	if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...).Log(ctx, w.opts.LoggerLevel)
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)).Log(ctx, w.opts.LoggerLevel)
	}
	return res, err
@@ -202,21 +208,21 @@ func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	var nctx context.Context
	var span tracer.Span
	if w.ctx != nil {
		nctx, span = w.opts.Tracer.Start(w.ctx, "QueryContext")
		nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
	} else {
		nctx, span = w.opts.Tracer.Start(ctx, "QueryContext")
		nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
	}
	span.AddLabels("method", "QueryContext")
	span.AddLabels("db.method", "QueryContext")
	name := getQueryName(ctx)
	if name != "" {
		span.AddLabels("query", name)
	} else {
		name = labelUnknown
	}
	span.AddLabels("db.statement", name)
	defer span.Finish()
	if len(args) > 0 {
		span.AddLabels("args", fmt.Sprintf("%v", namedValueToLabels(args)))
		span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
	}
	if id, ok := ctx.Value(requestid.XRequestIDKey).(string); ok {
		span.AddLabels("x-request-id", id)
	}
	labels := []string{labelMethod, "QueryContext", labelQuery, name}
	if conn, ok := w.stmt.(driver.StmtQueryContext); ok {
@@ -226,8 +232,7 @@ func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue
		te := td.Seconds()
		if err != nil {
			w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
			span.AddLabels("error", true)
			span.AddLabels("err", err.Error())
			span.SetStatus(tracer.SpanStatusError, err.Error())
		} else {
			w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
		}
@@ -236,7 +241,7 @@ func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue
		w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
		if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...).Log(ctx, w.opts.LoggerLevel)
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)).Log(ctx, w.opts.LoggerLevel)
		}
		return rows, err
	}
@@ -245,11 +250,10 @@ func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue
	if err != nil {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
		span.AddLabels("error", true)
		span.AddLabels("err", err.Error())
		span.SetStatus(tracer.SpanStatusError, err.Error())
		if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, 0, err)...).Log(ctx, w.opts.LoggerLevel)
			w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, 0, err)).Log(ctx, w.opts.LoggerLevel)
		}
		return nil, err
	}
@@ -259,8 +263,7 @@ func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue
	te := td.Seconds()
	if err != nil {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
		span.AddLabels("error", true)
		span.AddLabels("err", err.Error())
		span.SetStatus(tracer.SpanStatusError, err.Error())
	} else {
		w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
	}
@@ -269,7 +272,7 @@ func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue
	w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
	if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...).Log(ctx, w.opts.LoggerLevel)
		w.opts.Logger.Fields(w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)).Log(ctx, w.opts.LoggerLevel)
	}
	return rows, err
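
The logging calls throughout this file now pass getCallerName() instead of labelUnknown. That helper is added elsewhere in this commit and its body is not shown here; purely as an illustration, a runtime-based caller lookup might look like the following guess, not the repository's actual implementation, and the skip depth in particular depends on the real call structure.

package sqlwrapper // hypothetical package name, for illustration only

import (
	"path"
	"runtime"
)

// getCallerName (guessed implementation) returns the short name of the
// function a few frames up the stack, so log fields carry a caller label
// instead of the generic labelUnknown value.
func getCallerName() string {
	pc, _, _, ok := runtime.Caller(2) // assumed skip depth
	if !ok {
		return "unknown"
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return "unknown"
	}
	return path.Base(fn.Name())
}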