// Package handler implements service debug handler embedded in go-micro services
package handler

import (
	"context"
	"time"

	"github.com/micro/go-micro/v2/client"
	"github.com/micro/go-micro/v2/debug/log"
	proto "github.com/micro/go-micro/v2/debug/service/proto"
	"github.com/micro/go-micro/v2/debug/stats"
	"github.com/micro/go-micro/v2/debug/trace"
	"github.com/micro/go-micro/v2/server"
)
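
// The service runtime typically registers this handler internally on its
// server. A minimal registration sketch (assumes a go-micro server "srv"
// and client "cli"; the names are illustrative, not part of this package):
//
//	srv.Handle(srv.NewHandler(
//		NewHandler(cli),
//		server.InternalHandler(true),
//	))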

// NewHandler returns an instance of the Debug Handler
func NewHandler(c client.Client) *Debug {
	return &Debug{
		log:   log.DefaultLog,
		stats: stats.DefaultStats,
		trace: trace.DefaultTracer,
		cache: c.Options().Cache,
	}
}
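
// Debug is the debug handler. It exposes health, stats, trace, log and
// client cache information for the service it is embedded in.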
type Debug struct {
	// embedded to satisfy the DebugHandler interface
	proto.DebugHandler
	// the logger for retrieving logs
	log log.Log
	// the stats collector
	stats stats.Stats
	// the tracer
	trace trace.Tracer
	// the client cache
	cache *client.Cache
}
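
// Health reports the health status of the service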
func (d *Debug) Health(ctx context.Context, req *proto.HealthRequest, rsp *proto.HealthResponse) error {
	rsp.Status = "ok"
	return nil
}
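
// Stats returns a snapshot of the service's runtime stats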
func (d *Debug) Stats(ctx context.Context, req *proto.StatsRequest, rsp *proto.StatsResponse) error {
	stats, err := d.stats.Read()
	if err != nil {
		return err
	}

	if len(stats) == 0 {
		return nil
	}

	// write the response values
	rsp.Timestamp = uint64(stats[0].Timestamp)
	rsp.Started = uint64(stats[0].Started)
	rsp.Uptime = uint64(stats[0].Uptime)
	rsp.Memory = stats[0].Memory
	rsp.Gc = stats[0].GC
	rsp.Threads = stats[0].Threads
	rsp.Requests = stats[0].Requests
	rsp.Errors = stats[0].Errors

	return nil
}
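
// Trace returns spans read from the tracer, filtered by the requested trace id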
func (d *Debug) Trace(ctx context.Context, req *proto.TraceRequest, rsp *proto.TraceResponse) error {
	traces, err := d.trace.Read(trace.ReadTrace(req.Id))
	if err != nil {
		return err
	}

	for _, t := range traces {
		var typ proto.SpanType
		switch t.Type {
		case trace.SpanTypeRequestInbound:
			typ = proto.SpanType_INBOUND
		case trace.SpanTypeRequestOutbound:
			typ = proto.SpanType_OUTBOUND
		}

		rsp.Spans = append(rsp.Spans, &proto.Span{
			Trace:    t.Trace,
			Id:       t.Id,
			Parent:   t.Parent,
			Name:     t.Name,
			Started:  uint64(t.Started.UnixNano()),
			Duration: uint64(t.Duration.Nanoseconds()),
			Type:     typ,
			Metadata: t.Metadata,
		})
	}

	return nil
}
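
// Log sends log records to the caller, either replaying stored records
// or, when req.Stream is set, streaming new records as they are written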
func (d *Debug) Log(ctx context.Context, stream server.Stream) error {
	req := new(proto.LogRequest)
	if err := stream.Recv(req); err != nil {
		return err
	}

	var options []log.ReadOption

	// time.Unix(0, 0) is the unix epoch rather than the zero time, so check
	// the raw request value instead of calling IsZero on the converted time
	since := time.Unix(req.Since, 0)
	if req.Since > 0 {
		options = append(options, log.Since(since))
	}

	count := int(req.Count)
	if count > 0 {
		options = append(options, log.Count(count))
	}

	if req.Stream {
		// TODO: we need to figure out how to close the log stream.
		// When a client disconnects the connection seems to stay open until
		// some timeout expires, which means the map of streams might end up
		// leaking memory if not cleaned up properly.
		lgStream, err := d.log.Stream()
		if err != nil {
			return err
		}
		defer lgStream.Stop()

		for record := range lgStream.Chan() {
			// copy metadata
			metadata := make(map[string]string)
			for k, v := range record.Metadata {
				metadata[k] = v
			}
			// send record
			if err := stream.Send(&proto.Record{
				Timestamp: record.Timestamp.Unix(),
				Message:   record.Message.(string),
				Metadata:  metadata,
			}); err != nil {
				return err
			}
		}

		// done streaming, return
		return nil
	}

	// get the log records
	records, err := d.log.Read(options...)
	if err != nil {
		return err
	}

	// send all the logs downstream
	for _, record := range records {
		// copy metadata
		metadata := make(map[string]string)
		for k, v := range record.Metadata {
			metadata[k] = v
		}
		// send record
		if err := stream.Send(&proto.Record{
			Timestamp: record.Timestamp.Unix(),
			Message:   record.Message.(string),
			Metadata:  metadata,
		}); err != nil {
			return err
		}
	}

	return nil
}

// Cache returns all the key value pairs in the client cache
func (d *Debug) Cache(ctx context.Context, req *proto.CacheRequest, rsp *proto.CacheResponse) error {
	rsp.Values = d.cache.List()
	return nil
}