Compare commits

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | b0cbddcfdd | |
| | d0534a7d05 | |
| | ab051405c5 | |
| | 268b3dbff4 | |
@@ -1,5 +1,5 @@
# Micro

[](https://opensource.org/licenses/Apache-2.0)
[](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
[](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)

cluster/hasql/cluster.go | 235 (new file)
@@ -0,0 +1,235 @@
package sql

import (
	"context"
	"database/sql"
	"reflect"
	"unsafe"

	"golang.yandex/hasql/v2"
)

func newSQLRowError() *sql.Row {
	row := &sql.Row{}
	t := reflect.TypeOf(row).Elem()
	field, _ := t.FieldByName("err")
	rowPtr := unsafe.Pointer(row)
	errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
	errPtr := (*error)(errFieldPtr)
	*errPtr = ErrorNoAliveNodes
	return row
}

type ClusterQuerier interface {
	Querier
	WaitForNodes(ctx context.Context, criterion ...hasql.NodeStateCriterion) error
}

type Cluster struct {
	hasql   *hasql.Cluster[Querier]
	options ClusterOptions
}

// NewCluster returns [Querier] that provides cluster of nodes
func NewCluster[T Querier](opts ...ClusterOption) (ClusterQuerier, error) {
	options := ClusterOptions{Context: context.Background()}
	for _, opt := range opts {
		opt(&options)
	}
	if options.NodeChecker == nil {
		return nil, ErrClusterChecker
	}
	if options.NodeDiscoverer == nil {
		return nil, ErrClusterDiscoverer
	}
	if options.NodePicker == nil {
		return nil, ErrClusterPicker
	}

	if options.Retries < 1 {
		options.Retries = 1
	}

	if options.NodeStateCriterion == 0 {
		options.NodeStateCriterion = hasql.Primary
	}

	options.Options = append(options.Options, hasql.WithNodePicker(options.NodePicker))
	if p, ok := options.NodePicker.(*CustomPicker[Querier]); ok {
		p.opts.Priority = options.NodePriority
	}

	c, err := hasql.NewCluster(
		options.NodeDiscoverer,
		options.NodeChecker,
		options.Options...,
	)
	if err != nil {
		return nil, err
	}

	return &Cluster{hasql: c, options: options}, nil
}

func (c *Cluster) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
	var tx *sql.Tx
	var err error

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			if tx, err = n.DB().BeginTx(ctx, opts); err != nil && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if tx == nil && err == nil {
		err = ErrorNoAliveNodes
	}

	return tx, err
}

func (c *Cluster) Close() error {
	return c.hasql.Close()
}

func (c *Cluster) Conn(ctx context.Context) (*sql.Conn, error) {
	var conn *sql.Conn
	var err error

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			if conn, err = n.DB().Conn(ctx); err != nil && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if conn == nil && err == nil {
		err = ErrorNoAliveNodes
	}

	return conn, err
}

func (c *Cluster) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	var res sql.Result
	var err error

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			if res, err = n.DB().ExecContext(ctx, query, args...); err != nil && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if res == nil && err == nil {
		err = ErrorNoAliveNodes
	}

	return res, err
}

func (c *Cluster) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
	var res *sql.Stmt
	var err error

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			if res, err = n.DB().PrepareContext(ctx, query); err != nil && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if res == nil && err == nil {
		err = ErrorNoAliveNodes
	}

	return res, err
}

func (c *Cluster) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
	var res *sql.Rows
	var err error

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			if res, err = n.DB().QueryContext(ctx, query); err != nil && err != sql.ErrNoRows && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if res == nil && err == nil {
		err = ErrorNoAliveNodes
	}

	return res, err
}

func (c *Cluster) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	var res *sql.Row

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		for ; retries < c.options.Retries; retries++ {
			res = n.DB().QueryRowContext(ctx, query, args...)
			if res.Err() == nil {
				return false
			} else if res.Err() != nil && retries >= c.options.Retries {
				return false
			}
		}
		return true
	})

	if res == nil {
		res = newSQLRowError()
	}

	return res
}

func (c *Cluster) PingContext(ctx context.Context) error {
	var err error
	var ok bool

	retries := 0
	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
		ok = true
		for ; retries < c.options.Retries; retries++ {
			if err = n.DB().PingContext(ctx); err != nil && retries >= c.options.Retries {
				return true
			}
		}
		return false
	})

	if !ok {
		err = ErrorNoAliveNodes
	}

	return err
}

func (c *Cluster) WaitForNodes(ctx context.Context, criterions ...hasql.NodeStateCriterion) error {
	for _, criterion := range criterions {
		if _, err := c.hasql.WaitForNode(ctx, criterion); err != nil {
			return err
		}
	}
	return nil
}
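For orientation, a minimal usage sketch of the new cluster type, not part of this diff. The import path go.unistack.org/micro/v4/cluster/hasql is inferred from the directory layout, the pgx driver and the DSNs are placeholders; the option and type names are the ones added above.

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/jackc/pgx/v5/stdlib" // placeholder driver choice
	"golang.yandex/hasql/v2"

	cluster "go.unistack.org/micro/v4/cluster/hasql" // assumed import path; the package name is sql
)

func main() {
	// Two placeholder pools; *sql.DB satisfies the Querier interface.
	primary, err := sql.Open("pgx", "postgres://user:pass@node1/db")
	if err != nil {
		log.Fatal(err)
	}
	standby, err := sql.Open("pgx", "postgres://user:pass@node2/db")
	if err != nil {
		log.Fatal(err)
	}

	c, err := cluster.NewCluster[cluster.Querier](
		cluster.WithClusterNodeChecker(hasql.PostgreSQLChecker),
		cluster.WithClusterNodePicker(cluster.NewCustomPicker[cluster.Querier](
			cluster.CustomPickerMaxLag(100),
		)),
		cluster.WithClusterNodes(
			cluster.ClusterNode{Name: "node1", DB: primary, Priority: 1},
			cluster.ClusterNode{Name: "node2", DB: standby, Priority: 2},
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := c.WaitForNodes(ctx, hasql.Primary); err != nil {
		log.Fatal(err)
	}

	// Route this single read to a standby via the context criterion.
	var one int
	row := c.QueryRowContext(cluster.NodeStateCriterion(ctx, hasql.PreferStandby), "SELECT 1")
	if err := row.Scan(&one); err != nil {
		log.Fatal(err)
	}
}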
cluster/hasql/cluster_test.go | 171 (new file)
@@ -0,0 +1,171 @@
package sql

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
	"golang.yandex/hasql/v2"
)

func TestNewCluster(t *testing.T) {
	dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbMaster.Close()
	dbMasterMock.MatchExpectationsInOrder(false)

	dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(1, 0)).
		RowsWillBeClosed().
		WithoutArgs()

	dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("master-dc1"))

	dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbDRMaster.Close()
	dbDRMasterMock.MatchExpectationsInOrder(false)

	dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 40)).
		RowsWillBeClosed().
		WithoutArgs()

	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("drmaster1-dc2"))

	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("drmaster"))

	dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbSlaveDC1.Close()
	dbSlaveDC1Mock.MatchExpectationsInOrder(false)

	dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 50)).
		RowsWillBeClosed().
		WithoutArgs()

	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("slave-dc1"))

	dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbSlaveDC2.Close()
	dbSlaveDC1Mock.MatchExpectationsInOrder(false)

	dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 50)).
		RowsWillBeClosed().
		WithoutArgs()

	dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("slave-dc1"))

	tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
	defer cancel()

	c, err := NewCluster[Querier](
		WithClusterContext(tctx),
		WithClusterNodeChecker(hasql.PostgreSQLChecker),
		WithClusterNodePicker(NewCustomPicker[Querier](
			CustomPickerMaxLag(100),
		)),
		WithClusterNodes(
			ClusterNode{"slave-dc1", dbSlaveDC1, 1},
			ClusterNode{"master-dc1", dbMaster, 1},
			ClusterNode{"slave-dc2", dbSlaveDC2, 2},
			ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
		),
		WithClusterOptions(
			hasql.WithUpdateInterval[Querier](2*time.Second),
			hasql.WithUpdateTimeout[Querier](1*time.Second),
		),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
		t.Fatal(err)
	}

	time.Sleep(500 * time.Millisecond)

	node1Name := ""
	fmt.Printf("check for Standby\n")
	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.Standby), "SELECT node_name as name"); row.Err() != nil {
		t.Fatal(row.Err())
	} else if err = row.Scan(&node1Name); err != nil {
		t.Fatal(err)
	} else if "slave-dc1" != node1Name {
		t.Fatalf("invalid node name %s != %s", "slave-dc1", node1Name)
	}

	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("slave-dc1"))

	node2Name := ""
	fmt.Printf("check for PreferStandby\n")
	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() != nil {
		t.Fatal(row.Err())
	} else if err = row.Scan(&node2Name); err != nil {
		t.Fatal(err)
	} else if "slave-dc1" != node2Name {
		t.Fatalf("invalid node name %s != %s", "slave-dc1", node2Name)
	}

	node3Name := ""
	fmt.Printf("check for PreferPrimary\n")
	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferPrimary), "SELECT node_name as name"); row.Err() != nil {
		t.Fatal(row.Err())
	} else if err = row.Scan(&node3Name); err != nil {
		t.Fatal(err)
	} else if "master-dc1" != node3Name {
		t.Fatalf("invalid node name %s != %s", "master-dc1", node3Name)
	}

	dbSlaveDC1Mock.ExpectQuery(`.*`).WillReturnRows(sqlmock.NewRows([]string{"role"}).RowError(1, fmt.Errorf("row error")))

	time.Sleep(2 * time.Second)

	fmt.Printf("check for PreferStandby\n")
	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() == nil {
		t.Fatal("must return error")
	}

	if dbMasterErr := dbMasterMock.ExpectationsWereMet(); dbMasterErr != nil {
		t.Error(dbMasterErr)
	}
}
cluster/hasql/db.go | 25 (new file)
@@ -0,0 +1,25 @@
package sql

import (
	"context"
	"database/sql"
)

type Querier interface {
	// Basic connection methods
	PingContext(ctx context.Context) error
	Close() error

	// Query methods with context
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row

	// Prepared statements with context
	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)

	// Transaction management with context
	BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)

	Conn(ctx context.Context) (*sql.Conn, error)
}
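Worth noting, as an illustrative aside rather than part of the diff: the interface is shaped after the standard connection pool, so *sql.DB can be registered as a cluster node directly. A compile-time assertion in the same package would confirm that.

package sql

import "database/sql"

// Illustrative compile-time check: *sql.DB provides every method Querier
// requires (PingContext, ExecContext, QueryContext, BeginTx, Conn, ...),
// so pools created by sql.Open can be passed to WithClusterNodes as-is.
var _ Querier = (*sql.DB)(nil)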
cluster/hasql/driver.go | 295 (new file)
@@ -0,0 +1,295 @@
package sql

import (
	"context"
	"database/sql"
	"database/sql/driver"
	"io"
	"sync"
	"time"
)

// OpenDBWithCluster creates a [*sql.DB] that uses the [ClusterQuerier]
func OpenDBWithCluster(db ClusterQuerier) (*sql.DB, error) {
	driver := NewClusterDriver(db)
	connector, err := driver.OpenConnector("")
	if err != nil {
		return nil, err
	}
	return sql.OpenDB(connector), nil
}

// ClusterDriver implements [driver.Driver] and driver.Connector for an existing [Querier]
type ClusterDriver struct {
	db ClusterQuerier
}

// NewClusterDriver creates a new [driver.Driver] that uses an existing [ClusterQuerier]
func NewClusterDriver(db ClusterQuerier) *ClusterDriver {
	return &ClusterDriver{db: db}
}

// Open implements [driver.Driver.Open]
func (d *ClusterDriver) Open(name string) (driver.Conn, error) {
	return d.Connect(context.Background())
}

// OpenConnector implements [driver.DriverContext.OpenConnector]
func (d *ClusterDriver) OpenConnector(name string) (driver.Connector, error) {
	return d, nil
}

// Connect implements [driver.Connector.Connect]
func (d *ClusterDriver) Connect(ctx context.Context) (driver.Conn, error) {
	conn, err := d.db.Conn(ctx)
	if err != nil {
		return nil, err
	}
	return &dbConn{conn: conn}, nil
}

// Driver implements [driver.Connector.Driver]
func (d *ClusterDriver) Driver() driver.Driver {
	return d
}

// dbConn implements driver.Conn with both context and legacy methods
type dbConn struct {
	conn *sql.Conn
	mu   sync.Mutex
}

// Prepare implements [driver.Conn.Prepare] (legacy method)
func (c *dbConn) Prepare(query string) (driver.Stmt, error) {
	return c.PrepareContext(context.Background(), query)
}

// PrepareContext implements [driver.ConnPrepareContext.PrepareContext]
func (c *dbConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	stmt, err := c.conn.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}

	return &dbStmt{stmt: stmt}, nil
}

// Exec implements [driver.Execer.Exec] (legacy method)
func (c *dbConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	namedArgs := make([]driver.NamedValue, len(args))
	for i, value := range args {
		namedArgs[i] = driver.NamedValue{Value: value}
	}
	return c.ExecContext(context.Background(), query, namedArgs)
}

// ExecContext implements [driver.ExecerContext.ExecContext]
func (c *dbConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Convert driver.NamedValue to any
	interfaceArgs := make([]any, len(args))
	for i, arg := range args {
		interfaceArgs[i] = arg.Value
	}

	return c.conn.ExecContext(ctx, query, interfaceArgs...)
}

// Query implements [driver.Queryer.Query] (legacy method)
func (c *dbConn) Query(query string, args []driver.Value) (driver.Rows, error) {
	namedArgs := make([]driver.NamedValue, len(args))
	for i, value := range args {
		namedArgs[i] = driver.NamedValue{Value: value}
	}
	return c.QueryContext(context.Background(), query, namedArgs)
}

// QueryContext implements [driver.QueryerContext.QueryContext]
func (c *dbConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Convert driver.NamedValue to any
	interfaceArgs := make([]any, len(args))
	for i, arg := range args {
		interfaceArgs[i] = arg.Value
	}

	rows, err := c.conn.QueryContext(ctx, query, interfaceArgs...)
	if err != nil {
		return nil, err
	}

	return &dbRows{rows: rows}, nil
}

// Begin implements [driver.Conn.Begin] (legacy method)
func (c *dbConn) Begin() (driver.Tx, error) {
	return c.BeginTx(context.Background(), driver.TxOptions{})
}

// BeginTx implements [driver.ConnBeginTx.BeginTx]
func (c *dbConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	sqlOpts := &sql.TxOptions{
		Isolation: sql.IsolationLevel(opts.Isolation),
		ReadOnly:  opts.ReadOnly,
	}

	tx, err := c.conn.BeginTx(ctx, sqlOpts)
	if err != nil {
		return nil, err
	}

	return &dbTx{tx: tx}, nil
}

// Ping implements [driver.Pinger.Ping]
func (c *dbConn) Ping(ctx context.Context) error {
	return c.conn.PingContext(ctx)
}

// Close implements [driver.Conn.Close]
func (c *dbConn) Close() error {
	return c.conn.Close()
}

// IsValid implements [driver.Validator.IsValid]
func (c *dbConn) IsValid() bool {
	// Ping with a short timeout to check if the connection is still valid
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	return c.conn.PingContext(ctx) == nil
}

// dbStmt implements [driver.Stmt] with both context and legacy methods
type dbStmt struct {
	stmt *sql.Stmt
	mu   sync.Mutex
}

// Close implements [driver.Stmt.Close]
func (s *dbStmt) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.stmt.Close()
}

// Close implements [driver.Stmt.NumInput]
func (s *dbStmt) NumInput() int {
	return -1 // Number of parameters is unknown
}

// Exec implements [driver.Stmt.Exec] (legacy method)
func (s *dbStmt) Exec(args []driver.Value) (driver.Result, error) {
	namedArgs := make([]driver.NamedValue, len(args))
	for i, value := range args {
		namedArgs[i] = driver.NamedValue{Value: value}
	}
	return s.ExecContext(context.Background(), namedArgs)
}

// ExecContext implements [driver.StmtExecContext.ExecContext]
func (s *dbStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	interfaceArgs := make([]any, len(args))
	for i, arg := range args {
		interfaceArgs[i] = arg.Value
	}
	return s.stmt.ExecContext(ctx, interfaceArgs...)
}

// Query implements [driver.Stmt.Query] (legacy method)
func (s *dbStmt) Query(args []driver.Value) (driver.Rows, error) {
	namedArgs := make([]driver.NamedValue, len(args))
	for i, value := range args {
		namedArgs[i] = driver.NamedValue{Value: value}
	}
	return s.QueryContext(context.Background(), namedArgs)
}

// QueryContext implements [driver.StmtQueryContext.QueryContext]
func (s *dbStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	interfaceArgs := make([]any, len(args))
	for i, arg := range args {
		interfaceArgs[i] = arg.Value
	}

	rows, err := s.stmt.QueryContext(ctx, interfaceArgs...)
	if err != nil {
		return nil, err
	}

	return &dbRows{rows: rows}, nil
}

// dbRows implements [driver.Rows]
type dbRows struct {
	rows *sql.Rows
}

// Columns implements [driver.Rows.Columns]
func (r *dbRows) Columns() []string {
	cols, err := r.rows.Columns()
	if err != nil {
		// This shouldn't happen if the query was successful
		return []string{}
	}
	return cols
}

// Close implements [driver.Rows.Close]
func (r *dbRows) Close() error {
	return r.rows.Close()
}

// Next implements [driver.Rows.Next]
func (r *dbRows) Next(dest []driver.Value) error {
	if !r.rows.Next() {
		if err := r.rows.Err(); err != nil {
			return err
		}
		return io.EOF
	}

	// Create a slice of interfaces to scan into
	scanArgs := make([]any, len(dest))
	for i := range scanArgs {
		scanArgs[i] = &dest[i]
	}

	return r.rows.Scan(scanArgs...)
}

// dbTx implements [driver.Tx]
type dbTx struct {
	tx *sql.Tx
	mu sync.Mutex
}

// Commit implements [driver.Tx.Commit]
func (t *dbTx) Commit() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.tx.Commit()
}

// Rollback implements [driver.Tx.Rollback]
func (t *dbTx) Rollback() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.tx.Rollback()
}
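A hedged usage sketch of the driver wrapper, not part of this diff: it assumes a ClusterQuerier already built with NewCluster and shows how code written for the plain database/sql API can sit on top of the cluster.

package sql

import (
	"context"
	"database/sql"

	"golang.yandex/hasql/v2"
)

// openStandardDB is an illustrative helper, not part of the commit.
func openStandardDB(ctx context.Context, c ClusterQuerier) (*sql.DB, error) {
	db, err := OpenDBWithCluster(c)
	if err != nil {
		return nil, err
	}
	// When the pool dials a new connection, ClusterDriver.Connect receives
	// the caller's context, so the criterion injected here selects the node.
	if err := db.PingContext(NodeStateCriterion(ctx, hasql.Primary)); err != nil {
		_ = db.Close()
		return nil, err
	}
	return db, nil
}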
cluster/hasql/driver_test.go | 141 (new file)
@@ -0,0 +1,141 @@
package sql

import (
	"context"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
	"golang.yandex/hasql/v2"
)

func TestDriver(t *testing.T) {
	dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbMaster.Close()
	dbMasterMock.MatchExpectationsInOrder(false)

	dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(1, 0)).
		RowsWillBeClosed().
		WithoutArgs()

	dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("master-dc1"))

	dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbDRMaster.Close()
	dbDRMasterMock.MatchExpectationsInOrder(false)

	dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 40)).
		RowsWillBeClosed().
		WithoutArgs()

	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("drmaster1-dc2"))

	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("drmaster"))

	dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbSlaveDC1.Close()
	dbSlaveDC1Mock.MatchExpectationsInOrder(false)

	dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 50)).
		RowsWillBeClosed().
		WithoutArgs()

	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("slave-dc1"))

	dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
	if err != nil {
		t.Fatal(err)
	}
	defer dbSlaveDC2.Close()
	dbSlaveDC1Mock.MatchExpectationsInOrder(false)

	dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
		sqlmock.NewRowsWithColumnDefinition(
			sqlmock.NewColumn("role").OfType("int8", 0),
			sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
			AddRow(2, 50)).
		RowsWillBeClosed().
		WithoutArgs()

	dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
		sqlmock.NewRows([]string{"name"}).
			AddRow("slave-dc1"))

	tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
	defer cancel()

	c, err := NewCluster[Querier](
		WithClusterContext(tctx),
		WithClusterNodeChecker(hasql.PostgreSQLChecker),
		WithClusterNodePicker(NewCustomPicker[Querier](
			CustomPickerMaxLag(100),
		)),
		WithClusterNodes(
			ClusterNode{"slave-dc1", dbSlaveDC1, 1},
			ClusterNode{"master-dc1", dbMaster, 1},
			ClusterNode{"slave-dc2", dbSlaveDC2, 2},
			ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
		),
		WithClusterOptions(
			hasql.WithUpdateInterval[Querier](2*time.Second),
			hasql.WithUpdateTimeout[Querier](1*time.Second),
		),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
		t.Fatal(err)
	}

	db, err := OpenDBWithCluster(c)
	if err != nil {
		t.Fatal(err)
	}

	// Use context methods
	row := db.QueryRowContext(NodeStateCriterion(t.Context(), hasql.Primary), "SELECT node_name as name")
	if err = row.Err(); err != nil {
		t.Fatal(err)
	}

	nodeName := ""
	if err = row.Scan(&nodeName); err != nil {
		t.Fatal(err)
	}

	if nodeName != "master-dc1" {
		t.Fatalf("invalid node_name %s != %s", "master-dc1", nodeName)
	}
}
cluster/hasql/error.go | 10 (new file)
@@ -0,0 +1,10 @@
package sql

import "errors"

var (
	ErrClusterChecker    = errors.New("cluster node checker required")
	ErrClusterDiscoverer = errors.New("cluster node discoverer required")
	ErrClusterPicker     = errors.New("cluster node picker required")
	ErrorNoAliveNodes    = errors.New("cluster no alive nodes")
)
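A small sketch, not in the diff, of how callers might branch on the sentinel errors; the retry count and backoff are placeholders.

package sql

import (
	"context"
	"errors"
	"time"
)

// pingWithBackoff is illustrative only: ErrorNoAliveNodes means no node
// currently matches the requested state criterion, which is usually worth
// retrying, unlike ordinary query errors.
func pingWithBackoff(ctx context.Context, c ClusterQuerier) error {
	for i := 0; i < 3; i++ {
		err := c.PingContext(ctx)
		if err == nil {
			return nil
		}
		if !errors.Is(err, ErrorNoAliveNodes) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second): // placeholder backoff
		}
	}
	return ErrorNoAliveNodes
}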
cluster/hasql/options.go | 110 (new file)
@@ -0,0 +1,110 @@
package sql

import (
	"context"
	"math"

	"golang.yandex/hasql/v2"
)

// ClusterOptions contains cluster specific options
type ClusterOptions struct {
	NodeChecker        hasql.NodeChecker
	NodePicker         hasql.NodePicker[Querier]
	NodeDiscoverer     hasql.NodeDiscoverer[Querier]
	Options            []hasql.ClusterOpt[Querier]
	Context            context.Context
	Retries            int
	NodePriority       map[string]int32
	NodeStateCriterion hasql.NodeStateCriterion
}

// ClusterOption apply cluster options to ClusterOptions
type ClusterOption func(*ClusterOptions)

// WithClusterNodeChecker pass hasql.NodeChecker to cluster options
func WithClusterNodeChecker(c hasql.NodeChecker) ClusterOption {
	return func(o *ClusterOptions) {
		o.NodeChecker = c
	}
}

// WithClusterNodePicker pass hasql.NodePicker to cluster options
func WithClusterNodePicker(p hasql.NodePicker[Querier]) ClusterOption {
	return func(o *ClusterOptions) {
		o.NodePicker = p
	}
}

// WithClusterNodeDiscoverer pass hasql.NodeDiscoverer to cluster options
func WithClusterNodeDiscoverer(d hasql.NodeDiscoverer[Querier]) ClusterOption {
	return func(o *ClusterOptions) {
		o.NodeDiscoverer = d
	}
}

// WithRetries retry count on other nodes in case of error
func WithRetries(n int) ClusterOption {
	return func(o *ClusterOptions) {
		o.Retries = n
	}
}

// WithClusterContext pass context.Context to cluster options and used for checks
func WithClusterContext(ctx context.Context) ClusterOption {
	return func(o *ClusterOptions) {
		o.Context = ctx
	}
}

// WithClusterOptions pass hasql.ClusterOpt
func WithClusterOptions(opts ...hasql.ClusterOpt[Querier]) ClusterOption {
	return func(o *ClusterOptions) {
		o.Options = append(o.Options, opts...)
	}
}

// WithClusterNodeStateCriterion pass default hasql.NodeStateCriterion
func WithClusterNodeStateCriterion(c hasql.NodeStateCriterion) ClusterOption {
	return func(o *ClusterOptions) {
		o.NodeStateCriterion = c
	}
}

type ClusterNode struct {
	Name     string
	DB       Querier
	Priority int32
}

// WithClusterNodes create cluster with static NodeDiscoverer
func WithClusterNodes(cns ...ClusterNode) ClusterOption {
	return func(o *ClusterOptions) {
		nodes := make([]*hasql.Node[Querier], 0, len(cns))
		if o.NodePriority == nil {
			o.NodePriority = make(map[string]int32, len(cns))
		}
		for _, cn := range cns {
			nodes = append(nodes, hasql.NewNode(cn.Name, cn.DB))
			if cn.Priority == 0 {
				cn.Priority = math.MaxInt32
			}
			o.NodePriority[cn.Name] = cn.Priority
		}
		o.NodeDiscoverer = hasql.NewStaticNodeDiscoverer(nodes...)
	}
}

type nodeStateCriterionKey struct{}

// NodeStateCriterion inject hasql.NodeStateCriterion to context
func NodeStateCriterion(ctx context.Context, c hasql.NodeStateCriterion) context.Context {
	return context.WithValue(ctx, nodeStateCriterionKey{}, c)
}

func (c *Cluster) getNodeStateCriterion(ctx context.Context) hasql.NodeStateCriterion {
	if v, ok := ctx.Value(nodeStateCriterionKey{}).(hasql.NodeStateCriterion); ok {
		return v
	}
	return c.options.NodeStateCriterion
}
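A short sketch, not in the diff, of how the cluster-wide default criterion and the per-request override interact; the query text is a placeholder.

package sql

import (
	"context"

	"golang.yandex/hasql/v2"
)

// readVsWrite is illustrative only: the write uses the cluster-wide default
// criterion (Primary unless WithClusterNodeStateCriterion says otherwise),
// while the read overrides it for just that call via the context helper.
func readVsWrite(ctx context.Context, c ClusterQuerier) error {
	if _, err := c.ExecContext(ctx, "UPDATE items SET n = n + 1"); err != nil {
		return err
	}
	row := c.QueryRowContext(NodeStateCriterion(ctx, hasql.PreferStandby), "SELECT count(*) FROM items")
	var n int64
	return row.Scan(&n)
}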
cluster/hasql/picker.go | 113 (new file)
@@ -0,0 +1,113 @@
package sql

import (
	"fmt"
	"math"
	"time"

	"golang.yandex/hasql/v2"
)

// compile time guard
var _ hasql.NodePicker[Querier] = (*CustomPicker[Querier])(nil)

// CustomPickerOptions holds options to pick nodes
type CustomPickerOptions struct {
	MaxLag   int
	Priority map[string]int32
	Retries  int
}

// CustomPickerOption func apply option to CustomPickerOptions
type CustomPickerOption func(*CustomPickerOptions)

// CustomPickerMaxLag specifies max lag for which node can be used
func CustomPickerMaxLag(n int) CustomPickerOption {
	return func(o *CustomPickerOptions) {
		o.MaxLag = n
	}
}

// NewCustomPicker creates new node picker
func NewCustomPicker[T Querier](opts ...CustomPickerOption) *CustomPicker[Querier] {
	options := CustomPickerOptions{}
	for _, o := range opts {
		o(&options)
	}
	return &CustomPicker[Querier]{opts: options}
}

// CustomPicker holds node picker options
type CustomPicker[T Querier] struct {
	opts CustomPickerOptions
}

// PickNode used to return specific node
func (p *CustomPicker[T]) PickNode(cnodes []hasql.CheckedNode[T]) hasql.CheckedNode[T] {
	for _, n := range cnodes {
		fmt.Printf("node %s\n", n.Node.String())
	}
	return cnodes[0]
}

func (p *CustomPicker[T]) getPriority(nodeName string) int32 {
	if prio, ok := p.opts.Priority[nodeName]; ok {
		return prio
	}
	return math.MaxInt32 // Default to lowest priority
}

// CompareNodes used to sort nodes
func (p *CustomPicker[T]) CompareNodes(a, b hasql.CheckedNode[T]) int {
	// Get replication lag values
	aLag := a.Info.(interface{ ReplicationLag() int }).ReplicationLag()
	bLag := b.Info.(interface{ ReplicationLag() int }).ReplicationLag()

	// First check that lag lower then MaxLag
	if aLag > p.opts.MaxLag && bLag > p.opts.MaxLag {
		return 0 // both are equal
	}

	// If one node exceeds MaxLag and the other doesn't, prefer the one that doesn't
	if aLag > p.opts.MaxLag {
		return 1 // b is better
	}
	if bLag > p.opts.MaxLag {
		return -1 // a is better
	}

	// Get node priorities
	aPrio := p.getPriority(a.Node.String())
	bPrio := p.getPriority(b.Node.String())

	// if both priority equals
	if aPrio == bPrio {
		// First compare by replication lag
		if aLag < bLag {
			return -1
		}
		if aLag > bLag {
			return 1
		}
		// If replication lag is equal, compare by latency
		aLatency := a.Info.(interface{ Latency() time.Duration }).Latency()
		bLatency := b.Info.(interface{ Latency() time.Duration }).Latency()

		if aLatency < bLatency {
			return -1
		}
		if aLatency > bLatency {
			return 1
		}

		// If lag and latency is equal
		return 0
	}

	// If priorities are different, prefer the node with lower priority value
	if aPrio < bPrio {
		return -1
	}

	return 1
}
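A wiring sketch, not in the diff, showing how the picker and the node priorities are expected to combine: nodes above MaxLag lose to nodes within it, then the lower Priority value wins, and Priority 0 is rewritten to math.MaxInt32 (lowest preference) by WithClusterNodes. The node names are placeholders.

package sql

import "golang.yandex/hasql/v2"

// dcAwareOptions is illustrative only.
func dcAwareOptions(local, remote Querier) []ClusterOption {
	return []ClusterOption{
		WithClusterNodeChecker(hasql.PostgreSQLChecker),
		WithClusterNodePicker(NewCustomPicker[Querier](
			CustomPickerMaxLag(100), // nodes lagging above this lose to in-sync nodes
		)),
		WithClusterNodes(
			ClusterNode{Name: "local-dc", DB: local, Priority: 1},   // preferred while healthy
			ClusterNode{Name: "remote-dc", DB: remote, Priority: 2}, // fallback
		),
	}
}

The resulting slice would then be expanded into the constructor, for example NewCluster[Querier](dcAwareOptions(a, b)...).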
go.mod | 3
@@ -1,6 +1,6 @@
module go.unistack.org/micro/v4

go 1.22.0
go 1.24

require (
	dario.cat/mergo v1.0.1
@@ -17,6 +17,7 @@ require (
	go.uber.org/automaxprocs v1.6.0
	go.unistack.org/micro-proto/v4 v4.1.0
	golang.org/x/sync v0.10.0
	golang.yandex/hasql/v2 v2.1.0
	google.golang.org/grpc v1.69.4
	google.golang.org/protobuf v1.36.3
)

go.sum | 2
@@ -56,6 +56,8 @@ golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.yandex/hasql/v2 v2.1.0 h1:7CaFFWeHoK5TvA+QvZzlKHlIN5sqNpqM8NSrXskZD/k=
golang.yandex/hasql/v2 v2.1.0/go.mod h1:3Au1AxuJDCTXmS117BpbI6e+70kGWeyLR1qJAH6HdtA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
@@ -3,6 +3,7 @@ package sql
import (
	"context"
	"database/sql"
	"sync"
	"time"
)

@@ -11,31 +12,84 @@ type Statser interface {
}

func NewStatsMeter(ctx context.Context, db Statser, opts ...Option) {
	options := NewOptions(opts...)

	go func() {
		ticker := time.NewTicker(options.MeterStatsInterval)
		defer ticker.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
	if db == nil {
		return
	}

	options := NewOptions(opts...)

	var (
		statsMu                                                     sync.Mutex
		lastUpdated                                                 time.Time
		maxOpenConnections, openConnections, inUse, idle, waitCount float64
		maxIdleClosed, maxIdleTimeClosed, maxLifetimeClosed         float64
		waitDuration                                                float64
	)

	updateFn := func() {
		statsMu.Lock()
		defer statsMu.Unlock()

		if time.Since(lastUpdated) < options.MeterStatsInterval {
			return
		}

		stats := db.Stats()
				options.Meter.Counter(MaxOpenConnections).Set(uint64(stats.MaxOpenConnections))
				options.Meter.Counter(OpenConnections).Set(uint64(stats.OpenConnections))
				options.Meter.Counter(InuseConnections).Set(uint64(stats.InUse))
				options.Meter.Counter(IdleConnections).Set(uint64(stats.Idle))
				options.Meter.Counter(WaitConnections).Set(uint64(stats.WaitCount))
				options.Meter.FloatCounter(BlockedSeconds).Set(stats.WaitDuration.Seconds())
				options.Meter.Counter(MaxIdleClosed).Set(uint64(stats.MaxIdleClosed))
				options.Meter.Counter(MaxIdletimeClosed).Set(uint64(stats.MaxIdleTimeClosed))
				options.Meter.Counter(MaxLifetimeClosed).Set(uint64(stats.MaxLifetimeClosed))
		maxOpenConnections = float64(stats.MaxOpenConnections)
		openConnections = float64(stats.OpenConnections)
		inUse = float64(stats.InUse)
		idle = float64(stats.Idle)
		waitCount = float64(stats.WaitCount)
		maxIdleClosed = float64(stats.MaxIdleClosed)
		maxIdleTimeClosed = float64(stats.MaxIdleTimeClosed)
		maxLifetimeClosed = float64(stats.MaxLifetimeClosed)
		waitDuration = float64(stats.WaitDuration.Seconds())

		lastUpdated = time.Now()
	}
		}
	}()

	options.Meter.Gauge(MaxOpenConnections, func() float64 {
		updateFn()
		return maxOpenConnections
	})

	options.Meter.Gauge(OpenConnections, func() float64 {
		updateFn()
		return openConnections
	})

	options.Meter.Gauge(InuseConnections, func() float64 {
		updateFn()
		return inUse
	})

	options.Meter.Gauge(IdleConnections, func() float64 {
		updateFn()
		return idle
	})

	options.Meter.Gauge(WaitConnections, func() float64 {
		updateFn()
		return waitCount
	})

	options.Meter.Gauge(BlockedSeconds, func() float64 {
		updateFn()
		return waitDuration
	})

	options.Meter.Gauge(MaxIdleClosed, func() float64 {
		updateFn()
		return maxIdleClosed
	})

	options.Meter.Gauge(MaxIdletimeClosed, func() float64 {
		updateFn()
		return maxIdleTimeClosed
	})

	options.Meter.Gauge(MaxLifetimeClosed, func() float64 {
		updateFn()
		return maxLifetimeClosed
	})
}
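A usage sketch, not part of this diff, for the reworked stats meter. The assumption, based on the fields read above (MaxOpenConnections, InUse, WaitDuration, and so on), is that *sql.DB satisfies Statser via its Stats() method. After this change the values are exported as gauges that refresh lazily, at most once per MeterStatsInterval, instead of being pushed by a background ticker.

package sql

import (
	"context"
	"database/sql"
)

// registerPoolMetrics is illustrative only.
func registerPoolMetrics(ctx context.Context, db *sql.DB) {
	// Wires the gauge callbacks; each scrape calls updateFn, which re-reads
	// db.Stats() only when the previous snapshot is older than
	// MeterStatsInterval.
	NewStatsMeter(ctx, db)
}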
 | 
			
		||||
 
 | 
			
		||||
@@ -59,6 +59,8 @@ type Meter interface {
 | 
			
		||||
	Options() Options
 | 
			
		||||
	// String return meter type
 | 
			
		||||
	String() string
 | 
			
		||||
	// Unregister metric name and drop all data
 | 
			
		||||
	Unregister(name string, labels ...string) bool
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Counter is a counter
 | 
			
		||||
 
 | 
			
		||||
@@ -28,6 +28,10 @@ func (r *noopMeter) Name() string {
 | 
			
		||||
	return r.opts.Name
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (r *noopMeter) Unregister(name string, labels ...string) bool {
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Init initialize options
 | 
			
		||||
func (r *noopMeter) Init(opts ...Option) error {
 | 
			
		||||
	for _, o := range opts {
 | 
			
		||||
 
 | 
			
		||||
@@ -6,18 +6,18 @@ import (
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"go.unistack.org/micro/v4/meter"
	"go.unistack.org/micro/v4/semconv"
)

var (
	pools   = make([]Statser, 0)
	poolsMu sync.Mutex
)
func unregisterMetrics(size int) {
	meter.DefaultMeter.Unregister(semconv.PoolGetTotal, "capacity", strconv.Itoa(size))
	meter.DefaultMeter.Unregister(semconv.PoolPutTotal, "capacity", strconv.Itoa(size))
	meter.DefaultMeter.Unregister(semconv.PoolMisTotal, "capacity", strconv.Itoa(size))
	meter.DefaultMeter.Unregister(semconv.PoolRetTotal, "capacity", strconv.Itoa(size))
}

// Stats struct
type Stats struct {
	Get uint64
	Put uint64
@@ -25,41 +25,13 @@ type Stats struct {
	Ret uint64
}

// Statser provides buffer pool stats
type Statser interface {
	Stats() Stats
	Cap() int
}

func init() {
	go newStatsMeter()
}

func newStatsMeter() {
	ticker := time.NewTicker(meter.DefaultMeterStatsInterval)
	defer ticker.Stop()

	for range ticker.C {
		poolsMu.Lock()
		for _, st := range pools {
			stats := st.Stats()
			meter.DefaultMeter.Counter(semconv.PoolGetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Get)
			meter.DefaultMeter.Counter(semconv.PoolPutTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Put)
			meter.DefaultMeter.Counter(semconv.PoolMisTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Mis)
			meter.DefaultMeter.Counter(semconv.PoolRetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Ret)
		}
		poolsMu.Unlock()
	}
}

var (
	_ Statser = (*BytePool)(nil)
	_ Statser = (*BytesPool)(nil)
	_ Statser = (*StringsPool)(nil)
)

type Pool[T any] struct {
	p   *sync.Pool
	get *atomic.Uint64
	put *atomic.Uint64
	mis *atomic.Uint64
	ret *atomic.Uint64
	c   int
}

func (p Pool[T]) Put(t T) {
@@ -70,37 +42,82 @@ func (p Pool[T]) Get() T {
	return p.p.Get().(T)
}

func NewPool[T any](fn func() T) Pool[T] {
	return Pool[T]{
		p: &sync.Pool{
func NewPool[T any](fn func() T, size int) Pool[T] {
	p := Pool[T]{
		c:   size,
		get: &atomic.Uint64{},
		put: &atomic.Uint64{},
		mis: &atomic.Uint64{},
		ret: &atomic.Uint64{},
	}

	p.p = &sync.Pool{
		New: func() interface{} {
			p.mis.Add(1)
			return fn()
		},
		},
	}

	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 {
		return float64(p.get.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 {
		return float64(p.put.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 {
		return float64(p.mis.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 {
		return float64(p.ret.Load())
	}, "capacity", strconv.Itoa(p.c))

	return p
}

type BytePool struct {
	p   *sync.Pool
	get uint64
	put uint64
	mis uint64
	ret uint64
	get *atomic.Uint64
	put *atomic.Uint64
	mis *atomic.Uint64
	ret *atomic.Uint64
	c   int
}

func NewBytePool(size int) *BytePool {
	p := &BytePool{c: size}
	p := &BytePool{
		c:   size,
		get: &atomic.Uint64{},
		put: &atomic.Uint64{},
		mis: &atomic.Uint64{},
		ret: &atomic.Uint64{},
	}
	p.p = &sync.Pool{
		New: func() interface{} {
			atomic.AddUint64(&p.mis, 1)
			p.mis.Add(1)
			b := make([]byte, 0, size)
			return &b
		},
	}
	poolsMu.Lock()
	pools = append(pools, p)
	poolsMu.Unlock()

	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 {
		return float64(p.get.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 {
		return float64(p.put.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 {
		return float64(p.mis.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 {
		return float64(p.ret.Load())
	}, "capacity", strconv.Itoa(p.c))

	return p
}

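NewPool now takes an explicit size, seeds the atomic hit/miss counters, and registers pull-based gauges instead of relying on the old polling goroutine. A hypothetical caller of the new signature follows; the import path is assumed, since the file path is not visible in this diff:

package main

import (
	"bytes"
	"fmt"

	xpool "go.unistack.org/micro/v4/util/xpool" // assumed import path
)

func main() {
	// The second argument is the nominal capacity used for the "capacity"
	// metric label; the constructor also wires the gauges shown above.
	p := xpool.NewPool(func() *bytes.Buffer {
		return bytes.NewBuffer(make([]byte, 0, 4096))
	}, 4096)

	buf := p.Get()
	buf.WriteString("hello")
	fmt.Println(buf.Len())
	p.Put(buf)
}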
@@ -110,49 +127,73 @@ func (p *BytePool) Cap() int {

func (p *BytePool) Stats() Stats {
	return Stats{
		Put: atomic.LoadUint64(&p.put),
		Get: atomic.LoadUint64(&p.get),
		Mis: atomic.LoadUint64(&p.mis),
		Ret: atomic.LoadUint64(&p.ret),
		Put: p.put.Load(),
		Get: p.get.Load(),
		Mis: p.mis.Load(),
		Ret: p.ret.Load(),
	}
}

func (p *BytePool) Get() *[]byte {
	atomic.AddUint64(&p.get, 1)
	p.get.Add(1)
	return p.p.Get().(*[]byte)
}

func (p *BytePool) Put(b *[]byte) {
	atomic.AddUint64(&p.put, 1)
	p.put.Add(1)
	if cap(*b) > p.c {
		atomic.AddUint64(&p.ret, 1)
		p.ret.Add(1)
		return
	}
	*b = (*b)[:0]
	p.p.Put(b)
}

func (p *BytePool) Close() {
	unregisterMetrics(p.c)
}

type BytesPool struct {
	p   *sync.Pool
	get uint64
	put uint64
	mis uint64
	ret uint64
	get *atomic.Uint64
	put *atomic.Uint64
	mis *atomic.Uint64
	ret *atomic.Uint64
	c   int
}

func NewBytesPool(size int) *BytesPool {
	p := &BytesPool{c: size}
	p := &BytesPool{
		c:   size,
		get: &atomic.Uint64{},
		put: &atomic.Uint64{},
		mis: &atomic.Uint64{},
		ret: &atomic.Uint64{},
	}
	p.p = &sync.Pool{
		New: func() interface{} {
			atomic.AddUint64(&p.mis, 1)
			p.mis.Add(1)
			b := bytes.NewBuffer(make([]byte, 0, size))
			return b
		},
	}
	poolsMu.Lock()
	pools = append(pools, p)
	poolsMu.Unlock()

	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 {
		return float64(p.get.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 {
		return float64(p.put.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 {
		return float64(p.mis.Load())
	}, "capacity", strconv.Itoa(p.c))

	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 {
		return float64(p.ret.Load())
	}, "capacity", strconv.Itoa(p.c))

	return p
}

@@ -162,10 +203,10 @@ func (p *BytesPool) Cap() int {

func (p *BytesPool) Stats() Stats {
	return Stats{
		Put: atomic.LoadUint64(&p.put),
		Get: atomic.LoadUint64(&p.get),
		Mis: atomic.LoadUint64(&p.mis),
		Ret: atomic.LoadUint64(&p.ret),
		Put: p.put.Load(),
		Get: p.get.Load(),
		Mis: p.mis.Load(),
		Ret: p.ret.Load(),
	}
}

@@ -174,34 +215,43 @@ func (p *BytesPool) Get() *bytes.Buffer {
}

func (p *BytesPool) Put(b *bytes.Buffer) {
	p.put.Add(1)
	if (*b).Cap() > p.c {
		atomic.AddUint64(&p.ret, 1)
		p.ret.Add(1)
		return
	}
	b.Reset()
	p.p.Put(b)
}

func (p *BytesPool) Close() {
	unregisterMetrics(p.c)
}

type StringsPool struct {
	p   *sync.Pool
	get uint64
	put uint64
	mis uint64
	ret uint64
	get *atomic.Uint64
	put *atomic.Uint64
	mis *atomic.Uint64
	ret *atomic.Uint64
	c   int
}

func NewStringsPool(size int) *StringsPool {
	p := &StringsPool{c: size}
	p := &StringsPool{
		c:   size,
		get: &atomic.Uint64{},
		put: &atomic.Uint64{},
		mis: &atomic.Uint64{},
		ret: &atomic.Uint64{},
	}
	p.p = &sync.Pool{
		New: func() interface{} {
			atomic.AddUint64(&p.mis, 1)
			p.mis.Add(1)
			return &strings.Builder{}
		},
	}
	poolsMu.Lock()
	pools = append(pools, p)
	poolsMu.Unlock()

	return p
}

@@ -211,24 +261,28 @@ func (p *StringsPool) Cap() int {

func (p *StringsPool) Stats() Stats {
	return Stats{
		Put: atomic.LoadUint64(&p.put),
		Get: atomic.LoadUint64(&p.get),
		Mis: atomic.LoadUint64(&p.mis),
		Ret: atomic.LoadUint64(&p.ret),
		Put: p.put.Load(),
		Get: p.get.Load(),
		Mis: p.mis.Load(),
		Ret: p.ret.Load(),
	}
}

func (p *StringsPool) Get() *strings.Builder {
	atomic.AddUint64(&p.get, 1)
	p.get.Add(1)
	return p.p.Get().(*strings.Builder)
}

func (p *StringsPool) Put(b *strings.Builder) {
	atomic.AddUint64(&p.put, 1)
	p.put.Add(1)
	if b.Cap() > p.c {
		atomic.AddUint64(&p.ret, 1)
		p.ret.Add(1)
		return
	}
	b.Reset()
	p.p.Put(b)
}

func (p *StringsPool) Close() {
	unregisterMetrics(p.c)
}

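Taken together, the pools now keep their counters in atomic.Uint64 values, expose them through registered gauges, and drop those series again in Close via unregisterMetrics. A hypothetical end-to-end use of the reworked StringsPool, with the same assumed import path and an arbitrary capacity:

package main

import (
	"fmt"

	xpool "go.unistack.org/micro/v4/util/xpool" // assumed import path
)

func main() {
	p := xpool.NewStringsPool(1024)
	// Close unregisters the per-capacity metrics once the pool is done.
	defer p.Close()

	b := p.Get()
	b.WriteString("hello")
	fmt.Println(b.String())
	// Builders that grew beyond the configured capacity are counted in ret
	// and left to the GC; smaller ones are reset and pooled again.
	p.Put(b)

	st := p.Stats()
	fmt.Printf("get=%d put=%d mis=%d ret=%d\n", st.Get, st.Put, st.Mis, st.Ret)
}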