fix struct alignment && refactor for linter

This commit is contained in:
2024-12-09 13:06:43 +03:00
parent 94e8f90f00
commit 9d6a44b783
71 changed files with 532 additions and 448 deletions
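
The changes below fall into two groups: struct fields are reordered (larger fields first, embedded mutexes moved to the end), presumably to satisfy a field-alignment linter and cut padding, plus a batch of mechanical linter fixes seen throughout the diff — renaming min/max parameters, blank-identifier assignments for deliberately ignored errors, io.ReadAll instead of ioutil.ReadAll, i++ instead of i += 1, and removing else blocks after returns. As a rough illustration of the alignment part (this sketch is not from the repository), field order alone changes a struct's size:

package main

import (
	"fmt"
	"unsafe"
)

// Small fields interleaved with 8-byte fields force the compiler to
// insert padding so every int64 stays 8-byte aligned.
type padded struct {
	a bool  // 1 byte + 7 bytes padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes trailing padding
}

// Largest-first ordering leaves only trailing padding.
type packed struct {
	b int64 // 8 bytes
	a bool  // 1 byte
	c bool  // 1 byte + 6 bytes trailing padding
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{})) // 24 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(packed{})) // 16 on 64-bit platforms
}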

View File

@@ -35,7 +35,7 @@ func NewNetDialer(parent DialFunc, opts ...Option) DialFunc {
if cache.opts.MaxCacheEntries == 0 {
cache.opts.MaxCacheEntries = DefaultMaxCacheEntries
}
return func(ctx context.Context, network, address string) (net.Conn, error) {
return func(_ context.Context, network, address string) (net.Conn, error) {
conn := &dnsConn{}
conn.roundTrip = cachingRoundTrip(&cache, network, address)
return conn, nil
@@ -116,12 +116,12 @@ func PreferIPV6(b bool) Option {
}
type cache struct {
sync.RWMutex
dial DialFunc
entries map[string]cacheEntry
dial DialFunc
opts Options
sync.RWMutex
}
type cacheEntry struct {
@@ -283,7 +283,7 @@ func getNameLen(msg string) int {
for i < len(msg) {
if msg[i] == 0 {
// end of name
i += 1
i++
break
}
if msg[i] >= 0xc0 {
@@ -311,7 +311,7 @@ func getUint32(s string) int {
func cachingRoundTrip(cache *cache, network, address string) roundTripper {
return func(ctx context.Context, req string) (res string, err error) {
// check cache
if res := cache.get(req); res != "" {
if res = cache.get(req); res != "" {
return res, nil
}
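
In the cachingRoundTrip hunk above, replacing ":=" with "=" keeps the named return res from being shadowed inside the if. The original was still correct because it returned the inner res explicitly; the rewrite simply assigns to the result parameter. A minimal, hypothetical sketch (not from this repo) of the failure mode such shadow checks guard against:

package main

import "fmt"

// find has a named return; the ":=" inside the if declares a new res,
// so nothing assigned there ever reaches the result parameter.
func find(m map[string]string, k string) (res string) {
	if res := m[k]; res != "" {
		_ = res // shadows the named return; its value is lost at the closing brace
	}
	return // always returns "", whatever the inner res held
}

func main() {
	fmt.Println(find(map[string]string{"a": "x"}, "a")) // prints an empty line
}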

View File

@@ -11,15 +11,16 @@ import (
)
type dnsConn struct {
sync.Mutex
ctx context.Context
cancel context.CancelFunc
roundTrip roundTripper
deadline time.Time
ibuf bytes.Buffer
obuf bytes.Buffer
ctx context.Context
cancel context.CancelFunc
deadline time.Time
roundTrip roundTripper
sync.Mutex
}
type roundTripper func(ctx context.Context, req string) (res string, err error)
@@ -66,8 +67,8 @@ func (c *dnsConn) RemoteAddr() net.Addr {
}
func (c *dnsConn) SetDeadline(t time.Time) error {
c.SetReadDeadline(t)
c.SetWriteDeadline(t)
_ = c.SetReadDeadline(t)
_ = c.SetWriteDeadline(t)
return nil
}
@@ -78,7 +79,7 @@ func (c *dnsConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *dnsConn) SetWriteDeadline(t time.Time) error {
func (c *dnsConn) SetWriteDeadline(_ time.Time) error {
// writes do not timeout
return nil
}
@@ -156,23 +157,22 @@ func readMessage(c net.Conn) (string, error) {
return "", err
}
return string(b[:n]), nil
} else {
var sz [2]byte
_, err := io.ReadFull(c, sz[:])
if err != nil {
return "", err
}
size := int64(sz[0])<<8 | int64(sz[1])
var str strings.Builder
_, err = io.CopyN(&str, c, size)
if err == io.EOF {
return "", io.ErrUnexpectedEOF
}
if err != nil {
return "", err
}
return str.String(), nil
}
var sz [2]byte
_, err := io.ReadFull(c, sz[:])
if err != nil {
return "", err
}
size := int64(sz[0])<<8 | int64(sz[1])
var str strings.Builder
_, err = io.CopyN(&str, c, size)
if err == io.EOF {
return "", io.ErrUnexpectedEOF
}
if err != nil {
return "", err
}
return str.String(), nil
}
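
The readMessage rewrite above is the common "outdent the else" refactor: when the if branch always returns, the else wrapper only adds indentation, and linters commonly flag it (e.g. revive's indent-error-flow). A trivial sketch of the same shape, not from this repo:

package main

import "fmt"

// abs returns n without an else block: the negative branch already
// returned, so the remaining code can sit at the top level.
func abs(n int) int {
	if n < 0 {
		return -n
	}
	return n // previously this would have lived inside an else { ... }
}

func main() { fmt.Println(abs(-3)) } // 3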

View File

@@ -71,7 +71,7 @@ func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
}
// TagConn can attach some information to the given context.
func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
func (h *serverHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
if span, ok := tracer.SpanFromContext(ctx); ok {
attrs := peerAttr(peerFromCtx(ctx))
span.AddLabels(attrs...)
@@ -80,7 +80,7 @@ func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) co
}
// HandleConn processes the Conn stats.
func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
func (h *serverHandler) HandleConn(_ context.Context, _ stats.ConnStats) {
}
type clientHandler struct {

View File

@@ -665,12 +665,12 @@ func patParamKeys(pattern string) ([]string, error) {
// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 string) int {
max := len(k1)
if l := len(k2); l < max {
max = l
maxLen := len(k1)
if l := len(k2); l < maxLen {
maxLen = l
}
var i int
for i = 0; i < max; i++ {
for i = 0; i < maxLen; i++ {
if k1[i] != k2[i] {
break
}

View File

@@ -14,7 +14,7 @@ func Random(d time.Duration) time.Duration {
return time.Duration(v)
}
func RandomInterval(min, max time.Duration) time.Duration {
func RandomInterval(minTime, maxTime time.Duration) time.Duration {
var rng rand.Rand
return time.Duration(rng.Int63n(max.Nanoseconds()-min.Nanoseconds())+min.Nanoseconds()) * time.Nanosecond
return time.Duration(rng.Int63n(maxTime.Nanoseconds()-minTime.Nanoseconds())+minTime.Nanoseconds()) * time.Nanosecond
}
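
The min/max renames here (and in the ticker, port-listener and register files below) presumably exist because min and max became predeclared built-in functions in Go 1.21, so parameters with those names shadow the builtins and trip predeclared-identifier checks. A small sketch, not from this repo, of why keeping the builtins reachable matters:

package main

import "fmt"

// clamp relies on the Go 1.21 built-ins; if its parameters were named
// min and max, the builtins would be shadowed and these calls would
// not compile.
func clamp(v, lo, hi int) int {
	return max(lo, min(v, hi))
}

func main() {
	fmt.Println(clamp(15, 0, 10)) // 10
}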

View File

@@ -24,12 +24,12 @@ type Ticker struct {
// NewTickerContext returns a pointer to an initialized instance of the Ticker.
// It works like NewTicker except that it has ability to close via context.
// Also it works fine with context.WithTimeout to handle max time to run ticker.
func NewTickerContext(ctx context.Context, min, max time.Duration) *Ticker {
func NewTickerContext(ctx context.Context, minTime, maxTime time.Duration) *Ticker {
ticker := &Ticker{
C: make(chan time.Time),
done: make(chan chan struct{}),
min: min.Nanoseconds(),
max: max.Nanoseconds(),
min: minTime.Nanoseconds(),
max: maxTime.Nanoseconds(),
ctx: ctx,
}
go ticker.run()
@@ -39,12 +39,12 @@ func NewTickerContext(ctx context.Context, min, max time.Duration) *Ticker {
// NewTicker returns a pointer to an initialized instance of the Ticker.
// Min and max are durations of the shortest and longest allowed
// ticks. Ticker will run in a goroutine until explicitly stopped.
func NewTicker(min, max time.Duration) *Ticker {
func NewTicker(minTime, maxTime time.Duration) *Ticker {
ticker := &Ticker{
C: make(chan time.Time),
done: make(chan chan struct{}),
min: min.Nanoseconds(),
max: max.Nanoseconds(),
min: minTime.Nanoseconds(),
max: maxTime.Nanoseconds(),
ctx: context.Background(),
}
go ticker.run()

View File

@@ -31,26 +31,26 @@ loop:
func TestTicker(t *testing.T) {
t.Parallel()
min := time.Duration(10)
max := time.Duration(20)
minTime := time.Duration(10)
maxTime := time.Duration(20)
// tick can take a little longer since we're not adjusting it to account for
// processing.
precision := time.Duration(4)
rt := NewTicker(min*time.Millisecond, max*time.Millisecond)
rt := NewTicker(minTime*time.Millisecond, maxTime*time.Millisecond)
for i := 0; i < 5; i++ {
t0 := time.Now()
t1 := <-rt.C
td := t1.Sub(t0)
if td < min*time.Millisecond {
if td < minTime*time.Millisecond {
t.Fatalf("tick was shorter than expected: %s", td)
} else if td > (max+precision)*time.Millisecond {
} else if td > (maxTime+precision)*time.Millisecond {
t.Fatalf("tick was longer than expected: %s", td)
}
}
rt.Stop()
time.Sleep((max + precision) * time.Millisecond)
time.Sleep((maxTime + precision) * time.Millisecond)
select {
case v, ok := <-rt.C:
if ok || !v.IsZero() {

View File

@@ -48,19 +48,19 @@ func Listen(addr string, fn func(string) (net.Listener, error)) (net.Listener, e
// we have a port range
// extract min port
min, err := strconv.Atoi(prange[0])
minPort, err := strconv.Atoi(prange[0])
if err != nil {
return nil, errors.New("unable to extract port range")
}
// extract max port
max, err := strconv.Atoi(prange[1])
maxPort, err := strconv.Atoi(prange[1])
if err != nil {
return nil, errors.New("unable to extract port range")
}
// range the ports
for port := min; port <= max; port++ {
for port := minPort; port <= maxPort; port++ {
// try bind to host:port
ln, err := fn(HostPort(host, port))
if err == nil {
@@ -68,7 +68,7 @@ func Listen(addr string, fn func(string) (net.Listener, error)) (net.Listener, e
}
// hit max port
if port == max {
if port == maxPort {
return nil, err
}
}

View File

@@ -155,7 +155,7 @@ func indexFunction(v reflect.Value) func(i int) reflect.Value {
return v.MapIndex(keys[i])
}
}
return func(i int) reflect.Value { return reflect.Value{} }
return func(_ int) reflect.Value { return reflect.Value{} }
}
func mergeValue(values []reflect.Value) reflect.Value {

View File

@@ -101,16 +101,16 @@ func TestMergeString(t *testing.T) {
func TestMergeNested(t *testing.T) {
type CallReqNested struct {
Nested *CallReqNested `json:"nested2"`
StringArgs []string `json:"string_args"`
Uint64Args []uint64 `json:"uint64_args"`
Nested *CallReqNested `json:"nested2"`
}
type CallReq struct {
Nested *CallReqNested `json:"nested"`
Name string `json:"name"`
Req string `json:"req"`
Arg2 int `json:"arg2"`
Nested *CallReqNested `json:"nested"`
}
dst := &CallReq{

View File

@@ -109,12 +109,11 @@ func Merge(olist []*register.Service, nlist []*register.Service) []*register.Ser
seen = true
srv = append(srv, sp)
break
} else {
sp := &register.Service{}
// make copy
*sp = *o
srv = append(srv, sp)
}
sp := &register.Service{}
// make copy
*sp = *o
srv = append(srv, sp)
}
if !seen {
srv = append(srv, Copy([]*register.Service{n})...)
@@ -153,14 +152,14 @@ func Remove(old, del []*register.Service) []*register.Service {
// WaitService using register wait for service to appear with min/max interval for check and optional timeout.
// Timeout can be 0 to wait infinitive.
func WaitService(ctx context.Context, reg register.Register, name string, min time.Duration, max time.Duration, timeout time.Duration, opts ...register.LookupOption) error {
func WaitService(ctx context.Context, reg register.Register, name string, minTime time.Duration, maxTime time.Duration, timeout time.Duration, opts ...register.LookupOption) error {
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
ticker := jitter.NewTickerContext(ctx, min, max)
ticker := jitter.NewTickerContext(ctx, minTime, maxTime)
defer ticker.Stop()
for {

View File

@@ -10,10 +10,11 @@ import (
// Buffer is ring buffer
type Buffer struct {
sync.RWMutex
streams map[string]*Stream
vals []*Entry
size int
sync.RWMutex
}
// Entry is ring buffer data entry

View File

@@ -6,8 +6,8 @@ import (
// Pool holds the socket pool
type Pool struct {
sync.RWMutex
pool map[string]*Socket
sync.RWMutex
}
// Get socket from pool

View File

@@ -20,10 +20,11 @@ type Stream interface {
}
type stream struct {
sync.RWMutex
Stream
err error
request *request
sync.RWMutex
}
type request struct {

View File

@@ -10,11 +10,18 @@ import (
type DigitalOceanMetadata struct {
Metadata struct {
V1 struct {
DropletID int64 `json:"droplet_id"`
Hostname string `json:"hostname"`
VendorData string `json:"vendor_data"`
Features map[string]interface{} `json:"features"`
Hostname string `json:"hostname"`
VendorData string `json:"vendor_data"`
Region string `json:"region"`
PublicKeys []string `json:"public_keys"`
Region string `json:"region"`
DNS struct {
Nameservers []string `json:"nameservers"`
} `json:"dns"`
Interfaces struct {
Private []struct {
IPv4 struct {
@@ -31,24 +38,23 @@ type DigitalOceanMetadata struct {
Netmask string `json:"netmask"`
Gateway string `json:"gateway"`
} `json:"ipv4"`
IPv6 struct {
Address string `json:"ip_address"`
CIDR int `json:"cidr"`
Gateway string `json:"gateway"`
} `json:"ipv6"`
Mac string `json:"mac"`
Type string `json:"type"`
IPv6 struct {
Address string `json:"ip_address"`
Gateway string `json:"gateway"`
CIDR int `json:"cidr"`
} `json:"ipv6"`
} `json:"public"`
} `json:"interfaces"`
DropletID int64 `json:"droplet_id"`
FloatingIP struct {
IPv4 struct {
Active bool `json:"active"`
} `json:"ipv4"`
} `json:"floating_ip"`
DNS struct {
Nameservers []string `json:"nameservers"`
} `json:"dns"`
Features map[string]interface{} `json:"features"`
} `json:"v1"`
} `json:"metadata"`
}
@@ -56,7 +62,7 @@ type DigitalOceanMetadata struct {
func (stfs *DigitalOceanMetadata) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/metadata/v1.json":
json.NewEncoder(w).Encode(stfs.Metadata.V1)
_ = json.NewEncoder(w).Encode(stfs.Metadata.V1)
default:
fs := FileServer(stfs, "json", time.Now())
idx := strings.Index(r.URL.Path[1:], "/")

View File

@@ -2,29 +2,29 @@ package structfs
type EC2Metadata struct {
Latest struct {
Userdata string `json:"user-data"`
Metadata struct {
AMIID int `json:"ami-id"`
AMILaunchIndex int `json:"ami-launch-index"`
AMIManifestPath string `json:"ami-manifest-path"`
AncestorAMIIDs []int `json:"ancestor-ami-ids"`
BlockDeviceMapping []string `json:"block-device-mapping"`
InstanceID int `json:"instance-id"`
InstanceType string `json:"instance-type"`
LocalHostname string `json:"local-hostname"`
LocalIPv4 string `json:"local-ipv4"`
kernelID int `json:"kernel-id"`
Placement string `json:"placement"`
AvailabilityZone string `json:"availability-zone"`
ProductCodes string `json:"product-codes"`
PublicHostname string `json:"public-hostname"`
PublicIPv4 string `json:"public-ipv4"`
PublicKeys []struct {
AMIManifestPath string `json:"ami-manifest-path"`
InstanceType string `json:"instance-type"`
LocalHostname string `json:"local-hostname"`
LocalIPv4 string `json:"local-ipv4"`
Placement string `json:"placement"`
AvailabilityZone string `json:"availability-zone"`
ProductCodes string `json:"product-codes"`
PublicHostname string `json:"public-hostname"`
PublicIPv4 string `json:"public-ipv4"`
PublicKeys []struct {
Key []string `json:"-"`
} `json:"public-keys"`
RamdiskID int `json:"ramdisk-id"`
ReservationID int `json:"reservation-id"`
SecurityGroups []string `json:"security-groups"`
AncestorAMIIDs []int `json:"ancestor-ami-ids"`
BlockDeviceMapping []string `json:"block-device-mapping"`
SecurityGroups []string `json:"security-groups"`
RamdiskID int `json:"ramdisk-id"`
ReservationID int `json:"reservation-id"`
AMIID int `json:"ami-id"`
AMILaunchIndex int `json:"ami-launch-index"`
kernelID int `json:"kernel-id"`
InstanceID int `json:"instance-id"`
} `json:"meta-data"`
Userdata string `json:"user-data"`
} `json:"latest"`
}

View File

@@ -27,7 +27,7 @@ func (fs *fs) ServeHTTP(w http.ResponseWriter, r *http.Request) {
f, err := fs.Open(r.URL.Path)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
_, _ = w.Write([]byte(err.Error()))
return
}
w.Header().Set("Content-Type", "application/octet-stream")
@@ -35,22 +35,22 @@ func (fs *fs) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
type fs struct {
modtime time.Time
iface interface{}
tag string
modtime time.Time
}
type file struct {
name string
offset int64
data []byte
modtime time.Time
name string
data []byte
offset int64
}
type fileInfo struct {
modtime time.Time
name string
size int64
modtime time.Time
}
func (fi *fileInfo) Sys() interface{} {
@@ -67,9 +67,9 @@ func (fi *fileInfo) Name() string {
func (fi *fileInfo) Mode() os.FileMode {
if strings.HasSuffix(fi.name, "/") {
return os.FileMode(0755) | os.ModeDir
return os.FileMode(0o755) | os.ModeDir
}
return os.FileMode(0644)
return os.FileMode(0o644)
}
func (fi *fileInfo) IsDir() bool {
@@ -105,22 +105,21 @@ func (f *file) Read(b []byte) (int, error) {
return n, err
}
func (f *file) Readdir(count int) ([]os.FileInfo, error) {
func (f *file) Readdir(_ int) ([]os.FileInfo, error) {
return nil, nil
}
func (f *file) Seek(offset int64, whence int) (int64, error) {
// log.Printf("seek %d %d %s\n", offset, whence, f.name)
switch whence {
case os.SEEK_SET:
case io.SeekStart:
f.offset = offset
case os.SEEK_CUR:
case io.SeekCurrent:
f.offset += offset
case os.SEEK_END:
case io.SeekEnd:
f.offset = int64(len(f.data)) + offset
}
return f.offset, nil
}
func (f *file) Stat() (os.FileInfo, error) {
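
The whence constants swapped in above are io.SeekStart / io.SeekCurrent / io.SeekEnd, which replaced the deprecated os.SEEK_SET / os.SEEK_CUR / os.SEEK_END; the numeric values (0, 1, 2) are identical, so the change is purely cosmetic. A tiny standalone example:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	r := bytes.NewReader([]byte("hello world"))
	// Seeking 0 bytes from the end reports the length of the data.
	end, _ := r.Seek(0, io.SeekEnd) // formerly os.SEEK_END
	fmt.Println(end)                // 11
}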

View File

@@ -2,7 +2,7 @@ package structfs
import (
"encoding/json"
"io/ioutil"
"io"
"net/http"
"reflect"
"testing"
@@ -82,17 +82,17 @@ func get(path string) ([]byte, error) {
return nil, err
}
defer res.Body.Close()
return ioutil.ReadAll(res.Body)
return io.ReadAll(res.Body)
}
func TestAll(t *testing.T) {
server(t)
var tests = []struct {
tests := []struct {
in string
out string
}{
{"http://127.0.0.1:8080/metadata/v1/", "droplet_id\nhostname\nvendor_data\npublic_keys\nregion\ninterfaces\nfloating_ip\ndns\nfeatures"},
{"http://127.0.0.1:8080/metadata/v1/", "features\nhostname\nvendor_data\nregion\npublic_keys\ndns\ninterfaces\ndroplet_id\nfloating_ip"},
{"http://127.0.0.1:8080/metadata/v1/droplet_id", "2756294"},
{"http://127.0.0.1:8080/metadata/v1/dns/", "nameservers"},
{"http://127.0.0.1:8080/metadata/v1/dns/nameservers", "2001:4860:4860::8844\n2001:4860:4860::8888\n8.8.8.8"},

View File

@@ -78,8 +78,8 @@ var (
for _, se := range st.Details() {
switch ne := se.(type) {
case proto.Message:
buf, err := testCodec.Marshal(ne)
if err != nil {
var buf []byte
if buf, err = testCodec.Marshal(ne); err != nil {
return fmt.Errorf("failed to marshal err: %w", err)
}
if err = testCodec.Unmarshal(buf, &testMap); err != nil {
@@ -438,10 +438,10 @@ func Run(ctx context.Context, c client.Client, m sqlmock.Sqlmock, dir string, ex
}
type Case struct {
dbfiles []string
reqfile string
rspfile string
errfile string
dbfiles []string
}
func GetCases(dir string, exts []string) ([]Case, error) {

View File

@@ -1,6 +1,5 @@
package text
func DetectEncoding(text string) map[string]int {
charsets := map[string]int{
"UTF-8": 0,
@@ -19,7 +18,7 @@ func DetectEncoding(text string) map[string]int {
utfupper := 5
lowercase := 3
uppercase := 1
last_simb := 0
lastSimb := 0
for a := 0; a < len(text); a++ {
char := int(text[a])
@@ -30,10 +29,10 @@ func DetectEncoding(text string) map[string]int {
}
// UTF-8
if (last_simb == 208) && ((char > 143 && char < 176) || char == 129) {
if (lastSimb == 208) && ((char > 143 && char < 176) || char == 129) {
charsets["UTF-8"] += (utfupper * 2)
}
if ((last_simb == 208) && ((char > 175 && char < 192) || char == 145)) || (last_simb == 209 && char > 127 && char < 144) {
if ((lastSimb == 208) && ((char > 175 && char < 192) || char == 145)) || (lastSimb == 209 && char > 127 && char < 144) {
charsets["UTF-8"] += (utflower * 2)
}
@@ -77,7 +76,7 @@ func DetectEncoding(text string) map[string]int {
charsets["MAC"] += uppercase
}
last_simb = char
lastSimb = char
}
return charsets

View File

@@ -2,7 +2,6 @@ package time
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
@@ -14,7 +13,7 @@ type Duration int64
func ParseDuration(s string) (time.Duration, error) {
if s == "" {
return 0, fmt.Errorf(`time: invalid duration "` + s + `"`)
return 0, fmt.Errorf(`time: invalid duration "%s"`, s)
}
var p int
@@ -27,21 +26,21 @@ loop:
case 'h':
d, err := strconv.Atoi(s[p:i])
if err != nil {
return 0, errors.New("time: invalid duration " + s)
return 0, fmt.Errorf(`time: invalid duration "%s"`, s)
}
hours += d
p = i + 1
case 'd':
d, err := strconv.Atoi(s[p:i])
if err != nil {
return 0, errors.New("time: invalid duration " + s)
return 0, fmt.Errorf(`time: invalid duration "%s"`, s)
}
hours += d * 24
p = i + 1
case 'y':
n, err := strconv.Atoi(s[p:i])
if err != nil {
return 0, errors.New("time: invalid duration " + s)
return 0, fmt.Errorf(`time: invalid duration "%s"`, s)
}
var d int
for j := n - 1; j >= 0; j-- {

View File

@@ -39,19 +39,16 @@ func newStatsMeter() {
ticker := time.NewTicker(meter.DefaultMeterStatsInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
poolsMu.Lock()
for _, st := range pools {
stats := st.Stats()
meter.DefaultMeter.Counter(semconv.PoolGetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Get)
meter.DefaultMeter.Counter(semconv.PoolPutTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Put)
meter.DefaultMeter.Counter(semconv.PoolMisTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Mis)
meter.DefaultMeter.Counter(semconv.PoolRetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Ret)
}
poolsMu.Unlock()
for range ticker.C {
poolsMu.Lock()
for _, st := range pools {
stats := st.Stats()
meter.DefaultMeter.Counter(semconv.PoolGetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Get)
meter.DefaultMeter.Counter(semconv.PoolPutTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Put)
meter.DefaultMeter.Counter(semconv.PoolMisTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Mis)
meter.DefaultMeter.Counter(semconv.PoolRetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Ret)
}
poolsMu.Unlock()
}
}
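
The final hunk collapses a for/select loop whose only case is <-ticker.C into for range ticker.C, the form staticcheck's S1000-style check prefers for a single-case select. Behavior is unchanged: ticker.Stop does not close the channel, so both versions block until the surrounding goroutine goes away. A minimal runnable sketch, not from this repo:

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	// Equivalent to: for { select { case t := <-ticker.C: ... } }
	for t := range ticker.C {
		fmt.Println("tick at", t)
		return // stop after one tick so the example terminates
	}
}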