move implementations to external repos (#17)

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
commit 0f4b1435d9 (parent c4a303190a)
committed by GitHub, 2020-08-25 13:44:41 +03:00
238 changed files with 151 additions and 37364 deletions

store/cache/cache.go vendored

@@ -1,128 +0,0 @@
package cache
import (
"github.com/unistack-org/micro/v3/store"
"github.com/unistack-org/micro/v3/store/memory"
)
// cache is a store with caching to reduce IO where applicable.
// A memory store is used to cache reads from the given backing store.
// Reads are read-through; writes are write-through.
type cache struct {
m store.Store // the memory store
b store.Store // the backing store, e.g. file, cockroach, etc.
options store.Options
}
// NewStore returns a new cache store
func NewStore(store store.Store, opts ...store.Option) store.Store {
cf := &cache{
m: memory.NewStore(opts...),
b: store,
}
return cf
}
func (c *cache) init(opts ...store.Option) error {
for _, o := range opts {
o(&c.options)
}
return nil
}
// Init initialises the underlying stores
func (c *cache) Init(opts ...store.Option) error {
if err := c.init(opts...); err != nil {
return err
}
if err := c.m.Init(opts...); err != nil {
return err
}
return c.b.Init(opts...)
}
// Options allows you to view the current options.
func (c *cache) Options() store.Options {
return c.options
}
// Read takes a single key name and optional ReadOptions. It returns matching []*Record or an error.
func (c *cache) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
recs, err := c.m.Read(key, opts...)
if err != nil && err != store.ErrNotFound {
return nil, err
}
if len(recs) > 0 {
return recs, nil
}
recs, err = c.b.Read(key, opts...)
if err == nil {
for _, rec := range recs {
if err := c.m.Write(rec); err != nil {
return nil, err
}
}
}
return recs, err
}
// Write writes a record to the store, and returns an error if the record was not written.
// If the write succeeds in memory but fails to write through to the backing store, you'll
// receive an error, but the value may still reside in memory, so appropriate action should be taken.
func (c *cache) Write(r *store.Record, opts ...store.WriteOption) error {
if err := c.m.Write(r, opts...); err != nil {
return err
}
return c.b.Write(r, opts...)
}
// Delete removes the record with the corresponding key from the store.
// If the delete succeeds in memory but fails on the backing store, you'll receive an error;
// the value will have been removed from memory but may still exist in the backing store,
// so appropriate action should be taken.
func (c *cache) Delete(key string, opts ...store.DeleteOption) error {
if err := c.m.Delete(key, opts...); err != nil {
return err
}
return c.b.Delete(key, opts...)
}
// List returns any keys that match, or an empty list with no error if none matched.
func (c *cache) List(opts ...store.ListOption) ([]string, error) {
keys, err := c.m.List(opts...)
if err != nil && err != store.ErrNotFound {
return nil, err
}
if len(keys) > 0 {
return keys, nil
}
keys, err = c.b.List(opts...)
if err == nil {
for _, key := range keys {
recs, err := c.b.Read(key)
if err != nil {
return nil, err
}
for _, r := range recs {
if err := c.m.Write(r); err != nil {
return nil, err
}
}
}
}
return keys, err
}
// Close closes both the memory store and the backing store.
func (c *cache) Close() error {
if err := c.m.Close(); err != nil {
return err
}
return c.b.Close()
}
// String returns the name of the implementation.
func (c *cache) String() string {
return "cache"
}
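
For context, a minimal usage sketch (not part of the deleted file) showing how this cache was typically composed with the file store from the same commit; the import paths mirror the file header above and the test below, so treat them as an assumption about the pre-move layout.

package main

import (
	"fmt"

	"github.com/unistack-org/micro/v3/store"
	"github.com/unistack-org/micro/v3/store/cache"
	"github.com/unistack-org/micro/v3/store/file"
)

func main() {
	// wrap the file-backed store; writes go to both stores,
	// reads are served from memory once it is warm
	s := cache.NewStore(file.NewStore())
	if err := s.Init(); err != nil {
		panic(err)
	}
	defer s.Close()

	if err := s.Write(&store.Record{Key: "greeting", Value: []byte("hello")}); err != nil {
		panic(err)
	}

	recs, err := s.Read("greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(recs[0].Value)) // hello
}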


@@ -1,102 +0,0 @@
package cache
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/unistack-org/micro/v3/store"
"github.com/unistack-org/micro/v3/store/file"
)
func cleanup(db string, s store.Store) {
s.Close()
dir := filepath.Join(file.DefaultDir, db+"/")
os.RemoveAll(dir)
}
func TestRead(t *testing.T) {
cf := NewStore(file.NewStore())
cf.Init()
cfInt := cf.(*cache)
defer cleanup(file.DefaultDatabase, cf)
_, err := cf.Read("key1")
assert.Error(t, err, "Unexpected record")
cfInt.b.Write(&store.Record{
Key: "key1",
Value: []byte("foo"),
})
recs, err := cf.Read("key1")
assert.NoError(t, err)
assert.Len(t, recs, 1, "Expected a record to be pulled from file store")
recs, err = cfInt.m.Read("key1")
assert.NoError(t, err)
assert.Len(t, recs, 1, "Expected a memory store to be populatedfrom file store")
}
func TestWrite(t *testing.T) {
cf := NewStore(file.NewStore())
cf.Init()
cfInt := cf.(*cache)
defer cleanup(file.DefaultDatabase, cf)
cf.Write(&store.Record{
Key: "key1",
Value: []byte("foo"),
})
recs, _ := cfInt.m.Read("key1")
assert.Len(t, recs, 1, "Expected a record in the memory store")
recs, _ = cfInt.b.Read("key1")
assert.Len(t, recs, 1, "Expected a record in the file store")
}
func TestDelete(t *testing.T) {
cf := NewStore(file.NewStore())
cf.Init()
cfInt := cf.(*cache)
defer cleanup(file.DefaultDatabase, cf)
cf.Write(&store.Record{
Key: "key1",
Value: []byte("foo"),
})
recs, _ := cfInt.m.Read("key1")
assert.Len(t, recs, 1, "Expected a record in the memory store")
recs, _ = cfInt.b.Read("key1")
assert.Len(t, recs, 1, "Expected a record in the file store")
cf.Delete("key1")
_, err := cfInt.m.Read("key1")
assert.Error(t, err, "Expected no records in memory store")
_, err = cfInt.b.Read("key1")
assert.Error(t, err, "Expected no records in file store")
}
func TestList(t *testing.T) {
cf := NewStore(file.NewStore())
cf.Init()
cfInt := cf.(*cache)
defer cleanup(file.DefaultDatabase, cf)
keys, err := cf.List()
assert.NoError(t, err)
assert.Len(t, keys, 0)
cfInt.b.Write(&store.Record{
Key: "key1",
Value: []byte("foo"),
})
cfInt.b.Write(&store.Record{
Key: "key2",
Value: []byte("foo"),
})
keys, err = cf.List()
assert.NoError(t, err)
assert.Len(t, keys, 2)
}


@@ -1,524 +0,0 @@
// Package cockroach implements the cockroach store
package cockroach
import (
"database/sql"
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/unistack-org/micro/v3/logger"
"github.com/unistack-org/micro/v3/store"
)
// DefaultDatabase is the namespace that the sql store
// will use if no namespace is provided.
var (
DefaultDatabase = "micro"
DefaultTable = "micro"
)
var (
re = regexp.MustCompile("[^a-zA-Z0-9]+")
statements = map[string]string{
"list": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 ORDER BY key DESC LIMIT $2 OFFSET $3;",
"read": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key = $1;",
"readMany": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1;",
"readOffset": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 ORDER BY key DESC LIMIT $2 OFFSET $3;",
"write": "INSERT INTO %s.%s(key, value, metadata, expiry) VALUES ($1, $2::bytea, $3, $4) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value, metadata = EXCLUDED.metadata, expiry = EXCLUDED.expiry;",
"delete": "DELETE FROM %s.%s WHERE key = $1;",
}
)
type sqlStore struct {
options store.Options
db *sql.DB
sync.RWMutex
// known databases
databases map[string]bool
}
func (s *sqlStore) getDB(database, table string) (string, string) {
if len(database) == 0 {
if len(s.options.Database) > 0 {
database = s.options.Database
} else {
database = DefaultDatabase
}
}
if len(table) == 0 {
if len(s.options.Table) > 0 {
table = s.options.Table
} else {
table = DefaultTable
}
}
// store.namespace must only contain letters, numbers and underscores
database = re.ReplaceAllString(database, "_")
table = re.ReplaceAllString(table, "_")
return database, table
}
func (s *sqlStore) createDB(database, table string) error {
database, table = s.getDB(database, table)
s.Lock()
defer s.Unlock()
if _, ok := s.databases[database+":"+table]; ok {
return nil
}
if err := s.initDB(database, table); err != nil {
return err
}
s.databases[database+":"+table] = true
return nil
}
func (s *sqlStore) initDB(database, table string) error {
if s.db == nil {
return errors.New("Database connection not initialised")
}
// Create the namespace's database
_, err := s.db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", database))
if err != nil {
return err
}
_, err = s.db.Exec(fmt.Sprintf("SET DATABASE = %s;", database))
if err != nil {
return errors.Wrap(err, "Couldn't set database")
}
// Create a table for the namespace's prefix
_, err = s.db.Exec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s
(
key text NOT NULL,
value bytea,
metadata JSONB,
expiry timestamp with time zone,
CONSTRAINT %s_pkey PRIMARY KEY (key)
);`, table, table))
if err != nil {
return errors.Wrap(err, "Couldn't create table")
}
// Create Index
_, err = s.db.Exec(fmt.Sprintf(`CREATE INDEX IF NOT EXISTS "%s" ON %s.%s USING btree ("key");`, "key_index_"+table, database, table))
if err != nil {
return err
}
// Create Metadata Index
_, err = s.db.Exec(fmt.Sprintf(`CREATE INDEX IF NOT EXISTS "%s" ON %s.%s USING GIN ("metadata");`, "metadata_index_"+table, database, table))
if err != nil {
return err
}
return nil
}
func (s *sqlStore) configure() error {
if len(s.options.Nodes) == 0 {
s.options.Nodes = []string{"postgresql://root@localhost:26257?sslmode=disable"}
}
source := s.options.Nodes[0]
// the source may be a URL like postgres://user@host:26257/db?sslmode=disable or a standard
// connection string, e.g. host=%s port=%d user=%s password=%s dbname=%s sslmode=disable;
// if it parses as neither and contains no spaces, treat it as a bare hostname
_, err := url.Parse(source)
if err != nil {
if !strings.Contains(source, " ") {
source = fmt.Sprintf("host=%s", source)
}
}
// create source from first node
db, err := sql.Open("postgres", source)
if err != nil {
return err
}
if err := db.Ping(); err != nil {
return err
}
if s.db != nil {
s.db.Close()
}
// save the values
s.db = db
// get DB
database, table := s.getDB(s.options.Database, s.options.Table)
// initialise the database
return s.initDB(database, table)
}
func (s *sqlStore) prepare(database, table, query string) (*sql.Stmt, error) {
st, ok := statements[query]
if !ok {
return nil, errors.New("unsupported statement")
}
// get DB
database, table = s.getDB(database, table)
q := fmt.Sprintf(st, database, table)
stmt, err := s.db.Prepare(q)
if err != nil {
return nil, err
}
return stmt, nil
}
func (s *sqlStore) Close() error {
if s.db != nil {
return s.db.Close()
}
return nil
}
func (s *sqlStore) Init(opts ...store.Option) error {
for _, o := range opts {
o(&s.options)
}
// reconfigure
return s.configure()
}
// List returns the keys of all known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) {
var options store.ListOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return nil, err
}
limit := sql.NullInt32{}
offset := 0
pattern := "%"
if options.Prefix != "" || options.Suffix != "" {
if options.Prefix != "" {
pattern = options.Prefix + pattern
}
if options.Suffix != "" {
pattern += options.Suffix
}
}
if options.Offset > 0 {
offset = int(options.Offset)
}
if options.Limit > 0 {
limit = sql.NullInt32{Int32: int32(options.Limit), Valid: true}
}
st, err := s.prepare(options.Database, options.Table, "list")
if err != nil {
return nil, err
}
defer st.Close()
rows, err := st.Query(pattern, limit, offset)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
records, err := s.rowsToRecords(rows)
if err != nil {
return nil, err
}
keys := make([]string, 0, len(records))
for _, k := range records {
keys = append(keys, k.Key)
}
rowErr := rows.Close()
if rowErr != nil {
// transaction rollback or something
return keys, rowErr
}
if err := rows.Err(); err != nil {
return keys, err
}
return keys, nil
}
// rowToRecord converts from sql.Row to a store.Record. If the record has expired it will issue a delete in a separate goroutine
func (s *sqlStore) rowToRecord(row *sql.Row) (*store.Record, error) {
var timehelper pq.NullTime
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &timehelper); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if timehelper.Valid {
if timehelper.Time.Before(time.Now()) {
// record has expired
go s.Delete(record.Key)
return nil, store.ErrNotFound
}
record.Expiry = time.Until(timehelper.Time)
}
return record, nil
}
// rowsToRecords converts from sql.Rows to []*store.Record. If a record has expired it will issue a delete in a separate goroutine
func (s *sqlStore) rowsToRecords(rows *sql.Rows) ([]*store.Record, error) {
var records []*store.Record
var timehelper pq.NullTime
for rows.Next() {
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &timehelper); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if timehelper.Valid {
if timehelper.Time.Before(time.Now()) {
// record has expired
go s.Delete(record.Key)
} else {
record.Expiry = time.Until(timehelper.Time)
records = append(records, record)
}
} else {
records = append(records, record)
}
}
return records, nil
}
// Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
var options store.ReadOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return nil, err
}
if options.Prefix || options.Suffix {
return s.read(key, options)
}
st, err := s.prepare(options.Database, options.Table, "read")
if err != nil {
return nil, err
}
defer st.Close()
row := st.QueryRow(key)
record, err := s.rowToRecord(row)
if err != nil {
return nil, err
}
var records []*store.Record
return append(records, record), nil
}
// read reads multiple records matching a prefix and/or suffix pattern
func (s *sqlStore) read(key string, options store.ReadOptions) ([]*store.Record, error) {
pattern := "%"
if options.Prefix {
pattern = key + pattern
}
if options.Suffix {
pattern += key
}
var rows *sql.Rows
var st *sql.Stmt
var err error
if options.Limit != 0 {
st, err = s.prepare(options.Database, options.Table, "readOffset")
if err != nil {
return nil, err
}
defer st.Close()
rows, err = st.Query(pattern, options.Limit, options.Offset)
} else {
st, err = s.prepare(options.Database, options.Table, "readMany")
if err != nil {
return nil, err
}
defer st.Close()
rows, err = st.Query(pattern)
}
if err != nil {
if err == sql.ErrNoRows {
return []*store.Record{}, nil
}
return []*store.Record{}, errors.Wrap(err, "sqlStore.read failed")
}
defer rows.Close()
records, err := s.rowsToRecords(rows)
if err != nil {
return nil, err
}
rowErr := rows.Close()
if rowErr != nil {
// transaction rollback or something
return records, rowErr
}
if err := rows.Err(); err != nil {
return records, err
}
return records, nil
}
// Write writes a record to the store
func (s *sqlStore) Write(r *store.Record, opts ...store.WriteOption) error {
var options store.WriteOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return err
}
st, err := s.prepare(options.Database, options.Table, "write")
if err != nil {
return err
}
defer st.Close()
metadata := make(Metadata)
for k, v := range r.Metadata {
metadata[k] = v
}
var expiry time.Time
// expiry from options takes precedence
if !options.Expiry.IsZero() {
expiry = options.Expiry
} else if r.Expiry != 0 {
expiry = time.Now().Add(r.Expiry)
}
if options.TTL != 0 {
expiry = time.Now().Add(options.TTL)
}
if expiry.IsZero() {
_, err = st.Exec(r.Key, r.Value, metadata, nil)
} else {
_, err = st.Exec(r.Key, r.Value, metadata, expiry)
}
if err != nil {
return errors.Wrap(err, "Couldn't insert record "+r.Key)
}
return nil
}
// Delete removes the record with the given key
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error {
var options store.DeleteOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return err
}
st, err := s.prepare(options.Database, options.Table, "delete")
if err != nil {
return err
}
defer st.Close()
result, err := st.Exec(key)
if err != nil {
return err
}
_, err = result.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *sqlStore) Options() store.Options {
return s.options
}
func (s *sqlStore) String() string {
return "cockroach"
}
// NewStore returns a new micro Store backed by CockroachDB
func NewStore(opts ...store.Option) store.Store {
options := store.Options{
Database: DefaultDatabase,
Table: DefaultTable,
}
for _, o := range opts {
o(&options)
}
// new store
s := new(sqlStore)
// set the options
s.options = options
// mark known databases
s.databases = make(map[string]bool)
// best-effort configure the store
if err := s.configure(); err != nil {
if logger.V(logger.ErrorLevel, logger.DefaultLogger) {
logger.Error("Error configuring store ", err)
}
}
// return store
return s
}
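
A short sketch of how this store was constructed and written to, mirroring the options used in the test below (the .../store/cockroach import path is assumed from the package name). Note the precedence in Write above: a non-zero WriteTTL is applied after WriteExpiry, so TTL wins when both are supplied.

package main

import (
	"time"

	"github.com/unistack-org/micro/v3/store"
	"github.com/unistack-org/micro/v3/store/cockroach"
)

func main() {
	s := cockroach.NewStore(
		store.Database("testsql"),
		store.Nodes("host=localhost port=26257 user=root sslmode=disable dbname=test"),
	)
	if err := s.Init(); err != nil {
		panic(err)
	}
	defer s.Close()

	// WriteTTL overrides WriteExpiry because it is evaluated last in Write,
	// so this record expires in 10 seconds, not in an hour
	err := s.Write(
		&store.Record{Key: "k", Value: []byte("v")},
		store.WriteExpiry(time.Now().Add(time.Hour)),
		store.WriteTTL(10*time.Second),
	)
	if err != nil {
		panic(err)
	}
}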


@@ -1,167 +0,0 @@
package cockroach
import (
"database/sql"
"fmt"
"os"
"testing"
"time"
"github.com/kr/pretty"
"github.com/unistack-org/micro/v3/store"
)
func TestSQL(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
t.Skip()
}
connection := fmt.Sprintf(
"host=%s port=%d user=%s sslmode=disable dbname=%s",
"localhost",
26257,
"root",
"test",
)
db, err := sql.Open("postgres", connection)
if err != nil {
t.Fatal(err)
}
if err := db.Ping(); err != nil {
t.Skip("store/cockroach: can't connect to db")
}
db.Close()
sqlStore := NewStore(
store.Database("testsql"),
store.Nodes(connection),
)
if err := sqlStore.Init(); err != nil {
t.Fatal(err)
}
keys, err := sqlStore.List()
if err != nil {
t.Error(err)
} else {
t.Logf("%# v\n", pretty.Formatter(keys))
}
err = sqlStore.Write(
&store.Record{
Key: "test",
Value: []byte("foo"),
},
)
if err != nil {
t.Error(err)
}
err = sqlStore.Write(
&store.Record{
Key: "bar",
Value: []byte("baz"),
},
)
if err != nil {
t.Error(err)
}
err = sqlStore.Write(
&store.Record{
Key: "qux",
Value: []byte("aasad"),
},
)
if err != nil {
t.Error(err)
}
err = sqlStore.Delete("qux")
if err != nil {
t.Error(err)
}
err = sqlStore.Write(&store.Record{
Key: "test",
Value: []byte("bar"),
Expiry: time.Second * 10,
})
if err != nil {
t.Error(err)
}
records, err := sqlStore.Read("test")
if err != nil {
t.Error(err)
}
t.Logf("%# v\n", pretty.Formatter(records))
if string(records[0].Value) != "bar" {
t.Error("Expected bar, got ", string(records[0].Value))
}
time.Sleep(11 * time.Second)
_, err = sqlStore.Read("test")
switch err {
case nil:
t.Error("Key test should have expired")
case store.ErrNotFound:
break
default:
t.Error(err)
}
sqlStore.Delete("bar")
sqlStore.Write(&store.Record{Key: "aaa", Value: []byte("bbb"), Expiry: 10 * time.Second})
sqlStore.Write(&store.Record{Key: "aaaa", Value: []byte("bbb"), Expiry: 10 * time.Second})
sqlStore.Write(&store.Record{Key: "aaaaa", Value: []byte("bbb"), Expiry: 10 * time.Second})
results, err := sqlStore.Read("a", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 3 {
t.Fatal("Results should have returned 3 records")
}
time.Sleep(10 * time.Second)
results, err = sqlStore.Read("a", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 0 {
t.Fatal("Results should have returned 0 records")
}
sqlStore.Write(&store.Record{Key: "bbb", Value: []byte("bbb")}, store.WriteExpiry(time.Now().Add(10*time.Second)))
sqlStore.Write(&store.Record{Key: "bbbb", Value: []byte("bbb")}, store.WriteExpiry(time.Now().Add(10*time.Second)))
sqlStore.Write(&store.Record{Key: "bbbbb", Value: []byte("bbb")}, store.WriteExpiry(time.Now().Add(10*time.Second)))
results, err = sqlStore.Read("b", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 3 {
t.Fatalf("Results should have returned 3 records. Received %d", len(results))
}
time.Sleep(10 * time.Second)
results, err = sqlStore.Read("b", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 0 {
t.Fatalf("Results should have returned 0 records. Received %d", len(results))
}
sqlStore.Write(&store.Record{Key: "ccc", Value: []byte("bbb")}, store.WriteTTL(10*time.Second))
sqlStore.Write(&store.Record{Key: "cccc", Value: []byte("bbb")}, store.WriteTTL(10*time.Second))
sqlStore.Write(&store.Record{Key: "ccccc", Value: []byte("bbb")}, store.WriteTTL(10*time.Second))
results, err = sqlStore.Read("c", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 3 {
t.Fatalf("Results should have returned 3 records. Received %d", len(results))
}
time.Sleep(10 * time.Second)
results, err = sqlStore.Read("c", store.ReadPrefix())
if err != nil {
t.Error(err)
}
if len(results) != 0 {
t.Fatalf("Results should have returned 0 records. Received %d", len(results))
}
}


@@ -1,45 +0,0 @@
package cockroach
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// https://github.com/upper/db/blob/master/postgresql/custom_types.go#L43
type Metadata map[string]interface{}
// Scan satisfies the sql.Scanner interface.
func (m *Metadata) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("Type assertion .([]byte) failed.")
}
var i interface{}
err := json.Unmarshal(source, &i)
if err != nil {
return err
}
*m, ok = i.(map[string]interface{})
if !ok {
return errors.New("Type assertion .(map[string]interface{}) failed.")
}
return nil
}
// Value satisfies the driver.Valuer interface.
func (m Metadata) Value() (driver.Value, error) {
j, err := json.Marshal(m)
return j, err
}
func toMetadata(m *Metadata) map[string]interface{} {
md := make(map[string]interface{})
for k, v := range *m {
md[k] = v
}
return md
}
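
A small sketch of the round trip these two methods implement: Value marshals the map to JSON for the driver, and Scan decodes the bytes coming back from the JSONB column (the .../store/cockroach import path is assumed).

package main

import (
	"fmt"

	"github.com/unistack-org/micro/v3/store/cockroach"
)

func main() {
	in := cockroach.Metadata{"owner": "team-a", "weight": 10}

	// driver.Valuer side: JSON bytes handed to the database
	v, err := in.Value()
	if err != nil {
		panic(err)
	}

	// sql.Scanner side: decode what the JSONB column returns
	var out cockroach.Metadata
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[owner:team-a weight:10]
}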


@@ -1,395 +0,0 @@
// Package file is a file system backed store
package file
import (
"encoding/json"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/unistack-org/micro/v3/store"
bolt "go.etcd.io/bbolt"
)
var (
// DefaultDatabase is the namespace that the bbolt store
// will use if no namespace is provided.
DefaultDatabase = "micro"
// DefaultTable when none is specified
DefaultTable = "micro"
// DefaultDir is the default directory for bbolt files
DefaultDir = filepath.Join(os.TempDir(), "micro", "store")
// bucket used for data storage
dataBucket = "data"
)
// NewStore returns a file store
func NewStore(opts ...store.Option) store.Store {
s := &fileStore{}
s.init(opts...)
return s
}
type fileStore struct {
options store.Options
dir string
}
// record stored by us
type record struct {
Key string
Value []byte
Metadata map[string]interface{}
ExpiresAt time.Time
}
func key(database, table string) string {
return database + ":" + table
}
func (m *fileStore) delete(db *bolt.DB, key string) error {
return db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(dataBucket))
if b == nil {
return nil
}
return b.Delete([]byte(key))
})
}
func (m *fileStore) init(opts ...store.Option) error {
for _, o := range opts {
o(&m.options)
}
if m.options.Database == "" {
m.options.Database = DefaultDatabase
}
if m.options.Table == "" {
// bbolt requires the bucket name to be non-empty
m.options.Table = DefaultTable
}
// create the data directory, e.g. /tmp/micro/store/<database>
dir := filepath.Join(DefaultDir, m.options.Database)
// Ignore the error as the folder might already exist;
// reads and writes will return sensible errors about the
// missing directory if the path could not be created anyway
os.MkdirAll(dir, 0700)
return nil
}
func (f *fileStore) getDB(database, table string) (*bolt.DB, error) {
if len(database) == 0 {
database = f.options.Database
}
if len(table) == 0 {
table = f.options.Table
}
// create the data directory, e.g. /tmp/micro/store/<database>
dir := filepath.Join(DefaultDir, database)
// create the database handle
fname := table + ".db"
// make the dir
os.MkdirAll(dir, 0700)
// database path
dbPath := filepath.Join(dir, fname)
// create new db handle
// Bolt DB only allows one process to open the file R/W so make sure we're doing this under a lock
return bolt.Open(dbPath, 0700, &bolt.Options{Timeout: 5 * time.Second})
}
func (m *fileStore) list(db *bolt.DB, limit, offset uint) []string {
var allItems []string
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(dataBucket))
// nothing to read
if b == nil {
return nil
}
// @todo very inefficient
if err := b.ForEach(func(k, v []byte) error {
storedRecord := &record{}
if err := json.Unmarshal(v, storedRecord); err != nil {
return err
}
if !storedRecord.ExpiresAt.IsZero() {
if storedRecord.ExpiresAt.Before(time.Now()) {
return nil
}
}
allItems = append(allItems, string(k))
return nil
}); err != nil {
return err
}
return nil
})
allKeys := allItems
if limit != 0 || offset != 0 {
sort.Slice(allKeys, func(i, j int) bool { return allKeys[i] < allKeys[j] })
end := len(allKeys)
if limit > 0 {
calcLimit := int(offset + limit)
if calcLimit < end {
end = calcLimit
}
}
if int(offset) >= end {
return nil
}
return allKeys[offset:end]
}
return allKeys
}
func (m *fileStore) get(db *bolt.DB, k string) (*store.Record, error) {
var value []byte
db.View(func(tx *bolt.Tx) error {
// @todo this is still very experimental...
b := tx.Bucket([]byte(dataBucket))
if b == nil {
return nil
}
value = b.Get([]byte(k))
return nil
})
if value == nil {
return nil, store.ErrNotFound
}
storedRecord := &record{}
if err := json.Unmarshal(value, storedRecord); err != nil {
return nil, err
}
newRecord := &store.Record{}
newRecord.Key = storedRecord.Key
newRecord.Value = storedRecord.Value
newRecord.Metadata = make(map[string]interface{})
for k, v := range storedRecord.Metadata {
newRecord.Metadata[k] = v
}
if !storedRecord.ExpiresAt.IsZero() {
if storedRecord.ExpiresAt.Before(time.Now()) {
return nil, store.ErrNotFound
}
newRecord.Expiry = time.Until(storedRecord.ExpiresAt)
}
return newRecord, nil
}
func (m *fileStore) set(db *bolt.DB, r *store.Record) error {
// copy the incoming record and then
// convert the expiry in to a hard timestamp
item := &record{}
item.Key = r.Key
item.Value = r.Value
item.Metadata = make(map[string]interface{})
if r.Expiry != 0 {
item.ExpiresAt = time.Now().Add(r.Expiry)
}
for k, v := range r.Metadata {
item.Metadata[k] = v
}
// marshal the data
data, _ := json.Marshal(item)
return db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(dataBucket))
if b == nil {
var err error
b, err = tx.CreateBucketIfNotExists([]byte(dataBucket))
if err != nil {
return err
}
}
return b.Put([]byte(r.Key), data)
})
}
func (f *fileStore) Close() error {
return nil
}
func (f *fileStore) Init(opts ...store.Option) error {
return f.init(opts...)
}
func (m *fileStore) Delete(key string, opts ...store.DeleteOption) error {
var deleteOptions store.DeleteOptions
for _, o := range opts {
o(&deleteOptions)
}
db, err := m.getDB(deleteOptions.Database, deleteOptions.Table)
if err != nil {
return err
}
defer db.Close()
return m.delete(db, key)
}
func (m *fileStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
var readOpts store.ReadOptions
for _, o := range opts {
o(&readOpts)
}
db, err := m.getDB(readOpts.Database, readOpts.Table)
if err != nil {
return nil, err
}
defer db.Close()
var keys []string
// Handle Prefix / suffix
// TODO: do range scan here rather than listing all keys
if readOpts.Prefix || readOpts.Suffix {
// list the keys
k := m.list(db, readOpts.Limit, readOpts.Offset)
// check for prefix and suffix
for _, v := range k {
if readOpts.Prefix && !strings.HasPrefix(v, key) {
continue
}
if readOpts.Suffix && !strings.HasSuffix(v, key) {
continue
}
keys = append(keys, v)
}
} else {
keys = []string{key}
}
var results []*store.Record
for _, k := range keys {
r, err := m.get(db, k)
if err != nil {
return results, err
}
results = append(results, r)
}
return results, nil
}
func (m *fileStore) Write(r *store.Record, opts ...store.WriteOption) error {
var writeOpts store.WriteOptions
for _, o := range opts {
o(&writeOpts)
}
db, err := m.getDB(writeOpts.Database, writeOpts.Table)
if err != nil {
return err
}
defer db.Close()
if len(opts) > 0 {
// Copy the record before applying options, or the incoming record will be mutated
newRecord := store.Record{}
newRecord.Key = r.Key
newRecord.Value = r.Value
newRecord.Metadata = make(map[string]interface{})
newRecord.Expiry = r.Expiry
if !writeOpts.Expiry.IsZero() {
newRecord.Expiry = time.Until(writeOpts.Expiry)
}
if writeOpts.TTL != 0 {
newRecord.Expiry = writeOpts.TTL
}
for k, v := range r.Metadata {
newRecord.Metadata[k] = v
}
return m.set(db, &newRecord)
}
return m.set(db, r)
}
func (m *fileStore) Options() store.Options {
return m.options
}
func (m *fileStore) List(opts ...store.ListOption) ([]string, error) {
var listOptions store.ListOptions
for _, o := range opts {
o(&listOptions)
}
db, err := m.getDB(listOptions.Database, listOptions.Table)
if err != nil {
return nil, err
}
defer db.Close()
// TODO apply prefix/suffix in range query
allKeys := m.list(db, listOptions.Limit, listOptions.Offset)
if len(listOptions.Prefix) > 0 {
var prefixKeys []string
for _, k := range allKeys {
if strings.HasPrefix(k, listOptions.Prefix) {
prefixKeys = append(prefixKeys, k)
}
}
allKeys = prefixKeys
}
if len(listOptions.Suffix) > 0 {
var suffixKeys []string
for _, k := range allKeys {
if strings.HasSuffix(k, listOptions.Suffix) {
suffixKeys = append(suffixKeys, k)
}
}
allKeys = suffixKeys
}
return allKeys, nil
}
func (m *fileStore) String() string {
return "file"
}
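
Operationally, the key detail above is where data lands: getDB opens one bbolt file per database/table under DefaultDir. A minimal sketch of the resulting path for the default database and table:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// mirrors DefaultDir and getDB above: <tmpdir>/micro/store/<database>/<table>.db
	defaultDir := filepath.Join(os.TempDir(), "micro", "store")
	dbPath := filepath.Join(defaultDir, "micro", "micro.db")
	fmt.Println(dbPath) // e.g. /tmp/micro/store/micro/micro.db
}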


@@ -1,268 +0,0 @@
package file
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/kr/pretty"
"github.com/unistack-org/micro/v3/store"
)
func cleanup(db string, s store.Store) {
s.Close()
dir := filepath.Join(DefaultDir, db+"/")
os.RemoveAll(dir)
}
func TestFileStoreReInit(t *testing.T) {
s := NewStore(store.Table("aaa"))
defer cleanup(DefaultDatabase, s)
s.Init(store.Table("bbb"))
if s.Options().Table != "bbb" {
t.Error("Init didn't reinitialise the store")
}
}
func TestFileStoreBasic(t *testing.T) {
s := NewStore()
defer cleanup(DefaultDatabase, s)
fileTest(s, t)
}
func TestFileStoreTable(t *testing.T) {
s := NewStore(store.Table("testTable"))
defer cleanup(DefaultDatabase, s)
fileTest(s, t)
}
func TestFileStoreDatabase(t *testing.T) {
s := NewStore(store.Database("testdb"))
defer cleanup("testdb", s)
fileTest(s, t)
}
func TestFileStoreDatabaseTable(t *testing.T) {
s := NewStore(store.Table("testTable"), store.Database("testdb"))
defer cleanup("testdb", s)
fileTest(s, t)
}
func fileTest(s store.Store, t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Options %s %v\n", s.String(), s.Options())
}
// Read and Write an expiring Record
if err := s.Write(&store.Record{
Key: "Hello",
Value: []byte("World"),
Expiry: time.Millisecond * 150,
}); err != nil {
t.Error(err)
}
if r, err := s.Read("Hello"); err != nil {
t.Fatal(err)
} else {
if len(r) != 1 {
t.Error("Read returned multiple records")
}
if r[0].Key != "Hello" {
t.Errorf("Expected %s, got %s", "Hello", r[0].Key)
}
if string(r[0].Value) != "World" {
t.Errorf("Expected %s, got %s", "World", r[0].Value)
}
}
// wait for expiry
time.Sleep(time.Millisecond * 200)
if _, err := s.Read("Hello"); err != store.ErrNotFound {
t.Errorf("Expected %# v, got %# v", store.ErrNotFound, err)
}
// Write 3 records with various expiry and get with Table
records := []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "foobar",
Value: []byte("foobarfoobar"),
Expiry: time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
// wait for the expiry
time.Sleep(time.Millisecond * 200)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo"); err != store.ErrNotFound {
t.Errorf("Expected read failure read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), spew.Sdump(results))
}
}
// Write 3 records with various expiry and get with Suffix
records = []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "barfoo",
Value: []byte("barfoobarfoo"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "bazbarfoo",
Value: []byte("bazbarfoobazbarfoo"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
// t.Logf("Table test: %# v\n", spew.Sdump(results))
}
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), spew.Sdump(results))
}
}
// Test Table, Suffix and WriteOptions
if err := s.Write(&store.Record{
Key: "foofoobarbar",
Value: []byte("something"),
}, store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "foofoo",
Value: []byte("something"),
}, store.WriteExpiry(time.Now().Add(time.Millisecond*100))); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "barbar",
Value: []byte("something"),
// TTL has higher precedence than expiry
}, store.WriteExpiry(time.Now().Add(time.Hour)), store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if results, err := s.Read("foo", store.ReadPrefix(), store.ReadSuffix()); err != nil {
t.Error(err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 results, got %d: %# v", len(results), spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.List(); err != nil {
t.Errorf("List failed: %s", err)
} else {
if len(results) != 0 {
t.Errorf("Expiry options were not effective, results :%v", spew.Sdump(results))
}
}
// write the following records
for i := 0; i < 10; i++ {
s.Write(&store.Record{
Key: fmt.Sprintf("a%d", i),
Value: []byte{},
})
}
// read back a few records
if results, err := s.Read("a", store.ReadLimit(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
if !strings.HasPrefix(results[0].Key, "a") {
t.Fatalf("Expected a prefix, got %s", results[0].Key)
}
}
// read the rest back
if results, err := s.Read("a", store.ReadLimit(30), store.ReadOffset(5), store.ReadPrefix()); err != nil {
t.Fatal(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
}
}


@@ -1,299 +0,0 @@
// Package memory is an in-memory store
package memory
import (
"path/filepath"
"sort"
"strings"
"time"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/unistack-org/micro/v3/store"
)
// NewStore returns a memory store
func NewStore(opts ...store.Option) store.Store {
s := &memoryStore{
options: store.Options{
Database: "micro",
Table: "micro",
},
store: cache.New(cache.NoExpiration, 5*time.Minute),
}
for _, o := range opts {
o(&s.options)
}
return s
}
type memoryStore struct {
options store.Options
store *cache.Cache
}
type storeRecord struct {
key string
value []byte
metadata map[string]interface{}
expiresAt time.Time
}
func (m *memoryStore) key(prefix, key string) string {
return filepath.Join(prefix, key)
}
func (m *memoryStore) prefix(database, table string) string {
if len(database) == 0 {
database = m.options.Database
}
if len(table) == 0 {
table = m.options.Table
}
return filepath.Join(database, table)
}
func (m *memoryStore) get(prefix, key string) (*store.Record, error) {
key = m.key(prefix, key)
var storedRecord *storeRecord
r, found := m.store.Get(key)
if !found {
return nil, store.ErrNotFound
}
storedRecord, ok := r.(*storeRecord)
if !ok {
return nil, errors.New("Retrieved a non *storeRecord from the cache")
}
// Copy the record on the way out
newRecord := &store.Record{}
newRecord.Key = strings.TrimPrefix(storedRecord.key, prefix+"/")
newRecord.Value = make([]byte, len(storedRecord.value))
newRecord.Metadata = make(map[string]interface{})
// copy the value into the new record
copy(newRecord.Value, storedRecord.value)
// check if we need to set the expiry
if !storedRecord.expiresAt.IsZero() {
newRecord.Expiry = time.Until(storedRecord.expiresAt)
}
// copy in the metadata
for k, v := range storedRecord.metadata {
newRecord.Metadata[k] = v
}
return newRecord, nil
}
func (m *memoryStore) set(prefix string, r *store.Record) {
key := m.key(prefix, r.Key)
// copy the incoming record and then
// convert the expiry in to a hard timestamp
i := &storeRecord{}
i.key = r.Key
i.value = make([]byte, len(r.Value))
i.metadata = make(map[string]interface{})
// copy the value
copy(i.value, r.Value)
// set the expiry
if r.Expiry != 0 {
i.expiresAt = time.Now().Add(r.Expiry)
}
// set the metadata
for k, v := range r.Metadata {
i.metadata[k] = v
}
m.store.Set(key, i, r.Expiry)
}
func (m *memoryStore) delete(prefix, key string) {
key = m.key(prefix, key)
m.store.Delete(key)
}
func (m *memoryStore) list(prefix string, limit, offset uint) []string {
allItems := m.store.Items()
allKeys := make([]string, 0, len(allItems))
for k := range allItems {
if !strings.HasPrefix(k, prefix+"/") {
continue
}
allKeys = append(allKeys, strings.TrimPrefix(k, prefix+"/"))
}
if limit != 0 || offset != 0 {
sort.Slice(allKeys, func(i, j int) bool { return allKeys[i] < allKeys[j] })
end := len(allKeys)
if limit > 0 {
calcLimit := int(offset + limit)
if calcLimit < end {
end = calcLimit
}
}
if int(offset) >= end {
return nil
}
return allKeys[offset:end]
}
return allKeys
}
func (m *memoryStore) Close() error {
m.store.Flush()
return nil
}
func (m *memoryStore) Init(opts ...store.Option) error {
for _, o := range opts {
o(&m.options)
}
return nil
}
func (m *memoryStore) String() string {
return "memory"
}
func (m *memoryStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
readOpts := store.ReadOptions{}
for _, o := range opts {
o(&readOpts)
}
prefix := m.prefix(readOpts.Database, readOpts.Table)
var keys []string
// Handle Prefix / suffix
if readOpts.Prefix || readOpts.Suffix {
k := m.list(prefix, readOpts.Limit, readOpts.Offset)
for _, kk := range k {
if readOpts.Prefix && !strings.HasPrefix(kk, key) {
continue
}
if readOpts.Suffix && !strings.HasSuffix(kk, key) {
continue
}
keys = append(keys, kk)
}
} else {
keys = []string{key}
}
var results []*store.Record
for _, k := range keys {
r, err := m.get(prefix, k)
if err != nil {
return results, err
}
results = append(results, r)
}
return results, nil
}
func (m *memoryStore) Write(r *store.Record, opts ...store.WriteOption) error {
writeOpts := store.WriteOptions{}
for _, o := range opts {
o(&writeOpts)
}
prefix := m.prefix(writeOpts.Database, writeOpts.Table)
if len(opts) > 0 {
// Copy the record before applying options, or the incoming record will be mutated
newRecord := store.Record{}
newRecord.Key = r.Key
newRecord.Value = make([]byte, len(r.Value))
newRecord.Metadata = make(map[string]interface{})
copy(newRecord.Value, r.Value)
newRecord.Expiry = r.Expiry
if !writeOpts.Expiry.IsZero() {
newRecord.Expiry = time.Until(writeOpts.Expiry)
}
if writeOpts.TTL != 0 {
newRecord.Expiry = writeOpts.TTL
}
for k, v := range r.Metadata {
newRecord.Metadata[k] = v
}
m.set(prefix, &newRecord)
return nil
}
// set
m.set(prefix, r)
return nil
}
func (m *memoryStore) Delete(key string, opts ...store.DeleteOption) error {
deleteOptions := store.DeleteOptions{}
for _, o := range opts {
o(&deleteOptions)
}
prefix := m.prefix(deleteOptions.Database, deleteOptions.Table)
m.delete(prefix, key)
return nil
}
func (m *memoryStore) Options() store.Options {
return m.options
}
func (m *memoryStore) List(opts ...store.ListOption) ([]string, error) {
listOptions := store.ListOptions{}
for _, o := range opts {
o(&listOptions)
}
prefix := m.prefix(listOptions.Database, listOptions.Table)
keys := m.list(prefix, listOptions.Limit, listOptions.Offset)
if len(listOptions.Prefix) > 0 {
var prefixKeys []string
for _, k := range keys {
if strings.HasPrefix(k, listOptions.Prefix) {
prefixKeys = append(prefixKeys, k)
}
}
keys = prefixKeys
}
if len(listOptions.Suffix) > 0 {
var suffixKeys []string
for _, k := range keys {
if strings.HasSuffix(k, listOptions.Suffix) {
suffixKeys = append(suffixKeys, k)
}
}
keys = suffixKeys
}
return keys, nil
}
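
A brief sketch of the write-option precedence implemented in Write above: WriteExpiry is applied first and a non-zero WriteTTL then overrides it, so TTL wins when both are given. The memory import path is the one already used by the cache store earlier in this commit.

package main

import (
	"fmt"
	"time"

	"github.com/unistack-org/micro/v3/store"
	"github.com/unistack-org/micro/v3/store/memory"
)

func main() {
	s := memory.NewStore()
	if err := s.Init(); err != nil {
		panic(err)
	}
	defer s.Close()

	// the 100ms TTL overrides the one-hour expiry because it is applied last
	if err := s.Write(
		&store.Record{Key: "k", Value: []byte("v")},
		store.WriteExpiry(time.Now().Add(time.Hour)),
		store.WriteTTL(100*time.Millisecond),
	); err != nil {
		panic(err)
	}

	time.Sleep(200 * time.Millisecond)
	if _, err := s.Read("k"); err == store.ErrNotFound {
		fmt.Println("expired as expected")
	}
}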


@@ -1,278 +0,0 @@
package memory
import (
"fmt"
"os"
"testing"
"time"
"github.com/kr/pretty"
"github.com/unistack-org/micro/v3/store"
)
func TestMemoryReInit(t *testing.T) {
s := NewStore(store.Table("aaa"))
s.Init(store.Table(""))
if len(s.Options().Table) > 0 {
t.Error("Init didn't reinitialise the store")
}
}
func TestMemoryBasic(t *testing.T) {
s := NewStore()
s.Init()
basictest(s, t)
}
func TestMemoryPrefix(t *testing.T) {
s := NewStore()
s.Init(store.Table("some-prefix"))
basictest(s, t)
}
func TestMemoryNamespace(t *testing.T) {
s := NewStore()
s.Init(store.Database("some-namespace"))
basictest(s, t)
}
func TestMemoryNamespacePrefix(t *testing.T) {
s := NewStore()
s.Init(store.Table("some-prefix"), store.Database("some-namespace"))
basictest(s, t)
}
func basictest(s store.Store, t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Testing store %s, with options %# v\n", s.String(), pretty.Formatter(s.Options()))
}
// Read and Write an expiring Record
if err := s.Write(&store.Record{
Key: "Hello",
Value: []byte("World"),
Expiry: time.Millisecond * 100,
}); err != nil {
t.Error(err)
}
if r, err := s.Read("Hello"); err != nil {
t.Error(err)
} else {
if len(r) != 1 {
t.Error("Read returned multiple records")
}
if r[0].Key != "Hello" {
t.Errorf("Expected %s, got %s", "Hello", r[0].Key)
}
if string(r[0].Value) != "World" {
t.Errorf("Expected %s, got %s", "World", r[0].Value)
}
}
time.Sleep(time.Millisecond * 200)
if _, err := s.Read("Hello"); err != store.ErrNotFound {
t.Errorf("Expected %# v, got %# v", store.ErrNotFound, err)
}
// Write 3 records with various expiry and get with prefix
records := []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "foobar",
Value: []byte("foobarfoobar"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "foobarbaz",
Value: []byte("foobarbazfoobarbaz"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %# v\n", pretty.Formatter(results))
}
}
if err := s.Delete("foo", func(d *store.DeleteOptions) {}); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), pretty.Formatter(results))
}
}
// Write 3 records with various expiry and get with Suffix
records = []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "barfoo",
Value: []byte("barfoobarfoo"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "bazbarfoo",
Value: []byte("bazbarfoobazbarfoo"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %# v\n", pretty.Formatter(results))
}
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), pretty.Formatter(results))
}
}
// Test Prefix, Suffix and WriteOptions
if err := s.Write(&store.Record{
Key: "foofoobarbar",
Value: []byte("something"),
}, store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "foofoo",
Value: []byte("something"),
}, store.WriteExpiry(time.Now().Add(time.Millisecond*100))); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "barbar",
Value: []byte("something"),
// TTL has higher precedence than expiry
}, store.WriteExpiry(time.Now().Add(time.Hour)), store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if results, err := s.Read("foo", store.ReadPrefix(), store.ReadSuffix()); err != nil {
t.Error(err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 results, got %d: %# v", len(results), pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.List(); err != nil {
t.Errorf("List failed: %s", err)
} else {
if len(results) != 0 {
t.Error("Expiry options were not effective")
}
}
s.Write(&store.Record{Key: "a", Value: []byte("a")})
s.Write(&store.Record{Key: "aa", Value: []byte("aa")})
s.Write(&store.Record{Key: "aaa", Value: []byte("aaa")})
if results, err := s.Read("b", store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 results, got %d", len(results))
}
}
s.Close() // reset the store
for i := 0; i < 10; i++ {
s.Write(&store.Record{
Key: fmt.Sprintf("a%d", i),
Value: []byte{},
})
}
if results, err := s.Read("a", store.ReadLimit(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
if results[0].Key != "a0" {
t.Fatalf("Expected a0, got %s", results[0].Key)
}
if results[4].Key != "a4" {
t.Fatalf("Expected a4, got %s", results[4].Key)
}
}
if results, err := s.Read("a", store.ReadLimit(30), store.ReadOffset(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Error("Expected 5 results, got ", len(results))
}
}
}


@@ -1,3 +1,5 @@
+// +build ignore
// Package test provides a way to run tests against all the various implementations of the Store interface.
// It can't live in the store package itself because of circular import issues
package test
@@ -47,7 +49,7 @@ func cacheCleanup(db string, s store.Store) {
}
func TestStoreReInit(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
if len(os.Getenv("INTEGRATION_TESTS")) > 0 {
t.Skip()
}
@@ -73,7 +75,7 @@ func TestStoreReInit(t *testing.T) {
}
func TestStoreBasic(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
if len(os.Getenv("INTEGRATION_TESTS")) > 0 {
t.Skip()
}
@@ -97,7 +99,7 @@ func TestStoreBasic(t *testing.T) {
}
func TestStoreTable(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
if len(os.Getenv("INTEGRATION_TESTS")) > 0 {
t.Skip()
}
@@ -120,7 +122,7 @@ func TestStoreTable(t *testing.T) {
}
func TestStoreDatabase(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
if len(os.Getenv("INTEGRATION_TESTS")) > 0 {
t.Skip()
}
@@ -143,7 +145,7 @@ func TestStoreDatabase(t *testing.T) {
}
func TestStoreDatabaseTable(t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) > 0 {
if len(os.Getenv("INTEGRATION_TESTS")) > 0 {
t.Skip()
}
@@ -166,7 +168,7 @@ func TestStoreDatabaseTable(t *testing.T) {
}
func runStoreTest(s store.Store, t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
if len(os.Getenv("INTEGRATION_TESTS")) == 0 {
t.Logf("Options %s %v\n", s.String(), s.Options())
}