2020-04-07 14:53:22 +03:00
|
|
|
// Package file is a file system backed store
|
|
|
|
package file
|
|
|
|
|
|
|
|
import (
|
2020-08-20 17:08:35 +03:00
|
|
|
"bytes"
|
2020-04-07 14:53:22 +03:00
|
|
|
"encoding/json"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"time"
|
|
|
|
|
2020-07-27 15:22:00 +03:00
|
|
|
"github.com/micro/go-micro/v3/store"
|
2020-04-07 21:45:27 +03:00
|
|
|
bolt "go.etcd.io/bbolt"
|
2020-04-07 14:53:22 +03:00
|
|
|
)
|
|
|
|
|
2020-04-07 16:19:45 +03:00
|
|
|
var (
|
|
|
|
// DefaultDatabase is the namespace that the bbolt store
|
|
|
|
// will use if no namespace is provided.
|
|
|
|
DefaultDatabase = "micro"
|
|
|
|
// DefaultTable when none is specified
|
|
|
|
DefaultTable = "micro"
|
|
|
|
// DefaultDir is the default directory for bbolt files
|
2020-04-11 13:23:41 +03:00
|
|
|
DefaultDir = filepath.Join(os.TempDir(), "micro", "store")
|
2020-04-08 11:57:51 +03:00
|
|
|
|
|
|
|
// bucket used for data storage
|
|
|
|
dataBucket = "data"
|
2020-04-07 16:19:45 +03:00
|
|
|
)
|
|
|
|
|
2020-07-14 15:13:32 +03:00
|
|
|
// NewStore returns a file store
|
2020-04-07 14:53:22 +03:00
|
|
|
func NewStore(opts ...store.Option) store.Store {
|
2020-07-14 11:35:46 +03:00
|
|
|
s := &fileStore{}
|
2020-04-07 17:43:43 +03:00
|
|
|
s.init(opts...)
|
2020-04-07 14:53:22 +03:00
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
|
|
|
// fileStore is a store.Store implementation backed by bbolt database
// files on the local file system. Each operation opens its own DB
// handle via getDB and closes it when done.
type fileStore struct {
	options store.Options
	// NOTE(review): dir does not appear to be read or written anywhere
	// in this view — confirm whether it is still needed.
	dir string
}
|
|
|
|
|
|
|
|
// fileHandle pairs a lookup key with an open bolt DB handle.
// NOTE(review): fileHandle is not referenced anywhere in this view —
// presumably left over from an earlier handle-caching design; confirm
// before removing.
type fileHandle struct {
	key string
	db *bolt.DB
}
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
// record stored by us
|
|
|
|
// record is the on-disk representation stored by us. The caller-facing
// store.Record carries a relative Expiry duration; it is converted to
// the absolute ExpiresAt timestamp before being marshalled to JSON.
type record struct {
	Key string
	Value []byte
	Metadata map[string]interface{}
	// ExpiresAt is the zero time when the record never expires.
	ExpiresAt time.Time
}
|
|
|
|
|
2020-05-01 00:51:25 +03:00
|
|
|
// key builds the composite "database:table" identifier.
func key(database, table string) string {
	const sep = ":"
	return database + sep + table
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
func (m *fileStore) delete(db *bolt.DB, key string) error {
|
|
|
|
return db.Update(func(tx *bolt.Tx) error {
|
2020-04-08 11:57:51 +03:00
|
|
|
b := tx.Bucket([]byte(dataBucket))
|
2020-04-07 21:45:27 +03:00
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return b.Delete([]byte(key))
|
|
|
|
})
|
2020-04-07 17:43:43 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *fileStore) init(opts ...store.Option) error {
|
2020-04-07 14:53:22 +03:00
|
|
|
for _, o := range opts {
|
|
|
|
o(&m.options)
|
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-04-07 14:53:22 +03:00
|
|
|
if m.options.Database == "" {
|
2020-04-07 16:19:45 +03:00
|
|
|
m.options.Database = DefaultDatabase
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-04-07 14:53:22 +03:00
|
|
|
if m.options.Table == "" {
|
|
|
|
// bbolt requires bucketname to not be empty
|
2020-04-07 16:19:45 +03:00
|
|
|
m.options.Table = DefaultTable
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
|
|
|
// create a directory /tmp/micro
|
2020-04-08 11:51:10 +03:00
|
|
|
dir := filepath.Join(DefaultDir, m.options.Database)
|
2020-04-07 17:43:43 +03:00
|
|
|
// Ignoring this as the folder might exist.
|
|
|
|
// Reads/Writes updates will return with sensible error messages
|
|
|
|
// about the dir not existing in case this cannot create the path anyway
|
2020-04-08 11:51:10 +03:00
|
|
|
os.MkdirAll(dir, 0700)
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-05-01 00:51:25 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
func (f *fileStore) getDB(database, table string) (*bolt.DB, error) {
|
2020-05-01 00:51:25 +03:00
|
|
|
if len(database) == 0 {
|
|
|
|
database = f.options.Database
|
|
|
|
}
|
|
|
|
if len(table) == 0 {
|
|
|
|
table = f.options.Table
|
|
|
|
}
|
|
|
|
|
|
|
|
// create a directory /tmp/micro
|
|
|
|
dir := filepath.Join(DefaultDir, database)
|
|
|
|
// create the database handle
|
|
|
|
fname := table + ".db"
|
|
|
|
// make the dir
|
|
|
|
os.MkdirAll(dir, 0700)
|
|
|
|
// database path
|
|
|
|
dbPath := filepath.Join(dir, fname)
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
// create new db handle
|
2020-06-08 18:19:22 +03:00
|
|
|
// Bolt DB only allows one process to open the file R/W so make sure we're doing this under a lock
|
2020-07-14 11:35:46 +03:00
|
|
|
return bolt.Open(dbPath, 0700, &bolt.Options{Timeout: 5 * time.Second})
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
|
|
|
|
2020-08-20 17:08:35 +03:00
|
|
|
func (m *fileStore) list(db *bolt.DB, limit, offset uint, prefix, suffix string) []string {
|
|
|
|
|
|
|
|
var keys []string
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db.View(func(tx *bolt.Tx) error {
|
2020-04-08 11:57:51 +03:00
|
|
|
b := tx.Bucket([]byte(dataBucket))
|
2020-04-07 21:45:27 +03:00
|
|
|
// nothing to read
|
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2020-08-20 17:08:35 +03:00
|
|
|
c := b.Cursor()
|
|
|
|
var k, v []byte
|
|
|
|
var cont func(k []byte) bool
|
|
|
|
|
|
|
|
if prefix != "" {
|
|
|
|
// for prefix we can speed up the search, not for suffix though :(
|
|
|
|
k, v = c.Seek([]byte(prefix))
|
|
|
|
cont = func(k []byte) bool {
|
|
|
|
return bytes.HasPrefix(k, []byte(prefix))
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
k, v = c.First()
|
|
|
|
cont = func(k []byte) bool {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-08-20 17:08:35 +03:00
|
|
|
for ; k != nil && cont(k); k, v = c.Next() {
|
2020-04-07 21:45:27 +03:00
|
|
|
storedRecord := &record{}
|
|
|
|
|
|
|
|
if err := json.Unmarshal(v, storedRecord); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !storedRecord.ExpiresAt.IsZero() {
|
|
|
|
if storedRecord.ExpiresAt.Before(time.Now()) {
|
2020-08-20 17:08:35 +03:00
|
|
|
continue
|
2020-04-07 21:45:27 +03:00
|
|
|
}
|
|
|
|
}
|
2020-08-20 17:08:35 +03:00
|
|
|
if suffix != "" && !bytes.HasSuffix(k, []byte(suffix)) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if offset > 0 {
|
|
|
|
offset--
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
keys = append(keys, string(k))
|
|
|
|
// this check still works if no limit was passed to begin with, you'll just end up with large -ve value
|
|
|
|
if limit == 1 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
limit--
|
2020-04-07 21:45:27 +03:00
|
|
|
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
2020-08-20 17:08:35 +03:00
|
|
|
return keys
|
2020-04-07 21:45:27 +03:00
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
func (m *fileStore) get(db *bolt.DB, k string) (*store.Record, error) {
|
2020-04-07 21:45:27 +03:00
|
|
|
var value []byte
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db.View(func(tx *bolt.Tx) error {
|
2020-04-07 21:45:27 +03:00
|
|
|
// @todo this is still very experimental...
|
2020-04-08 11:57:51 +03:00
|
|
|
b := tx.Bucket([]byte(dataBucket))
|
2020-04-07 21:45:27 +03:00
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
value = b.Get([]byte(k))
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
if value == nil {
|
|
|
|
return nil, store.ErrNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
storedRecord := &record{}
|
|
|
|
|
|
|
|
if err := json.Unmarshal(value, storedRecord); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
newRecord := &store.Record{}
|
|
|
|
newRecord.Key = storedRecord.Key
|
|
|
|
newRecord.Value = storedRecord.Value
|
2020-06-03 11:45:08 +03:00
|
|
|
newRecord.Metadata = make(map[string]interface{})
|
|
|
|
|
|
|
|
for k, v := range storedRecord.Metadata {
|
|
|
|
newRecord.Metadata[k] = v
|
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
|
|
|
if !storedRecord.ExpiresAt.IsZero() {
|
|
|
|
if storedRecord.ExpiresAt.Before(time.Now()) {
|
|
|
|
return nil, store.ErrNotFound
|
|
|
|
}
|
|
|
|
newRecord.Expiry = time.Until(storedRecord.ExpiresAt)
|
|
|
|
}
|
|
|
|
|
|
|
|
return newRecord, nil
|
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
func (m *fileStore) set(db *bolt.DB, r *store.Record) error {
|
2020-04-07 21:45:27 +03:00
|
|
|
// copy the incoming record and then
|
|
|
|
// convert the expiry in to a hard timestamp
|
|
|
|
item := &record{}
|
|
|
|
item.Key = r.Key
|
|
|
|
item.Value = r.Value
|
2020-06-03 11:45:08 +03:00
|
|
|
item.Metadata = make(map[string]interface{})
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
if r.Expiry != 0 {
|
|
|
|
item.ExpiresAt = time.Now().Add(r.Expiry)
|
|
|
|
}
|
|
|
|
|
2020-06-03 11:45:08 +03:00
|
|
|
for k, v := range r.Metadata {
|
|
|
|
item.Metadata[k] = v
|
|
|
|
}
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
// marshal the data
|
|
|
|
data, _ := json.Marshal(item)
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
return db.Update(func(tx *bolt.Tx) error {
|
2020-04-08 11:57:51 +03:00
|
|
|
b := tx.Bucket([]byte(dataBucket))
|
2020-04-07 21:45:27 +03:00
|
|
|
if b == nil {
|
|
|
|
var err error
|
2020-04-08 11:57:51 +03:00
|
|
|
b, err = tx.CreateBucketIfNotExists([]byte(dataBucket))
|
2020-04-07 21:45:27 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return b.Put([]byte(r.Key), data)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-05-01 00:51:25 +03:00
|
|
|
// Close is a no-op: every operation opens and closes its own DB handle
// via getDB, so there is nothing held open to release here.
func (f *fileStore) Close() error {
	return nil
}
|
|
|
|
|
2020-05-01 00:51:25 +03:00
|
|
|
// Init re-applies options on top of the existing configuration; it
// satisfies the store.Store interface by delegating to init.
func (f *fileStore) Init(opts ...store.Option) error {
	return f.init(opts...)
}
|
|
|
|
|
|
|
|
func (m *fileStore) Delete(key string, opts ...store.DeleteOption) error {
|
2020-05-01 00:51:25 +03:00
|
|
|
var deleteOptions store.DeleteOptions
|
2020-04-07 21:45:27 +03:00
|
|
|
for _, o := range opts {
|
|
|
|
o(&deleteOptions)
|
|
|
|
}
|
2020-05-01 00:51:25 +03:00
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db, err := m.getDB(deleteOptions.Database, deleteOptions.Table)
|
2020-05-01 00:51:25 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-07-14 11:35:46 +03:00
|
|
|
defer db.Close()
|
2020-05-01 00:51:25 +03:00
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
return m.delete(db, key)
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *fileStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
|
2020-05-01 00:51:25 +03:00
|
|
|
var readOpts store.ReadOptions
|
2020-04-07 14:53:22 +03:00
|
|
|
for _, o := range opts {
|
|
|
|
o(&readOpts)
|
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db, err := m.getDB(readOpts.Database, readOpts.Table)
|
2020-05-01 00:51:25 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-07-14 11:35:46 +03:00
|
|
|
defer db.Close()
|
2020-05-01 00:51:25 +03:00
|
|
|
|
2020-04-07 14:53:22 +03:00
|
|
|
var keys []string
|
|
|
|
|
|
|
|
// Handle Prefix / suffix
|
|
|
|
if readOpts.Prefix || readOpts.Suffix {
|
2020-08-20 17:08:35 +03:00
|
|
|
prefix := ""
|
|
|
|
if readOpts.Prefix {
|
|
|
|
prefix = key
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
2020-08-20 17:08:35 +03:00
|
|
|
suffix := ""
|
|
|
|
if readOpts.Suffix {
|
|
|
|
suffix = key
|
|
|
|
}
|
|
|
|
// list the keys
|
|
|
|
keys = m.list(db, readOpts.Limit, readOpts.Offset, prefix, suffix)
|
2020-04-07 14:53:22 +03:00
|
|
|
} else {
|
|
|
|
keys = []string{key}
|
|
|
|
}
|
|
|
|
|
|
|
|
var results []*store.Record
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-04-07 14:53:22 +03:00
|
|
|
for _, k := range keys {
|
2020-07-14 11:35:46 +03:00
|
|
|
r, err := m.get(db, k)
|
2020-04-07 14:53:22 +03:00
|
|
|
if err != nil {
|
|
|
|
return results, err
|
|
|
|
}
|
|
|
|
results = append(results, r)
|
|
|
|
}
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
return results, nil
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *fileStore) Write(r *store.Record, opts ...store.WriteOption) error {
|
2020-05-01 00:51:25 +03:00
|
|
|
var writeOpts store.WriteOptions
|
2020-04-07 14:53:22 +03:00
|
|
|
for _, o := range opts {
|
|
|
|
o(&writeOpts)
|
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db, err := m.getDB(writeOpts.Database, writeOpts.Table)
|
2020-05-01 00:51:25 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-07-14 11:35:46 +03:00
|
|
|
defer db.Close()
|
2020-05-01 00:51:25 +03:00
|
|
|
|
2020-04-07 14:53:22 +03:00
|
|
|
if len(opts) > 0 {
|
|
|
|
// Copy the record before applying options, or the incoming record will be mutated
|
|
|
|
newRecord := store.Record{}
|
|
|
|
newRecord.Key = r.Key
|
|
|
|
newRecord.Value = r.Value
|
2020-06-03 11:45:08 +03:00
|
|
|
newRecord.Metadata = make(map[string]interface{})
|
2020-04-07 14:53:22 +03:00
|
|
|
newRecord.Expiry = r.Expiry
|
|
|
|
|
2020-06-03 11:45:08 +03:00
|
|
|
for k, v := range r.Metadata {
|
|
|
|
newRecord.Metadata[k] = v
|
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
return m.set(db, &newRecord)
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
return m.set(db, r)
|
2020-04-07 14:53:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Options returns the store's currently configured options.
func (m *fileStore) Options() store.Options {
	return m.options
}
|
|
|
|
|
|
|
|
func (m *fileStore) List(opts ...store.ListOption) ([]string, error) {
|
2020-05-01 00:51:25 +03:00
|
|
|
var listOptions store.ListOptions
|
2020-04-07 14:53:22 +03:00
|
|
|
|
|
|
|
for _, o := range opts {
|
|
|
|
o(&listOptions)
|
|
|
|
}
|
2020-04-07 21:45:27 +03:00
|
|
|
|
2020-07-14 11:35:46 +03:00
|
|
|
db, err := m.getDB(listOptions.Database, listOptions.Table)
|
2020-05-01 00:51:25 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-07-14 11:35:46 +03:00
|
|
|
defer db.Close()
|
2020-05-01 00:51:25 +03:00
|
|
|
|
2020-08-20 17:08:35 +03:00
|
|
|
allKeys := m.list(db, listOptions.Limit, listOptions.Offset, listOptions.Prefix, listOptions.Suffix)
|
2020-04-07 14:53:22 +03:00
|
|
|
|
|
|
|
return allKeys, nil
|
|
|
|
}
|
|
|
|
|
2020-04-07 21:45:27 +03:00
|
|
|
// String returns the name of this store implementation.
func (m *fileStore) String() string {
	return "file"
}
|