micro/store/file/file.go

432 lines
8.6 KiB
Go
Raw Normal View History

2020-04-07 14:53:22 +03:00
// Package file is a file system backed store
package file
import (
"encoding/json"
"os"
"path/filepath"
"sort"
"strings"
"sync"
2020-04-07 14:53:22 +03:00
"time"
"github.com/micro/go-micro/v2/store"
2020-04-07 21:45:27 +03:00
bolt "go.etcd.io/bbolt"
2020-04-07 14:53:22 +03:00
)
var (
	// DefaultDatabase is the namespace that the bbolt store
	// will use if no namespace is provided.
	DefaultDatabase = "micro"
	// DefaultTable is the table used when none is specified.
	// bbolt requires a non-empty bucket/table name.
	DefaultTable = "micro"
	// DefaultDir is the base directory for bbolt files; one
	// <table>.db file is created per database subdirectory.
	DefaultDir = filepath.Join(os.TempDir(), "micro", "store")
	// dataBucket is the single bbolt bucket used for record
	// storage inside every database file.
	dataBucket = "data"
)
// NewStore returns a memory store
func NewStore(opts ...store.Option) store.Store {
s := &fileStore{
handles: make(map[string]*fileHandle),
}
s.init(opts...)
2020-04-07 14:53:22 +03:00
return s
}
// fileStore implements store.Store on top of one bbolt file per
// database/table pair.
type fileStore struct {
	options store.Options
	dir     string
	// the embedded RWMutex guards the handles map and
	// serializes opening of database files
	sync.RWMutex
	// handles caches open bbolt databases keyed by "database:table"
	handles map[string]*fileHandle
}

// fileHandle pairs an open bbolt database with the
// "database:table" key it was opened under.
type fileHandle struct {
	key string
	db  *bolt.DB
}
// record is the JSON-encoded on-disk representation of a
// store.Record. The relative store.Record.Expiry is converted to an
// absolute ExpiresAt timestamp before encoding; a zero ExpiresAt
// means the record never expires.
type record struct {
	Key       string
	Value     []byte
	Metadata  map[string]interface{}
	ExpiresAt time.Time
}
// key builds the handle-map key for a database/table pair.
func key(database, table string) string {
	return strings.Join([]string{database, table}, ":")
}
func (m *fileStore) delete(fd *fileHandle, key string) error {
return fd.db.Update(func(tx *bolt.Tx) error {
2020-04-08 11:57:51 +03:00
b := tx.Bucket([]byte(dataBucket))
2020-04-07 21:45:27 +03:00
if b == nil {
return nil
}
return b.Delete([]byte(key))
})
}
func (m *fileStore) init(opts ...store.Option) error {
2020-04-07 14:53:22 +03:00
for _, o := range opts {
o(&m.options)
}
2020-04-07 21:45:27 +03:00
2020-04-07 14:53:22 +03:00
if m.options.Database == "" {
m.options.Database = DefaultDatabase
2020-04-07 14:53:22 +03:00
}
2020-04-07 21:45:27 +03:00
2020-04-07 14:53:22 +03:00
if m.options.Table == "" {
// bbolt requires bucketname to not be empty
m.options.Table = DefaultTable
2020-04-07 14:53:22 +03:00
}
2020-04-07 21:45:27 +03:00
// create a directory /tmp/micro
dir := filepath.Join(DefaultDir, m.options.Database)
// Ignoring this as the folder might exist.
// Reads/Writes updates will return with sensible error messages
// about the dir not existing in case this cannot create the path anyway
os.MkdirAll(dir, 0700)
2020-04-07 21:45:27 +03:00
return nil
}
func (f *fileStore) getDB(database, table string) (*fileHandle, error) {
if len(database) == 0 {
database = f.options.Database
}
if len(table) == 0 {
table = f.options.Table
}
k := key(database, table)
f.RLock()
fd, ok := f.handles[k]
f.RUnlock()
2020-04-07 21:45:27 +03:00
// return the file handle
if ok {
return fd, nil
2020-04-07 21:45:27 +03:00
}
// double check locking
f.Lock()
defer f.Unlock()
if fd, ok := f.handles[k]; ok {
return fd, nil
}
// create a directory /tmp/micro
dir := filepath.Join(DefaultDir, database)
// create the database handle
fname := table + ".db"
// make the dir
os.MkdirAll(dir, 0700)
// database path
dbPath := filepath.Join(dir, fname)
2020-04-07 21:45:27 +03:00
// create new db handle
// Bolt DB only allows one process to open the file R/W so make sure we're doing this under a lock
db, err := bolt.Open(dbPath, 0700, &bolt.Options{Timeout: 5 * time.Second})
2020-04-07 21:45:27 +03:00
if err != nil {
return nil, err
2020-04-07 21:45:27 +03:00
}
fd = &fileHandle{
key: k,
db: db,
}
f.handles[k] = fd
2020-04-07 21:45:27 +03:00
return fd, nil
2020-04-07 14:53:22 +03:00
}
func (m *fileStore) list(fd *fileHandle, limit, offset uint) []string {
2020-04-07 21:45:27 +03:00
var allItems []string
fd.db.View(func(tx *bolt.Tx) error {
2020-04-08 11:57:51 +03:00
b := tx.Bucket([]byte(dataBucket))
2020-04-07 21:45:27 +03:00
// nothing to read
if b == nil {
return nil
}
// @todo very inefficient
if err := b.ForEach(func(k, v []byte) error {
storedRecord := &record{}
if err := json.Unmarshal(v, storedRecord); err != nil {
return err
}
if !storedRecord.ExpiresAt.IsZero() {
if storedRecord.ExpiresAt.Before(time.Now()) {
return nil
}
}
allItems = append(allItems, string(k))
return nil
}); err != nil {
return err
}
return nil
})
allKeys := make([]string, len(allItems))
for i, k := range allItems {
allKeys[i] = k
}
if limit != 0 || offset != 0 {
sort.Slice(allKeys, func(i, j int) bool { return allKeys[i] < allKeys[j] })
min := func(i, j uint) uint {
if i < j {
return i
}
return j
}
return allKeys[offset:min(limit, uint(len(allKeys)))]
}
return allKeys
}
func (m *fileStore) get(fd *fileHandle, k string) (*store.Record, error) {
2020-04-07 21:45:27 +03:00
var value []byte
fd.db.View(func(tx *bolt.Tx) error {
2020-04-07 21:45:27 +03:00
// @todo this is still very experimental...
2020-04-08 11:57:51 +03:00
b := tx.Bucket([]byte(dataBucket))
2020-04-07 21:45:27 +03:00
if b == nil {
return nil
}
value = b.Get([]byte(k))
return nil
})
if value == nil {
return nil, store.ErrNotFound
}
storedRecord := &record{}
if err := json.Unmarshal(value, storedRecord); err != nil {
return nil, err
}
newRecord := &store.Record{}
newRecord.Key = storedRecord.Key
newRecord.Value = storedRecord.Value
newRecord.Metadata = make(map[string]interface{})
for k, v := range storedRecord.Metadata {
newRecord.Metadata[k] = v
}
2020-04-07 21:45:27 +03:00
if !storedRecord.ExpiresAt.IsZero() {
if storedRecord.ExpiresAt.Before(time.Now()) {
return nil, store.ErrNotFound
}
newRecord.Expiry = time.Until(storedRecord.ExpiresAt)
}
return newRecord, nil
}
func (m *fileStore) set(fd *fileHandle, r *store.Record) error {
2020-04-07 21:45:27 +03:00
// copy the incoming record and then
// convert the expiry in to a hard timestamp
item := &record{}
item.Key = r.Key
item.Value = r.Value
item.Metadata = make(map[string]interface{})
2020-04-07 21:45:27 +03:00
if r.Expiry != 0 {
item.ExpiresAt = time.Now().Add(r.Expiry)
}
for k, v := range r.Metadata {
item.Metadata[k] = v
}
2020-04-07 21:45:27 +03:00
// marshal the data
data, _ := json.Marshal(item)
return fd.db.Update(func(tx *bolt.Tx) error {
2020-04-08 11:57:51 +03:00
b := tx.Bucket([]byte(dataBucket))
2020-04-07 21:45:27 +03:00
if b == nil {
var err error
2020-04-08 11:57:51 +03:00
b, err = tx.CreateBucketIfNotExists([]byte(dataBucket))
2020-04-07 21:45:27 +03:00
if err != nil {
return err
}
}
return b.Put([]byte(r.Key), data)
})
}
// Close closes every cached database handle and clears the cache.
// It always returns nil.
func (f *fileStore) Close() error {
	f.Lock()
	defer f.Unlock()
	for name, handle := range f.handles {
		// close errors are ignored; the handle is dropped regardless
		handle.db.Close()
		delete(f.handles, name)
	}
	return nil
}
// Init re-applies the given options to the store. It satisfies
// store.Store and always returns nil.
func (f *fileStore) Init(opts ...store.Option) error {
	return f.init(opts...)
}
func (m *fileStore) Delete(key string, opts ...store.DeleteOption) error {
var deleteOptions store.DeleteOptions
2020-04-07 21:45:27 +03:00
for _, o := range opts {
o(&deleteOptions)
}
fd, err := m.getDB(deleteOptions.Database, deleteOptions.Table)
if err != nil {
return err
}
return m.delete(fd, key)
2020-04-07 14:53:22 +03:00
}
func (m *fileStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
var readOpts store.ReadOptions
2020-04-07 14:53:22 +03:00
for _, o := range opts {
o(&readOpts)
}
fd, err := m.getDB(readOpts.Database, readOpts.Table)
if err != nil {
return nil, err
}
2020-04-07 14:53:22 +03:00
var keys []string
// Handle Prefix / suffix
2020-04-07 21:45:27 +03:00
// TODO: do range scan here rather than listing all keys
2020-04-07 14:53:22 +03:00
if readOpts.Prefix || readOpts.Suffix {
// list the keys
k := m.list(fd, readOpts.Limit, readOpts.Offset)
2020-04-07 21:45:27 +03:00
// check for prefix and suffix
for _, v := range k {
if readOpts.Prefix && !strings.HasPrefix(v, key) {
continue
}
if readOpts.Suffix && !strings.HasSuffix(v, key) {
continue
}
keys = append(keys, v)
2020-04-07 14:53:22 +03:00
}
} else {
keys = []string{key}
}
var results []*store.Record
2020-04-07 21:45:27 +03:00
2020-04-07 14:53:22 +03:00
for _, k := range keys {
r, err := m.get(fd, k)
2020-04-07 14:53:22 +03:00
if err != nil {
return results, err
}
results = append(results, r)
}
2020-04-07 21:45:27 +03:00
return results, nil
2020-04-07 14:53:22 +03:00
}
func (m *fileStore) Write(r *store.Record, opts ...store.WriteOption) error {
var writeOpts store.WriteOptions
2020-04-07 14:53:22 +03:00
for _, o := range opts {
o(&writeOpts)
}
fd, err := m.getDB(writeOpts.Database, writeOpts.Table)
if err != nil {
return err
}
2020-04-07 14:53:22 +03:00
if len(opts) > 0 {
// Copy the record before applying options, or the incoming record will be mutated
newRecord := store.Record{}
newRecord.Key = r.Key
newRecord.Value = r.Value
newRecord.Metadata = make(map[string]interface{})
2020-04-07 14:53:22 +03:00
newRecord.Expiry = r.Expiry
if !writeOpts.Expiry.IsZero() {
newRecord.Expiry = time.Until(writeOpts.Expiry)
}
if writeOpts.TTL != 0 {
newRecord.Expiry = writeOpts.TTL
}
for k, v := range r.Metadata {
newRecord.Metadata[k] = v
}
return m.set(fd, &newRecord)
2020-04-07 14:53:22 +03:00
}
return m.set(fd, r)
2020-04-07 14:53:22 +03:00
}
// Options returns the store's configured options.
func (m *fileStore) Options() store.Options {
	return m.options
}
func (m *fileStore) List(opts ...store.ListOption) ([]string, error) {
var listOptions store.ListOptions
2020-04-07 14:53:22 +03:00
for _, o := range opts {
o(&listOptions)
}
2020-04-07 21:45:27 +03:00
fd, err := m.getDB(listOptions.Database, listOptions.Table)
if err != nil {
return nil, err
}
2020-04-07 21:45:27 +03:00
// TODO apply prefix/suffix in range query
allKeys := m.list(fd, listOptions.Limit, listOptions.Offset)
2020-04-07 14:53:22 +03:00
if len(listOptions.Prefix) > 0 {
var prefixKeys []string
for _, k := range allKeys {
if strings.HasPrefix(k, listOptions.Prefix) {
prefixKeys = append(prefixKeys, k)
}
}
allKeys = prefixKeys
}
2020-04-07 21:45:27 +03:00
2020-04-07 14:53:22 +03:00
if len(listOptions.Suffix) > 0 {
var suffixKeys []string
for _, k := range allKeys {
if strings.HasSuffix(k, listOptions.Suffix) {
suffixKeys = append(suffixKeys, k)
}
}
allKeys = suffixKeys
}
return allKeys, nil
}
2020-04-07 21:45:27 +03:00
// String returns the name of this store implementation.
func (m *fileStore) String() string {
	return "file"
}