package flow

import (
	"context"
	"fmt"
	"path/filepath"
	"sync"

	"github.com/heimdalr/dag"

	"go.unistack.org/micro/v3/client"
	"go.unistack.org/micro/v3/codec"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/metadata"
	"go.unistack.org/micro/v3/store"
	"go.unistack.org/micro/v3/util/id"
)

type microFlow struct {
	opts Options
}

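// microWorkflow is a DAG-backed Workflow: every Step is stored as a vertex
// and each entry in a step's Requires() becomes an edge from the required
// step to the dependent step, so steps run only after their requirements.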
type microWorkflow struct {
	opts   Options
	g      *dag.DAG
	steps  map[string]Step
	id     string
	status Status
	sync.RWMutex
	init bool
}

func (w *microWorkflow) ID() string {
	return w.id
}

func (w *microWorkflow) Status() Status {
	return w.status
}

func (w *microWorkflow) AppendSteps(steps ...Step) error {
	var err error

	w.Lock()
	defer w.Unlock()

	for _, s := range steps {
		w.steps[s.String()] = s
		if _, err = w.g.AddVertex(s); err != nil {
			return err
		}
	}

	for _, dst := range steps {
		for _, req := range dst.Requires() {
			src, ok := w.steps[req]
			if !ok {
				return ErrStepNotExists
			}
			if err = w.g.AddEdge(src.String(), dst.String()); err != nil {
				return err
			}
		}
	}

	w.g.ReduceTransitively()

	return nil
}

func (w *microWorkflow) RemoveSteps(steps ...Step) error {
	// TODO: handle the case when a removed step requires, or is required by, other steps

	w.Lock()
	defer w.Unlock()

	for _, s := range steps {
		delete(w.steps, s.String())
		w.g.DeleteVertex(s.String())
	}

	for _, dst := range steps {
		for _, req := range dst.Requires() {
			src, ok := w.steps[req]
			if !ok {
				return ErrStepNotExists
			}
			w.g.AddEdge(src.String(), dst.String())
		}
	}

	w.g.ReduceTransitively()

	return nil
}

func (w *microWorkflow) Abort(ctx context.Context, id string) error {
	workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
	return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusAborted.String())})
}

func (w *microWorkflow) Suspend(ctx context.Context, id string) error {
	workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
	return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusSuspend.String())})
}

func (w *microWorkflow) Resume(ctx context.Context, id string) error {
	workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
	return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusRunning.String())})
}

func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (string, error) {
	w.Lock()
	if !w.init {
		w.g.ReduceTransitively()
		w.init = true
	}
	w.Unlock()

	eid, err := id.New()
	if err != nil {
		return "", err
	}

	// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
	workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))

	options := NewExecuteOptions(opts...)

	nopts := make([]ExecuteOption, 0, len(opts)+5)

	nopts = append(nopts,
		ExecuteClient(w.opts.Client),
		ExecuteTracer(w.opts.Tracer),
		ExecuteLogger(w.opts.Logger),
		ExecuteMeter(w.opts.Meter),
	)
	nopts = append(nopts, opts...)

	if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
		w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
		return eid, werr
	}

	var startID string
	if options.Start == "" {
		mp := w.g.GetRoots()
		if len(mp) != 1 {
			return eid, ErrStepNotExists
		}
		for k := range mp {
			startID = k
		}
	} else {
		for k, v := range w.g.GetVertices() {
			if v == options.Start {
				startID = k
			}
		}
	}

	if startID == "" {
		return eid, ErrStepNotExists
	}

	if options.Async {
		go w.handleWorkflow(startID, nopts...)
		return eid, nil
	}

	return eid, w.handleWorkflow(startID, nopts...)
}

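// handleWorkflow walks the DAG starting at startID: it creates one buffered
// input channel per vertex and spawns one goroutine per vertex, which waits
// for all parent results, runs the step callback and fans its result out to
// the vertex's children (leaf results are collected on a shared channel).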
func (w *microWorkflow) handleWorkflow(startID string, opts ...ExecuteOption) error {
	w.RLock()
	defer w.RUnlock()

	var err error

	// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
	// workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))

	// Get IDs of all descendant vertices.
	flowIDs, errDes := w.g.GetDescendants(startID)
	if errDes != nil {
		return errDes
	}

	// inputChannels provides an input channel for each of the descendant vertices (plus the start vertex).
	inputChannels := make(map[string]chan FlowResult, len(flowIDs)+1)

	// Iterate vertex IDs and create an input channel for each of them and a single
	// output channel for leaves. Note, this "pre-flight" is needed to ensure we
	// really have an input channel regardless of how we traverse the tree and spawn
	// workers.
	leafCount := 0

	for id := range flowIDs {
		// Get all parents of this vertex.
		parents, errPar := w.g.GetParents(id)
		if errPar != nil {
			return errPar
		}

		// Create a buffered input channel that has capacity for all parent results.
		inputChannels[id] = make(chan FlowResult, len(parents))

		if ok, _ := w.g.IsLeaf(id); ok {
			leafCount++
		}
	}

	// outputChannel carries the results of leaf vertices.
	outputChannel := make(chan FlowResult, leafCount)

	// NOTE: per-step execution is not wired into this DAG walk yet (the previous
	// channel-based implementation is kept in the comment block at the end of this
	// function). The initial inputs and the callback below are placeholders.
	var inputs []FlowResult
	callback := func(_ *dag.DAG, stepID string, _ []FlowResult) (FlowResult, error) {
		// TODO: execute w.steps[stepID] here and return its result.
		if w.opts.Logger.V(logger.TraceLevel) {
			w.opts.Logger.Tracef(w.opts.Context, "step %s ready for execution", stepID)
		}
		var zero FlowResult
		return zero, nil
	}

	// To also process the start vertex and to have its results being passed to its
	// children, add it to the vertex IDs. Also add an input channel for the start
	// vertex and feed the inputs to this channel.
	flowIDs[startID] = struct{}{}
	inputChannels[startID] = make(chan FlowResult, len(inputs))
	for _, i := range inputs {
		inputChannels[startID] <- i
	}

	wg := sync.WaitGroup{}

	// errCh keeps the first worker error so it can be returned after the walk.
	errCh := make(chan error, 1)

	// Iterate all vertex IDs (now incl. start vertex) and handle each worker (incl.
	// inputs and outputs) in a separate goroutine.
	for id := range flowIDs {
		// Get all children of this vertex that later need to be notified. Note, we
		// collect all children before the goroutine to be able to release the read
		// lock as early as possible.
		children, errChildren := w.g.GetChildren(id)
		if errChildren != nil {
			return errChildren
		}

		// Remember to wait for this goroutine.
		wg.Add(1)

		go func(id string) {
			// "Sign off" when done.
			defer wg.Done()

			// Get this vertex's input channel.
			// Note, only concurrent read here, which is fine.
			c := inputChannels[id]

			// Await all parent inputs and stuff them into a slice.
			parentCount := cap(c)
			parentResults := make([]FlowResult, parentCount)
			for i := 0; i < parentCount; i++ {
				parentResults[i] = <-c
			}

			// Execute the worker.
			flowResult, errWorker := callback(w.g, id, parentResults)
			if errWorker != nil {
				// Keep the first error but still forward a (zero) result so that
				// children do not block forever waiting on this vertex.
				select {
				case errCh <- errWorker:
				default:
				}
			}

			// Send this worker's FlowResult onto all children's input channels or, if it is
			// a leaf (i.e. no children), send the result onto the output channel.
			if len(children) > 0 {
				for child := range children {
					inputChannels[child] <- flowResult
				}
			} else {
				outputChannel <- flowResult
			}
		}(id)
	}

	// Wait for all go routines to finish.
	wg.Wait()

	// Propagate the first worker error, if any.
	select {
	case err = <-errCh:
	default:
	}

	// Await all leaf vertex results and stuff them into a slice.
	resultCount := cap(outputChannel)
	results := make([]FlowResult, resultCount)
	for i := 0; i < resultCount; i++ {
		results[i] = <-outputChannel
	}
	// TODO: the collected leaf results are unused for now; persist or return
	// them once per-step execution is wired up.
	_ = results

	/*
		go func() {
			for idx := range steps {
				for nidx := range steps[idx] {
					wStatus := &codec.Frame{}
					if werr := workflowStore.Read(w.opts.Context, "status", wStatus); werr != nil {
						cherr <- werr
						return
					}
					if status := StringStatus[string(wStatus.Data)]; status != StatusRunning {
						chstatus <- status
						return
					}
					if w.opts.Logger.V(logger.TraceLevel) {
						w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
					}
					cstep := steps[idx][nidx]
					// nolint: nestif
					if len(cstep.Requires()) == 0 {
						wg.Add(1)
						go func(step Step) {
							defer wg.Done()
							if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "req"), req); werr != nil {
								cherr <- werr
								return
							}
							if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
								cherr <- werr
								return
							}
							rsp, serr := step.Execute(nctx, req, nopts...)
							if serr != nil {
								step.SetStatus(StatusFailure)
								if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
									w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
								}
								if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
									w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
								}
								cherr <- serr
								return
							}
							if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), rsp); werr != nil {
								w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
								cherr <- werr
								return
							}
							if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
								w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
								cherr <- werr
								return
							}
						}(cstep)
						wg.Wait()
					} else {
						if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "req"), req); werr != nil {
							cherr <- werr
							return
						}
						if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
							cherr <- werr
							return
						}
						rsp, serr := cstep.Execute(nctx, req, nopts...)
						if serr != nil {
							cstep.SetStatus(StatusFailure)
							if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
								w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
							}
							if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
								w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
							}
							cherr <- serr
							return
						}
						if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), rsp); werr != nil {
							w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
							cherr <- werr
							return
						}
						if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
							cherr <- werr
							return
						}
					}
				}
			}
			close(done)
		}()

		if options.Async {
			return eid, nil
		}

		logger.Tracef(ctx, "wait for finish or error")
		select {
		case <-nctx.Done():
			err = nctx.Err()
		case cerr := <-cherr:
			err = cerr
		case <-done:
			close(cherr)
		case <-chstatus:
			close(chstatus)
			return eid, nil
		}

		switch {
		case nctx.Err() != nil:
			if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
				w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
			}
		case err == nil:
			if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
				w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
			}
		case err != nil:
			if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
				w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
			}
		}
	*/

	return err
}

// NewFlow creates a new Flow with the given options.
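//
// A minimal usage sketch (illustrative only, not from the original source;
// assumes the caller supplies configured Options for client, store, logger,
// etc., plus a ctx and a req *Message):
//
//	f := NewFlow(opts...)
//	if err := f.Init(); err != nil {
//		// handle error
//	}
//	step1 := NewCallStep("helloworld", "Greeter", "Hello")
//	step2 := NewPublishStep("greeter.events")
//	_ = step2.Require(step1)
//	w, err := f.WorkflowCreate(ctx, "greeter", step1, step2)
//	if err == nil {
//		eid, _ := w.Execute(ctx, req)
//		_ = eid
//	}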
func NewFlow(opts ...Option) Flow {
	options := NewOptions(opts...)
	return &microFlow{opts: options}
}

func (f *microFlow) Options() Options {
	return f.opts
}

func (f *microFlow) Init(opts ...Option) error {
	for _, o := range opts {
		o(&f.opts)
	}
	if err := f.opts.Client.Init(); err != nil {
		return err
	}
	if err := f.opts.Tracer.Init(); err != nil {
		return err
	}
	if err := f.opts.Logger.Init(); err != nil {
		return err
	}
	if err := f.opts.Meter.Init(); err != nil {
		return err
	}
	if err := f.opts.Store.Init(); err != nil {
		return err
	}
	return nil
}

func (f *microFlow) WorkflowList(ctx context.Context) ([]Workflow, error) {
	return nil, nil
}

func (f *microFlow) WorkflowCreate(ctx context.Context, id string, steps ...Step) (Workflow, error) {
	w := &microWorkflow{opts: f.opts, id: id, g: dag.NewDAG(), steps: make(map[string]Step, len(steps))}

	for _, s := range steps {
		w.steps[s.String()] = s
		if _, err := w.g.AddVertex(s); err != nil {
			return nil, err
		}
	}

	for _, dst := range steps {
		for _, req := range dst.Requires() {
			src, ok := w.steps[req]
			if !ok {
				return nil, ErrStepNotExists
			}
			if err := w.g.AddEdge(src.String(), dst.String()); err != nil {
				return nil, err
			}
		}
	}

	w.g.ReduceTransitively()

	w.init = true

	return w, nil
}

func (f *microFlow) WorkflowRemove(ctx context.Context, id string) error {
	return nil
}

func (f *microFlow) WorkflowSave(ctx context.Context, w Workflow) error {
	return nil
}

func (f *microFlow) WorkflowLoad(ctx context.Context, id string) (Workflow, error) {
	return nil, nil
}

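// microCallStep is a Step that invokes a service endpoint via client.Call.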
type microCallStep struct {
	rsp     *Message
	req     *Message
	service string
	method  string
	opts    StepOptions
	status  Status
}

func (s *microCallStep) Request() *Message {
	return s.req
}

func (s *microCallStep) Response() *Message {
	return s.rsp
}

func (s *microCallStep) ID() string {
	return s.String()
}

func (s *microCallStep) Options() StepOptions {
	return s.opts
}

func (s *microCallStep) Endpoint() string {
	return s.method
}

func (s *microCallStep) Requires() []string {
	return s.opts.Requires
}

func (s *microCallStep) Require(steps ...Step) error {
	for _, step := range steps {
		s.opts.Requires = append(s.opts.Requires, step.String())
	}
	return nil
}

func (s *microCallStep) String() string {
	if s.opts.ID != "" {
		return s.opts.ID
	}
	return fmt.Sprintf("%s.%s", s.service, s.method)
}

func (s *microCallStep) Name() string {
	return s.String()
}

func (s *microCallStep) Hashcode() interface{} {
	return s.String()
}

func (s *microCallStep) GetStatus() Status {
	return s.status
}

func (s *microCallStep) SetStatus(status Status) {
	s.status = status
}

func (s *microCallStep) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (*Message, error) {
	options := NewExecuteOptions(opts...)
	if options.Client == nil {
		return nil, ErrMissingClient
	}

	rsp := &codec.Frame{}

	copts := []client.CallOption{client.WithRetries(0)}
	if options.Timeout > 0 {
		copts = append(copts,
			client.WithRequestTimeout(options.Timeout),
			client.WithDialTimeout(options.Timeout))
	}

	nctx := metadata.NewOutgoingContext(ctx, req.Header)
	err := options.Client.Call(nctx, options.Client.NewRequest(s.service, s.method, &codec.Frame{Data: req.Body}), rsp, copts...)
	if err != nil {
		return nil, err
	}

	md, _ := metadata.FromOutgoingContext(nctx)

	return &Message{Header: md, Body: rsp.Data}, err
}

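// microPublishStep is a Step that publishes a message to a broker topic;
// its Execute method is currently a stub.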
type microPublishStep struct {
	req    *Message
	rsp    *Message
	topic  string
	opts   StepOptions
	status Status
}

func (s *microPublishStep) Request() *Message {
	return s.req
}

func (s *microPublishStep) Response() *Message {
	return s.rsp
}

func (s *microPublishStep) ID() string {
	return s.String()
}

func (s *microPublishStep) Options() StepOptions {
	return s.opts
}

func (s *microPublishStep) Endpoint() string {
	return s.topic
}

func (s *microPublishStep) Requires() []string {
	return s.opts.Requires
}

func (s *microPublishStep) Require(steps ...Step) error {
	for _, step := range steps {
		s.opts.Requires = append(s.opts.Requires, step.String())
	}
	return nil
}

func (s *microPublishStep) String() string {
	if s.opts.ID != "" {
		return s.opts.ID
	}
	return s.topic
}

func (s *microPublishStep) Name() string {
	return s.String()
}

func (s *microPublishStep) Hashcode() interface{} {
	return s.String()
}

func (s *microPublishStep) GetStatus() Status {
	return s.status
}

func (s *microPublishStep) SetStatus(status Status) {
	s.status = status
}

func (s *microPublishStep) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (*Message, error) {
	return nil, nil
}

// NewCallStep creates a new Step that invokes a service endpoint via client.Call.
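//
// For example (illustrative names), NewCallStep("helloworld", "Greeter", "Hello")
// builds a step that calls the "Greeter.Hello" endpoint of the "helloworld"
// service; its identifier is "helloworld.Greeter.Hello" unless an explicit ID
// is set via StepOptions.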
func NewCallStep(service string, name string, method string, opts ...StepOption) Step {
	options := NewStepOptions(opts...)
	return &microCallStep{service: service, method: name + "." + method, opts: options}
}

// NewPublishStep creates a new Step that publishes to the given topic via client.Publish.
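//
// The resulting step is identified by its topic unless an explicit ID is set
// via StepOptions. Note that its Execute method is currently a stub and
// returns no response.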
func NewPublishStep(topic string, opts ...StepOption) Step {
	options := NewStepOptions(opts...)
	return &microPublishStep{topic: topic, opts: options}
}