2019-11-02 16:25:10 +03:00
|
|
|
// Package kubernetes implements kubernetes micro runtime
|
|
|
|
package kubernetes
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/micro/go-micro/runtime"
|
|
|
|
"github.com/micro/go-micro/runtime/kubernetes/client"
|
|
|
|
"github.com/micro/go-micro/util/log"
|
|
|
|
)
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
// action to take on runtime service
type action int

const (
	// start: the management loop calls service.Start on the queued service
	start action = iota
	// update: the management loop calls service.Update on the queued service
	update
	// stop: the management loop calls service.Stop on the queued service
	stop
)
|
|
|
|
|
|
|
|
// task is queued into runtime queue and processed
// asynchronously by the runtime management loop.
type task struct {
	// action to perform on the service (start, update or stop)
	action action
	// service the action applies to
	service *service
}
|
|
|
|
|
// kubernetes is a runtime.Runtime implementation backed by the
// kubernetes API; mutable state is guarded by the embedded RWMutex.
type kubernetes struct {
	sync.RWMutex
	// options configure runtime
	options runtime.Options
	// indicates if we're running
	running bool
	// task queue for kubernetes services; buffered (128) and drained by run()
	queue chan *task
	// used to stop the runtime
	closed chan bool
	// client is kubernetes client
	client client.Kubernetes
}
|
|
|
|
|
|
|
|
// NewRuntime creates new kubernetes runtime
|
|
|
|
func NewRuntime(opts ...runtime.Option) runtime.Runtime {
|
|
|
|
// get default options
|
|
|
|
options := runtime.Options{}
|
|
|
|
|
|
|
|
// apply requested options
|
|
|
|
for _, o := range opts {
|
|
|
|
o(&options)
|
|
|
|
}
|
|
|
|
|
|
|
|
// kubernetes client
|
|
|
|
client := client.NewClientInCluster()
|
|
|
|
|
|
|
|
return &kubernetes{
|
2019-11-15 16:41:40 +03:00
|
|
|
options: options,
|
|
|
|
closed: make(chan bool),
|
|
|
|
queue: make(chan *task, 128),
|
|
|
|
client: client,
|
2019-11-02 16:25:10 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Init initializes runtime options
|
|
|
|
func (k *kubernetes) Init(opts ...runtime.Option) error {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
for _, o := range opts {
|
|
|
|
o(&k.options)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
// Creates a service
|
2019-11-02 16:25:10 +03:00
|
|
|
func (k *kubernetes) Create(s *runtime.Service, opts ...runtime.CreateOption) error {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
var options runtime.CreateOptions
|
|
|
|
for _, o := range opts {
|
|
|
|
o(&options)
|
|
|
|
}
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
svcName := s.Name
|
|
|
|
if len(s.Version) > 0 {
|
|
|
|
svcName = strings.Join([]string{s.Name, s.Version}, "-")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !client.ServiceRegexp.MatchString(svcName) {
|
|
|
|
return fmt.Errorf("invalid service name: %s", svcName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// create new kubernetes micro service
|
|
|
|
service := newService(s, options)
|
|
|
|
|
|
|
|
log.Debugf("Runtime queueing service %s for start action", service.Name)
|
|
|
|
|
2019-11-02 16:25:10 +03:00
|
|
|
// push into start queue
|
2019-11-15 16:41:40 +03:00
|
|
|
k.queue <- &task{
|
|
|
|
action: start,
|
|
|
|
service: service,
|
|
|
|
}
|
2019-11-02 16:25:10 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-22 20:10:00 +03:00
|
|
|
// getMicroService queries kubernetes for micro service
|
|
|
|
// NOTE: this function is not thread-safe
|
|
|
|
func (k *kubernetes) getMicroService(labels map[string]string) ([]*runtime.Service, error) {
|
|
|
|
// get the service status
|
|
|
|
serviceList := new(client.ServiceList)
|
|
|
|
r := &client.Resource{
|
|
|
|
Kind: "service",
|
|
|
|
Value: serviceList,
|
|
|
|
}
|
|
|
|
if err := k.client.Get(r, labels); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// get the deployment status
|
|
|
|
depList := new(client.DeploymentList)
|
|
|
|
d := &client.Resource{
|
|
|
|
Kind: "deployment",
|
|
|
|
Value: depList,
|
|
|
|
}
|
|
|
|
if err := k.client.Get(d, labels); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// service map
|
|
|
|
svcMap := make(map[string]*runtime.Service)
|
|
|
|
|
|
|
|
// collect info from kubernetes service
|
|
|
|
for _, kservice := range serviceList.Items {
|
|
|
|
name := kservice.Metadata.Labels["name"]
|
|
|
|
version := kservice.Metadata.Labels["version"]
|
|
|
|
svcMap[name] = &runtime.Service{
|
|
|
|
Name: name,
|
|
|
|
Version: version,
|
|
|
|
Metadata: make(map[string]string),
|
|
|
|
}
|
|
|
|
// copy annotations metadata into service metadata
|
|
|
|
for k, v := range kservice.Metadata.Annotations {
|
|
|
|
svcMap[name].Metadata[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// collect additional info from kubernetes deployment
|
|
|
|
for _, kdep := range depList.Items {
|
|
|
|
name := kdep.Metadata.Labels["name"]
|
|
|
|
if svc, ok := svcMap[name]; ok {
|
|
|
|
// set the service source
|
|
|
|
svc.Source = kdep.Metadata.Annotations["source"]
|
|
|
|
// copy all annotations metadata into service metadata
|
|
|
|
for k, v := range kdep.Metadata.Annotations {
|
|
|
|
svc.Metadata[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse out deployment status
|
|
|
|
if len(kdep.Status.Conditions) > 0 {
|
|
|
|
status := kdep.Status.Conditions[0].Type
|
|
|
|
// pick the last known condition type and mark the service status with it
|
|
|
|
log.Debugf("Runtime setting %s service deployment status: %v", name, status)
|
|
|
|
svc.Metadata["status"] = status
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse out deployment build
|
|
|
|
if build, ok := kdep.Metadata.Annotations["build"]; ok {
|
|
|
|
buildTime, err := time.Parse(time.RFC3339, build)
|
|
|
|
if err != nil {
|
|
|
|
log.Debugf("Runtime failed parsing build time for %s: %v", name, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
svc.Metadata["build"] = fmt.Sprintf("%d", buildTime.Unix())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// if no build annotation is found, set it to current time
|
|
|
|
svc.Metadata["build"] = fmt.Sprintf("%d", time.Now().Unix())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// collect all the services and return
|
|
|
|
services := make([]*runtime.Service, 0, len(serviceList.Items))
|
|
|
|
for _, service := range svcMap {
|
|
|
|
services = append(services, service)
|
|
|
|
}
|
|
|
|
|
|
|
|
return services, nil
|
|
|
|
}
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
// Get returns all instances of given service
|
|
|
|
func (k *kubernetes) Get(name string, opts ...runtime.GetOption) ([]*runtime.Service, error) {
|
2019-11-02 16:25:10 +03:00
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
// if no name has been passed in, return error
|
|
|
|
if len(name) == 0 {
|
|
|
|
return nil, errors.New("missing service name")
|
|
|
|
}
|
|
|
|
|
2019-11-22 20:10:00 +03:00
|
|
|
// set the default labels
|
2019-11-15 16:41:40 +03:00
|
|
|
labels := map[string]string{
|
|
|
|
"micro": "service",
|
|
|
|
"name": name,
|
|
|
|
}
|
2019-11-22 20:10:00 +03:00
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
var options runtime.GetOptions
|
|
|
|
for _, o := range opts {
|
|
|
|
o(&options)
|
|
|
|
}
|
2019-11-02 16:25:10 +03:00
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
// add version to labels if a version has been supplied
|
|
|
|
if len(options.Version) > 0 {
|
|
|
|
labels["version"] = options.Version
|
2019-11-02 16:25:10 +03:00
|
|
|
}
|
|
|
|
|
2019-11-15 16:41:40 +03:00
|
|
|
log.Debugf("Runtime querying service %s", name)
|
|
|
|
|
2019-11-22 20:10:00 +03:00
|
|
|
return k.getMicroService(labels)
|
|
|
|
}
|
2019-11-15 16:41:40 +03:00
|
|
|
|
2019-11-22 20:10:00 +03:00
|
|
|
// List the managed services
|
|
|
|
func (k *kubernetes) List() ([]*runtime.Service, error) {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
labels := map[string]string{
|
|
|
|
"micro": "service",
|
2019-11-15 16:41:40 +03:00
|
|
|
}
|
|
|
|
|
2019-11-22 20:10:00 +03:00
|
|
|
log.Debugf("Runtime listing all micro services")
|
|
|
|
|
|
|
|
return k.getMicroService(labels)
|
2019-11-02 16:25:10 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update the service in place
|
|
|
|
func (k *kubernetes) Update(s *runtime.Service) error {
|
|
|
|
// parse version into human readable timestamp
|
|
|
|
updateTimeStamp, err := strconv.ParseInt(s.Version, 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
unixTimeUTC := time.Unix(updateTimeStamp, 0)
|
2019-11-15 16:41:40 +03:00
|
|
|
|
|
|
|
// create new kubernetes micro service
|
|
|
|
service := newService(s, runtime.CreateOptions{})
|
|
|
|
|
|
|
|
// update build time annotation
|
|
|
|
service.kdeploy.Spec.Template.Metadata.Annotations["build"] = unixTimeUTC.Format(time.RFC3339)
|
|
|
|
|
|
|
|
log.Debugf("Runtime queueing service %s for update action", service.Name)
|
|
|
|
|
|
|
|
// queue service for removal
|
|
|
|
k.queue <- &task{
|
|
|
|
action: update,
|
|
|
|
service: service,
|
2019-11-02 16:25:10 +03:00
|
|
|
}
|
2019-11-15 16:41:40 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove a service
|
|
|
|
func (k *kubernetes) Delete(s *runtime.Service) error {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
// create new kubernetes micro service
|
|
|
|
service := newService(s, runtime.CreateOptions{})
|
|
|
|
|
|
|
|
log.Debugf("Runtime queueing service %s for delete action", service.Name)
|
|
|
|
|
|
|
|
// queue service for removal
|
|
|
|
k.queue <- &task{
|
|
|
|
action: stop,
|
|
|
|
service: service,
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2019-11-02 16:25:10 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// run runs the runtime management loop. It processes queued service tasks
// (start/stop/update) and notifier events until the runtime is closed.
// Intended to be run as a goroutine from Start; exits when k.closed closes.
func (k *kubernetes) run(events <-chan runtime.Event) {
	t := time.NewTicker(time.Second * 10)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			// TODO: figure out what to do here
			// - do we even need the ticker for k8s services?
		case task := <-k.queue:
			// execute the queued action; NOTE: continue here jumps to the
			// next iteration of the outer for loop, not the switch
			switch task.action {
			case start:
				log.Debugf("Runtime starting new service: %s", task.service.Name)
				if err := task.service.Start(k.client); err != nil {
					log.Debugf("Runtime failed to start service %s: %v", task.service.Name, err)
					continue
				}
			case stop:
				log.Debugf("Runtime stopping service: %s", task.service.Name)
				if err := task.service.Stop(k.client); err != nil {
					log.Debugf("Runtime failed to stop service %s: %v", task.service.Name, err)
					continue
				}
			case update:
				log.Debugf("Runtime updating service: %s", task.service.Name)
				if err := task.service.Update(k.client); err != nil {
					log.Debugf("Runtime failed to update service %s: %v", task.service.Name, err)
					continue
				}
			default:
				log.Debugf("Runtime received unknown action for service: %s", task.service.Name)
			}
		case event := <-events:
			// NOTE: we only handle Update events for now
			log.Debugf("Runtime received notification event: %v", event)
			switch event.Type {
			case runtime.Update:
				// parse returned response to timestamp
				// (event.Version is expected to be a unix timestamp)
				updateTimeStamp, err := strconv.ParseInt(event.Version, 10, 64)
				if err != nil {
					log.Debugf("Runtime error parsing update build time: %v", err)
					continue
				}
				unixTimeUTC := time.Unix(updateTimeStamp, 0)
				if len(event.Service) > 0 {
					s := &runtime.Service{
						Name:    event.Service,
						Version: event.Version,
					}
					// create new kubernetes micro service
					service := newService(s, runtime.CreateOptions{})
					// update build time annotation with the RFC3339 form
					service.kdeploy.Spec.Template.Metadata.Annotations["build"] = unixTimeUTC.Format(time.RFC3339)

					// apply the update directly rather than re-queueing
					log.Debugf("Runtime updating service: %s", service.Name)
					if err := service.Update(k.client); err != nil {
						log.Debugf("Runtime failed to update service %s: %v", service.Name, err)
						continue
					}
				}
			}
		case <-k.closed:
			// runtime has been stopped; exit the management loop
			log.Debugf("Runtime stopped")
			return
		}
	}
}
|
|
|
|
|
|
|
|
// starts the runtime
|
|
|
|
func (k *kubernetes) Start() error {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
// already running
|
|
|
|
if k.running {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// set running
|
|
|
|
k.running = true
|
|
|
|
k.closed = make(chan bool)
|
|
|
|
|
|
|
|
var events <-chan runtime.Event
|
|
|
|
if k.options.Notifier != nil {
|
|
|
|
var err error
|
|
|
|
events, err = k.options.Notifier.Notify()
|
|
|
|
if err != nil {
|
|
|
|
// TODO: should we bail here?
|
|
|
|
log.Debugf("Runtime failed to start update notifier")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
go k.run(events)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shutdown the runtime
|
|
|
|
func (k *kubernetes) Stop() error {
|
|
|
|
k.Lock()
|
|
|
|
defer k.Unlock()
|
|
|
|
|
|
|
|
if !k.running {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-k.closed:
|
|
|
|
return nil
|
|
|
|
default:
|
|
|
|
close(k.closed)
|
|
|
|
// set not running
|
|
|
|
k.running = false
|
|
|
|
// stop the notifier too
|
|
|
|
if k.options.Notifier != nil {
|
|
|
|
return k.options.Notifier.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// String implements stringer interface
|
|
|
|
func (k *kubernetes) String() string {
|
|
|
|
return "kubernetes"
|
|
|
|
}
|