// Package kubernetes implements kubernetes micro runtime
package kubernetes

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/micro/go-micro/v2/logger"
	log "github.com/micro/go-micro/v2/logger"
	"github.com/micro/go-micro/v2/runtime"
	"github.com/micro/go-micro/v2/util/kubernetes/client"
)

// action to take on runtime service
type action int

type kubernetes struct {
	sync.RWMutex
	// options configure runtime
	options runtime.Options
	// indicates if we're running
	running bool
	// used to stop the runtime
	closed chan bool
	// client is the kubernetes client
	client client.Client
	// namespaces which exist
	namespaces []client.Namespace
}

// namespaceExists returns a boolean indicating if a namespace exists
func (k *kubernetes) namespaceExists(name string) (bool, error) {
	// populate the cache
	if k.namespaces == nil {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Populating namespace cache")
		}

		namespaceList := new(client.NamespaceList)
		resource := &client.Resource{Kind: "namespace", Value: namespaceList}
		if err := k.client.List(resource); err != nil {
			return false, err
		}

		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Populated namespace cache successfully with %v items", len(namespaceList.Items))
		}
		k.namespaces = namespaceList.Items
	}

	// check if the namespace exists in the cache
	for _, n := range k.namespaces {
		if n.Metadata.Name == name {
			return true, nil
		}
	}

	return false, nil
}

// createNamespace creates a new k8s namespace
func (k *kubernetes) createNamespace(namespace string) error {
	ns := client.Namespace{Metadata: &client.Metadata{Name: namespace}}
	err := k.client.Create(&client.Resource{Kind: "namespace", Value: ns})

	// add to cache
	if err == nil && k.namespaces != nil {
		k.namespaces = append(k.namespaces, ns)
	}

	return err
}

// getService queries kubernetes for micro services
// NOTE: this function is not thread-safe
func (k *kubernetes) getService(labels map[string]string, opts ...client.GetOption) ([]*service, error) {
	// get the service status
	serviceList := new(client.ServiceList)
	r := &client.Resource{
		Kind:  "service",
		Value: serviceList,
	}

	opts = append(opts, client.GetLabels(labels))

	// get the service from k8s
	if err := k.client.Get(r, opts...); err != nil {
		return nil, err
	}

	// get the deployment status
	depList := new(client.DeploymentList)
	d := &client.Resource{
		Kind:  "deployment",
		Value: depList,
	}
	if err := k.client.Get(d, opts...); err != nil {
		return nil, err
	}

	// get the pods from k8s
	podList := new(client.PodList)
	p := &client.Resource{
		Kind:  "pod",
		Value: podList,
	}
	if err := k.client.Get(p, opts...); err != nil {
		return nil, err
	}

	// service map
	svcMap := make(map[string]*service)

	// collect info from kubernetes service
	for _, kservice := range serviceList.Items {
		// copy the loop variable so the pointer stored below does not alias across iterations
		kservice := kservice
		// name of the service
		name := kservice.Metadata.Labels["name"]
		// version of the service
		version := kservice.Metadata.Labels["version"]

		srv := &service{
			Service: &runtime.Service{
				Name:     name,
				Version:  version,
				Metadata: make(map[string]string),
			},
			kservice: &kservice,
		}

		// set the address
		address := kservice.Spec.ClusterIP
		port := kservice.Spec.Ports[0]
		srv.Service.Metadata["address"] = fmt.Sprintf("%s:%d", address, port.Port)
		// set the type of service
		srv.Service.Metadata["type"] = kservice.Metadata.Labels["micro"]

		// copy annotations metadata into service metadata
		for k, v := range kservice.Metadata.Annotations {
			srv.Service.Metadata[k] = v
		}

		// save as service
		svcMap[name+version] = srv
	}

	// collect additional info from kubernetes deployment
	for _, kdep := range depList.Items {
		// copy the loop variable so the pointer stored below does not alias across iterations
		kdep := kdep
		// name of the service
		name := kdep.Metadata.Labels["name"]
		// version of the service
		version := kdep.Metadata.Labels["version"]

		// access existing service map based on name + version
		if svc, ok := svcMap[name+version]; ok {
			// we're expecting our own service name in metadata
			if _, ok := kdep.Metadata.Annotations["name"]; !ok {
				continue
			}

			// set the service name, version and source
			// based on existing annotations we stored
			svc.Service.Name = kdep.Metadata.Annotations["name"]
			svc.Service.Version = kdep.Metadata.Annotations["version"]
			svc.Service.Source = kdep.Metadata.Annotations["source"]

			// delete from metadata
			delete(kdep.Metadata.Annotations, "name")
			delete(kdep.Metadata.Annotations, "version")
			delete(kdep.Metadata.Annotations, "source")

			// copy all annotations metadata into service metadata
			for k, v := range kdep.Metadata.Annotations {
				svc.Service.Metadata[k] = v
			}

			// parse out deployment status and inject into service metadata
			if len(kdep.Status.Conditions) > 0 {
				svc.Status(kdep.Status.Conditions[0].Type, nil)
				svc.Metadata["started"] = kdep.Status.Conditions[0].LastUpdateTime
			} else {
				svc.Status("n/a", nil)
			}

			// get the real status
			for _, item := range podList.Items {
				var status string

				// check the name
				if item.Metadata.Labels["name"] != name {
					continue
				}
				// check the version
				if item.Metadata.Labels["version"] != version {
					continue
				}

				switch item.Status.Phase {
				case "Failed":
					status = item.Status.Reason
				default:
					status = item.Status.Phase
				}

				// skip if we can't get the container
				if len(item.Status.Containers) == 0 {
					continue
				}

				// now try to get a deeper status
				state := item.Status.Containers[0].State

				// set start time
				if state.Running != nil {
					svc.Metadata["started"] = state.Running.Started
				}

				// set status from waiting
				if v := state.Waiting; v != nil {
					if len(v.Reason) > 0 {
						status = v.Reason
					}
				}
				// TODO: set from terminated
				svc.Status(status, nil)
			}

			// save deployment
			svc.kdeploy = &kdep
		}
	}

	// collect all the services and return
	services := make([]*service, 0, len(serviceList.Items))

	for _, service := range svcMap {
		services = append(services, service)
	}

	return services, nil
}

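// As an internal usage sketch (the label values below are illustrative, not
// taken from a real deployment): getService is driven by a label selector
// plus optional client.GetOption values such as a namespace.
//
//	labels := map[string]string{
//		"micro":   "service",
//		"name":    "helloworld",
//		"version": "latest",
//	}
//	srvs, err := k.getService(labels, client.GetNamespace("default"))
//	if err != nil {
//		// handle the error
//	}
//	for _, srv := range srvs {
//		// srv.Service holds the micro service view,
//		// srv.kservice and srv.kdeploy the raw kubernetes resources
//	}
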
// run runs the runtime management loop
func (k *kubernetes) run(events <-chan runtime.Event) {
	t := time.NewTicker(time.Second * 10)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			// TODO: figure out what to do here
			// - do we even need the ticker for k8s services?
		case event := <-events:
			// NOTE: we only handle Update events for now
			if log.V(log.DebugLevel, log.DefaultLogger) {
				log.Debugf("Runtime received notification event: %v", event)
			}
			switch event.Type {
			case runtime.Update:
				// only process if there's an actual service
				// we do not update all the things individually
				if event.Service == nil {
					continue
				}

				// format the name
				name := client.Format(event.Service.Name)

				// set the default labels
				labels := map[string]string{
					"micro": k.options.Type,
					"name":  name,
				}

				if len(event.Service.Version) > 0 {
					labels["version"] = event.Service.Version
				}

				// get the deployment status
				deployed := new(client.DeploymentList)

				// get the existing service rather than creating a new one
				err := k.client.Get(&client.Resource{
					Kind:  "deployment",
					Value: deployed,
				}, client.GetLabels(labels))

				if err != nil {
					if log.V(log.DebugLevel, log.DefaultLogger) {
						log.Debugf("Runtime update failed to get service %s: %v", event.Service, err)
					}
					continue
				}

				// technically we should not receive multiple versions, but handle them all the same
				for _, service := range deployed.Items {
					// check the name matches
					if service.Metadata.Name != name {
						continue
					}

					// ensure the annotations map exists before updating the build time
					if service.Spec.Template.Metadata.Annotations == nil {
						service.Spec.Template.Metadata.Annotations = make(map[string]string)
					}

					// update the build time
					service.Spec.Template.Metadata.Annotations["updated"] = fmt.Sprintf("%d", event.Timestamp.Unix())

					if log.V(log.DebugLevel, log.DefaultLogger) {
						log.Debugf("Runtime updating service: %s deployment: %s", event.Service, service.Metadata.Name)
					}
					if err := k.client.Update(deploymentResource(&service)); err != nil {
						if log.V(log.DebugLevel, log.DefaultLogger) {
							log.Debugf("Runtime failed to update service %s: %v", event.Service, err)
						}
						continue
					}
				}
			}
		case <-k.closed:
			if log.V(log.DebugLevel, log.DefaultLogger) {
				log.Debugf("Runtime stopped")
			}
			return
		}
	}
}

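// For reference, the scheduler delivers runtime.Event values shaped roughly
// like the sketch below; run only reacts to runtime.Update events that carry
// a service. The service name and version are illustrative.
//
//	event := runtime.Event{
//		Type:      runtime.Update,
//		Timestamp: time.Now(),
//		Service:   &runtime.Service{Name: "helloworld", Version: "latest"},
//	}
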
// Init initializes runtime options
func (k *kubernetes) Init(opts ...runtime.Option) error {
	k.Lock()
	defer k.Unlock()

	for _, o := range opts {
		o(&k.options)
	}

	return nil
}

// Logs returns a log stream for the given service
func (k *kubernetes) Logs(s *runtime.Service, options ...runtime.LogsOption) (runtime.LogStream, error) {
	klo := newLog(k.client, s.Name, options...)
	stream, err := klo.Stream()
	if err != nil {
		return nil, err
	}
	// If requested, also read existing records and stream those too
	if klo.options.Count > 0 {
		go func() {
			records, err := klo.Read()
			if err != nil {
				log.Errorf("Failed to get logs for service '%v' from k8s: %v", s.Name, err)
				return
			}
			// @todo: this might actually not run before podLogStream starts
			// and might cause out of order log retrieval at the receiving end.
			// A better approach would probably be to support this inside the `klog.Stream` method.
			for _, record := range records {
				stream.Chan() <- record
			}
		}()
	}
	return stream, nil
}

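// Consuming the returned stream, as a rough sketch: records arrive on Chan()
// until the caller stops the stream. The Message field on runtime.LogRecord
// is assumed here to carry the log line.
//
//	stream, err := k.Logs(&runtime.Service{Name: "helloworld"})
//	if err != nil {
//		// handle the error
//	}
//	defer stream.Stop()
//	for record := range stream.Chan() {
//		fmt.Println(record.Message)
//	}
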
type kubeStream struct {
	// the k8s log stream
	stream chan runtime.LogRecord
	// the stop chan
	sync.Mutex
	stop chan bool
	err  error
}

func (k *kubeStream) Error() error {
	return k.err
}

func (k *kubeStream) Chan() chan runtime.LogRecord {
	return k.stream
}

func (k *kubeStream) Stop() error {
	k.Lock()
	defer k.Unlock()
	select {
	case <-k.stop:
		return nil
	default:
		close(k.stop)
	}
	return nil
}

// Create creates a service
func (k *kubernetes) Create(s *runtime.Service, opts ...runtime.CreateOption) error {
	k.Lock()
	defer k.Unlock()

	options := runtime.CreateOptions{
		Type:      k.options.Type,
		Namespace: client.DefaultNamespace,
	}
	for _, o := range opts {
		o(&options)
	}

	// default type if it doesn't exist
	if len(options.Type) == 0 {
		options.Type = k.options.Type
	}

	// default the source if it doesn't exist
	if len(s.Source) == 0 {
		s.Source = k.options.Source
	}

	// ensure the namespace exists
	namespace := client.SerializeResourceName(options.Namespace)
	// only do this if the namespace is not default
	if namespace != "default" {
		if exist, err := k.namespaceExists(namespace); err == nil && !exist {
			if err := k.createNamespace(namespace); err != nil {
				if logger.V(logger.WarnLevel, logger.DefaultLogger) {
					logger.Warnf("Error creating namespace %v: %v", namespace, err)
				}
				return err
			}
		} else if err != nil {
			if logger.V(logger.WarnLevel, logger.DefaultLogger) {
				logger.Warnf("Error checking namespace %v exists: %v", namespace, err)
			}
			return err
		}
	}

	// determine the image from the source and options
	options.Image = k.getImage(s, options)

	// create a secret for the credentials if some were provided
	if len(options.Credentials) > 0 {
		secret, err := k.createCredentials(s, options)
		if err != nil {
			if logger.V(logger.WarnLevel, logger.DefaultLogger) {
				logger.Warnf("Error generating auth credentials for service: %v", err)
			}
			return err
		}

		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Generated auth credentials for service %v", s.Name)
		}

		// pass the secret name to the client via the credentials option
		options.Credentials = secret
	}

	// create new service
	service := newService(s, options)

	// start the service
	return service.Start(k.client, client.CreateNamespace(options.Namespace))
}

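// A minimal usage sketch for Create: the caller passes the service definition
// and optional runtime.CreateOption values (omitted here); type and namespace
// fall back to the defaults set above. The name, version and source values
// are illustrative.
//
//	err := k.Create(&runtime.Service{
//		Name:    "helloworld",
//		Version: "latest",
//		Source:  "github.com/micro/services/helloworld",
//	})
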
// Read returns all instances of a given service
func (k *kubernetes) Read(opts ...runtime.ReadOption) ([]*runtime.Service, error) {
	k.Lock()
	defer k.Unlock()

	// set the default labels
	labels := map[string]string{}

	options := runtime.ReadOptions{
		Namespace: client.DefaultNamespace,
	}

	for _, o := range opts {
		o(&options)
	}

	if len(options.Service) > 0 {
		labels["name"] = client.Format(options.Service)
	}

	// add version to labels if a version has been supplied
	if len(options.Version) > 0 {
		labels["version"] = options.Version
	}

	if len(options.Type) > 0 {
		labels["micro"] = options.Type
	}

	srvs, err := k.getService(labels, client.GetNamespace(options.Namespace))
	if err != nil {
		return nil, err
	}

	var services []*runtime.Service
	for _, service := range srvs {
		services = append(services, service.Service)
	}

	return services, nil
}

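// A usage sketch for Read, assuming option helpers named runtime.ReadService
// and runtime.ReadVersion exist to populate ReadOptions.Service and
// ReadOptions.Version:
//
//	services, err := k.Read(
//		runtime.ReadService("helloworld"),
//		runtime.ReadVersion("latest"),
//	)
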
// Update the service in place
func (k *kubernetes) Update(s *runtime.Service, opts ...runtime.UpdateOption) error {
	options := runtime.UpdateOptions{
		Namespace: client.DefaultNamespace,
	}

	for _, o := range opts {
		o(&options)
	}

	labels := map[string]string{}

	if len(s.Name) > 0 {
		labels["name"] = client.Format(s.Name)
	}

	if len(s.Version) > 0 {
		labels["version"] = s.Version
	}

	// get the existing service
	services, err := k.getService(labels)
	if err != nil {
		return err
	}

	// update the relevant services
	for _, service := range services {
		// nil check
		if service.kdeploy.Metadata == nil || service.kdeploy.Metadata.Annotations == nil {
			md := new(client.Metadata)
			md.Annotations = make(map[string]string)
			service.kdeploy.Metadata = md
		}

		// update metadata
		for k, v := range s.Metadata {
			service.kdeploy.Metadata.Annotations[k] = v
		}

		// update build time annotation
		service.kdeploy.Spec.Template.Metadata.Annotations["updated"] = fmt.Sprintf("%d", time.Now().Unix())

		// update the service
		if err := service.Update(k.client, client.UpdateNamespace(options.Namespace)); err != nil {
			return err
		}
	}

	return nil
}

// Delete removes a service
func (k *kubernetes) Delete(s *runtime.Service, opts ...runtime.DeleteOption) error {
	options := runtime.DeleteOptions{
		Namespace: client.DefaultNamespace,
	}
	for _, o := range opts {
		o(&options)
	}

	k.Lock()
	defer k.Unlock()

	// create new kubernetes micro service
	service := newService(s, runtime.CreateOptions{
		Type:      k.options.Type,
		Namespace: options.Namespace,
	})

	// delete the service credentials
	ns := client.DeleteNamespace(options.Namespace)
	k.client.Delete(&client.Resource{Name: credentialsName(s), Kind: "secret"}, ns)

	return service.Stop(k.client, ns)
}

// Start starts the runtime
func (k *kubernetes) Start() error {
	k.Lock()
	defer k.Unlock()

	// already running
	if k.running {
		return nil
	}

	// set running
	k.running = true
	k.closed = make(chan bool)

	var events <-chan runtime.Event
	if k.options.Scheduler != nil {
		var err error
		events, err = k.options.Scheduler.Notify()
		if err != nil {
			// TODO: should we bail here?
			if log.V(log.DebugLevel, log.DefaultLogger) {
				log.Debugf("Runtime failed to start update notifier")
			}
		}
	}

	go k.run(events)

	return nil
}

// Stop shuts down the runtime
func (k *kubernetes) Stop() error {
	k.Lock()
	defer k.Unlock()

	if !k.running {
		return nil
	}

	select {
	case <-k.closed:
		return nil
	default:
		close(k.closed)
		// set not running
		k.running = false
		// stop the scheduler
		if k.options.Scheduler != nil {
			return k.options.Scheduler.Close()
		}
	}

	return nil
}

// String implements the stringer interface
func (k *kubernetes) String() string {
	return "kubernetes"
}

// NewRuntime creates a new kubernetes runtime
func NewRuntime(opts ...runtime.Option) runtime.Runtime {
	// get default options
	options := runtime.Options{
		// Create labels with type "micro": "service"
		Type: "service",
	}

	// apply requested options
	for _, o := range opts {
		o(&options)
	}

	// kubernetes client
	client := client.NewClusterClient()

	return &kubernetes{
		options: options,
		closed:  make(chan bool),
		client:  client,
	}
}

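// A minimal lifecycle sketch for this runtime; it assumes the process runs
// inside a cluster so NewClusterClient can pick up its service account.
//
//	r := NewRuntime()
//	if err := r.Start(); err != nil {
//		// handle the error
//	}
//	defer r.Stop()
//	// create, read, update and delete services via r
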
// getImage returns the image to use for the service, preferring the
// create options, then the runtime options
func (k *kubernetes) getImage(s *runtime.Service, options runtime.CreateOptions) string {
	// use the image when it's specified
	if len(options.Image) > 0 {
		return options.Image
	}

	if len(k.options.Image) > 0 {
		return k.options.Image
	}

	return ""
}

// createCredentials stores the given "user:pass" credentials as a kubernetes
// secret and returns the secret name
func (k *kubernetes) createCredentials(service *runtime.Service, options runtime.CreateOptions) (string, error) {
	// validate the creds
	comps := strings.Split(options.Credentials, ":")
	if len(comps) != 2 {
		return "", errors.New("Invalid credentials, expected format 'user:pass'")
	}

	// construct the k8s secret object
	secret := &client.Secret{
		Type: "Opaque",
		Data: map[string]string{
			"id":     base64.StdEncoding.EncodeToString([]byte(comps[0])),
			"secret": base64.StdEncoding.EncodeToString([]byte(comps[1])),
		},
		Metadata: &client.Metadata{
			Name:      credentialsName(service),
			Namespace: options.Namespace,
		},
	}

	// create options specify the namespace
	ns := client.CreateNamespace(options.Namespace)

	// create the secret in kubernetes
	name := credentialsName(service)
	err := k.client.Create(&client.Resource{Kind: "secret", Name: name, Value: secret}, ns)
	return name, err
}

// credentialsName returns the k8s-safe secret name for a service's credentials
func credentialsName(service *runtime.Service) string {
	name := fmt.Sprintf("%v-%v-credentials", service.Name, service.Version)
	return client.SerializeResourceName(name)
}

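// For example, a service with Name "helloworld" and Version "latest" yields
// "helloworld-latest-credentials", before SerializeResourceName applies any
// further k8s-safe formatting defined in the client package.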