Merge pull request #295 from crawford/rules

config/validate: add some sanity checks
Alex Crawford 2015-01-15 16:25:37 -08:00
commit 4b5b801171
4 changed files with 284 additions and 68 deletions

config/decode.go (new file, 56 lines)

@@ -0,0 +1,56 @@
package config

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
)

func DecodeBase64Content(content string) ([]byte, error) {
	output, err := base64.StdEncoding.DecodeString(content)
	if err != nil {
		return nil, fmt.Errorf("Unable to decode base64: %q", err)
	}
	return output, nil
}

func DecodeGzipContent(content string) ([]byte, error) {
	gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
	if err != nil {
		return nil, fmt.Errorf("Unable to decode gzip: %q", err)
	}
	defer gzr.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(gzr)
	return buf.Bytes(), nil
}

func DecodeContent(content string, encoding string) ([]byte, error) {
	switch encoding {
	case "":
		return []byte(content), nil
	case "b64", "base64":
		return DecodeBase64Content(content)
	case "gz", "gzip":
		return DecodeGzipContent(content)
	case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
		gz, err := DecodeBase64Content(content)
		if err != nil {
			return nil, err
		}
		return DecodeGzipContent(string(gz))
	}
	return nil, fmt.Errorf("Unsupported encoding %q", encoding)
}
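A rough usage sketch, not part of this change: DecodeContent dispatches on the encoding string, so a gzip+base64 payload such as the sample reused in the validation tests further down is base64-decoded first and then gunzipped.

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	// gzip+base64 sample from the validation tests below; it should decode to "hello\n".
	data, err := config.DecodeContent("H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=", "gzip+base64")
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%q\n", data)

	// Unknown encodings fall through to the "Unsupported encoding" error.
	if _, err := config.DecodeContent("hello", "custom"); err != nil {
		fmt.Println(err)
	}
}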


@@ -18,7 +18,10 @@ package validate
import (
	"fmt"
	"net/url"
	"path"
	"reflect"
	"strings"

	"github.com/coreos/coreos-cloudinit/config"
)
@@ -27,8 +30,40 @@ type rule func(config node, report *Report)

// Rules contains all of the validation rules.
var Rules []rule = []rule{
	checkDiscoveryUrl,
	checkEncoding,
	checkStructure,
	checkValidity,
	checkWriteFiles,
	checkWriteFilesUnderCoreos,
}

// checkDiscoveryUrl verifies that the string is a valid url.
func checkDiscoveryUrl(cfg node, report *Report) {
	c := cfg.Child("coreos").Child("etcd").Child("discovery")
	if !c.IsValid() {
		return
	}

	if _, err := url.ParseRequestURI(c.String()); err != nil {
		report.Warning(c.line, "discovery URL is not valid")
	}
}
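For context, url.ParseRequestURI only accepts absolute URLs (or rooted request paths), which is why the bare token exercised in the tests below draws a warning. A minimal standalone sketch, not part of this change:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, s := range []string{
		"https://discovery.etcd.io/00000000000000000000000000000000",
		"http://custom.domain/mytoken",
		"disco", // not an absolute URL, so ParseRequestURI rejects it
	} {
		_, err := url.ParseRequestURI(s)
		fmt.Printf("%q valid=%v\n", s, err == nil)
	}
}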
// checkEncoding validates that, for each file under 'write_files', the
// content can be decoded given the specified encoding.
func checkEncoding(cfg node, report *Report) {
	for _, f := range cfg.Child("write_files").children {
		e := f.Child("encoding")
		if !e.IsValid() {
			continue
		}

		c := f.Child("contents")
		if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
			report.Error(c.line, fmt.Sprintf("contents cannot be decoded as %q", e.String()))
		}
	}
}
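One of the TestCheckEncoding cases below pairs encoding: base64 with a !!binary value; the YAML parser has already base64-decoded !!binary aGVsbG8K to the raw bytes "hello\n", so the second base64 pass fails and checkEncoding reports an error. A small sketch of the equivalent call, not part of this change:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	// "hello\n" is what the YAML parser yields for !!binary aGVsbG8K;
	// it is not itself valid base64, so this returns an error.
	if _, err := config.DecodeContent("hello\n", "base64"); err != nil {
		fmt.Println(err)
	}
}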
// checkStructure compares the provided config to the empty config.CloudConfig
@@ -67,6 +102,24 @@ func checkNodeStructure(n, g node, r *Report) {
	}
}

// isCompatible determines if the type of kind n can be converted to the type
// of kind g in the context of YAML. This is not an exhaustive list, but it's
// enough for the purposes of cloud-config validation.
func isCompatible(n, g reflect.Kind) bool {
	switch g {
	case reflect.String:
		return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
	case reflect.Struct:
		return n == reflect.Struct || n == reflect.Map
	case reflect.Float64:
		return n == reflect.Float64 || n == reflect.Int
	case reflect.Bool, reflect.Slice, reflect.Int:
		return n == g
	default:
		panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
	}
}
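To make the conversion rules concrete, here is a hypothetical table-driven check, written as it might appear in a test in the same package (not part of this change): YAML scalars such as 4001 (int) or true (bool) can still be assigned to a string field in the reference config, so they count as compatible with reflect.String, while the reverse is not.

func TestIsCompatibleSketch(t *testing.T) {
	for _, tt := range []struct {
		n, g reflect.Kind
		want bool
	}{
		{reflect.Int, reflect.String, true},  // 4001 can populate a string field
		{reflect.Map, reflect.Struct, true},  // YAML mappings fill structs
		{reflect.String, reflect.Int, false}, // "abc" cannot become an int
		{reflect.Bool, reflect.Float64, false},
	} {
		if got := isCompatible(tt.n, tt.g); got != tt.want {
			t.Errorf("isCompatible(%s, %s) = %v, want %v", tt.n, tt.g, got, tt.want)
		}
	}
}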
// checkValidity checks the value of every node in the provided config by
// running config.AssertValid() on it.
func checkValidity(cfg node, report *Report) {
@@ -98,20 +151,29 @@ func checkNodeValidity(n, g node, r *Report) {
	}
}
// checkWriteFiles checks to make sure that the target file can actually be
// written. Note that this check is approximate (it only checks to see if the
// file is under /usr).
func checkWriteFiles(cfg node, report *Report) {
	for _, f := range cfg.Child("write_files").children {
		c := f.Child("path")
		if !c.IsValid() {
			continue
		}

		d := path.Dir(c.String())
		switch {
		case strings.HasPrefix(d, "/usr"):
			report.Error(c.line, "file cannot be written to a read-only filesystem")
		}
	}
}
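One detail worth noting, shown as a small standalone sketch (not part of this change): path.Dir cleans its result, so a traversal such as /tmp/../usr/invalid still resolves to a directory under /usr and is caught, matching the last TestCheckWriteFiles case below.

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	for _, p := range []string{"/valid", "/tmp/usr/valid", "/usr/invalid", "/tmp/../usr/invalid"} {
		d := path.Dir(p) // Dir cleans the path, resolving ".." segments
		fmt.Printf("%-22s dir=%-10s read-only=%v\n", p, d, strings.HasPrefix(d, "/usr"))
	}
}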
// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
// child of 'coreos' (it shouldn't be).
func checkWriteFilesUnderCoreos(cfg node, report *Report) {
	c := cfg.Child("coreos").Child("write_files")
	if c.IsValid() {
		report.Info(c.line, "write_files doesn't belong under coreos")
	}
}


@@ -21,6 +21,85 @@ import (
	"testing"
)

func TestCheckDiscoveryUrl(t *testing.T) {
	tests := []struct {
		config  string
		entries []Entry
	}{
		{},
		{
			config: "coreos:\n  etcd:\n    discovery: https://discovery.etcd.io/00000000000000000000000000000000",
		},
		{
			config: "coreos:\n  etcd:\n    discovery: http://custom.domain/mytoken",
		},
		{
			config:  "coreos:\n  etcd:\n    discovery: disco",
			entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
		},
	}

	for i, tt := range tests {
		r := Report{}
		n, err := parseCloudConfig([]byte(tt.config), &r)
		if err != nil {
			panic(err)
		}
		checkDiscoveryUrl(n, &r)

		if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
			t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
		}
	}
}
func TestCheckEncoding(t *testing.T) {
	tests := []struct {
		config  string
		entries []Entry
	}{
		{},
		{
			config: "write_files:\n  - encoding: base64\n    contents: aGVsbG8K",
		},
		{
			config: "write_files:\n  - contents: !!binary aGVsbG8K",
		},
		{
			config:  "write_files:\n  - encoding: base64\n    contents: !!binary aGVsbG8K",
			entries: []Entry{{entryError, `contents cannot be decoded as "base64"`, 3}},
		},
		{
			config: "write_files:\n  - encoding: base64\n    contents: !!binary YUdWc2JHOEsK",
		},
		{
			config: "write_files:\n  - encoding: gzip\n    contents: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
		},
		{
			config: "write_files:\n  - encoding: gzip+base64\n    contents: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
		},
		{
			config:  "write_files:\n  - encoding: custom\n    contents: hello",
			entries: []Entry{{entryError, `contents cannot be decoded as "custom"`, 3}},
		},
	}

	for i, tt := range tests {
		r := Report{}
		n, err := parseCloudConfig([]byte(tt.config), &r)
		if err != nil {
			panic(err)
		}
		checkEncoding(n, &r)

		if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
			t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
		}
	}
}
func TestCheckStructure(t *testing.T) {
	tests := []struct {
		config string
@@ -249,3 +328,74 @@ func TestCheckValidity(t *testing.T) {
		}
	}
}
func TestCheckWriteFiles(t *testing.T) {
	tests := []struct {
		config  string
		entries []Entry
	}{
		{},
		{
			config: "write_files:\n  - path: /valid",
		},
		{
			config: "write_files:\n  - path: /tmp/usr/valid",
		},
		{
			config:  "write_files:\n  - path: /usr/invalid",
			entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
		},
		{
			config:  "write-files:\n  - path: /tmp/../usr/invalid",
			entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
		},
	}

	for i, tt := range tests {
		r := Report{}
		n, err := parseCloudConfig([]byte(tt.config), &r)
		if err != nil {
			panic(err)
		}
		checkWriteFiles(n, &r)

		if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
			t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
		}
	}
}
func TestCheckWriteFilesUnderCoreos(t *testing.T) {
	tests := []struct {
		config  string
		entries []Entry
	}{
		{},
		{
			config: "write_files:\n  - path: /hi",
		},
		{
			config:  "coreos:\n  write_files:\n    - path: /hi",
			entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
		},
		{
			config:  "coreos:\n  write-files:\n    - path: /hyphen",
			entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
		},
	}

	for i, tt := range tests {
		r := Report{}
		n, err := parseCloudConfig([]byte(tt.config), &r)
		if err != nil {
			panic(err)
		}
		checkWriteFilesUnderCoreos(n, &r)

		if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
			t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
		}
	}
}


@@ -17,9 +17,6 @@
package system

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
@@ -50,61 +47,12 @@ func (f *File) Permissions() (os.FileMode, error) {
	return os.FileMode(perm), nil
}
func DecodeBase64Content(content string) ([]byte, error) {
	output, err := base64.StdEncoding.DecodeString(content)
	if err != nil {
		return nil, fmt.Errorf("Unable to decode base64: %v", err)
	}
	return output, nil
}

func DecodeGzipContent(content string) ([]byte, error) {
	gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
	if err != nil {
		return nil, fmt.Errorf("Unable to decode gzip: %v", err)
	}
	defer gzr.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(gzr)
	return buf.Bytes(), nil
}

func DecodeContent(content string, encoding string) ([]byte, error) {
	switch encoding {
	case "":
		return []byte(content), nil
	case "b64", "base64":
		return DecodeBase64Content(content)
	case "gz", "gzip":
		return DecodeGzipContent(content)
	case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
		gz, err := DecodeBase64Content(content)
		if err != nil {
			return nil, err
		}
		return DecodeGzipContent(string(gz))
	}
	return nil, fmt.Errorf("Unsupported encoding %s", encoding)
}
func WriteFile(f *File, root string) (string, error) {
	fullpath := path.Join(root, f.Path)
	dir := path.Dir(fullpath)
	log.Printf("Writing file to %q", fullpath)

	content, err := DecodeContent(f.Content, f.Encoding)
	content, err := config.DecodeContent(f.Content, f.Encoding)
	if err != nil {
		return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err)