Compare commits

..

19 Commits

Author SHA1 Message Date
Alex Crawford
ae3676096c coreos-cloudinit: bump to v1.2.1 2015-01-21 14:29:25 -08:00
Alex Crawford
a548b557ed doc: add coreos-ssh-import-github-users 2015-01-21 14:28:43 -08:00
Alex Crawford
a9c132a706 coreos-cloudinit: bump to v1.2.0+git 2015-01-20 14:42:48 -08:00
Alex Crawford
c3c4b86a3b coreos-cloudinit: bump to v1.2.0 2015-01-20 14:42:18 -08:00
Alex Crawford
44142ff8af Merge pull request #301 from crawford/github
config: add support for multiple github users
2015-01-20 14:26:38 -08:00
Alex Crawford
e9529ede44 config: add support for multiple github users 2015-01-20 13:45:52 -08:00
Alex Crawford
4b5b801171 Merge pull request #295 from crawford/rules
config/validate: add some sanity checks
2015-01-15 16:25:37 -08:00
Alex Crawford
551cbb1e5d config/validate: add rule for file encoding 2015-01-15 15:01:44 -08:00
Alex Crawford
3c93938f8a decode: refactor file decoding into config package 2015-01-15 15:01:44 -08:00
Alex Crawford
f61c08c246 config/validate: add rule for coreos.write_files 2015-01-15 15:01:44 -08:00
Alex Crawford
571903cec6 config/validate: add rule for etcd discovery token 2015-01-15 15:01:44 -08:00
Alex Crawford
bdbd1930ed config/validate: add rule for file paths 2015-01-15 15:01:44 -08:00
Alex Crawford
cc75a943ba Merge pull request #300 from crawford/float
validate: allow promotion of int to float64
2015-01-15 13:17:44 -08:00
Alex Crawford
fc77ba6355 validate: allow promotion of int to float64 2015-01-14 17:54:01 -08:00
Eugene Yakubovich
7cfa0df7c4 Merge pull request #299 from eyakubovich/master
config: document and update flannel config values
2015-01-13 15:55:42 -08:00
Eugene Yakubovich
58f0dadaf9 config: document and update flannel config values
Fixes #297
2015-01-13 15:51:47 -08:00
Michael Marineau
1ab530f157 Merge pull request #293 from realestate-com-au/master
select first available hostname returned by EC2 metadata.
2015-01-05 12:47:27 -08:00
Kevin Yung
13e4b77130 ec2: allow space-separated hostname in metadata
AWS hostname metadata will return space-separated hostname and domain
names when the DHCPOptionSet uses multiple domain names. This patch
caters for that scenario.
2015-01-05 16:01:57 +11:00
Alex Crawford
54c62cbb70 coreos-cloudinit: bump to v1.1.0+git 2014-12-30 16:52:10 +01:00
12 changed files with 345 additions and 94 deletions


@@ -109,7 +109,7 @@ flanneld. For example, the following cloud-config...
coreos:
  flannel:
    etcd-prefix: /coreos.com/network2
    etcd_prefix: /coreos.com/network2
```
...will generate a systemd unit drop-in like so:
@@ -119,7 +119,15 @@ coreos:
Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
```
For the complete list of flannel configuration parameters, see the [flannel documentation][flannel-readme].
List of flannel configuration parameters:
- **etcd_endpoints**: Comma separated list of etcd endpoints
- **etcd_cafile**: Path to CA file used for TLS communication with etcd
- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
- **etcd_prefix**: Etcd prefix path to be used for flannel keys
- **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
- **subnet_file**: Path to flannel subnet file to write out
- **interface**: Interface (name or IP) that should be used for inter-host communication
[flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
@@ -283,6 +291,7 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
- **no-user-group**: Boolean. Skip default group creation.
- **ssh-authorized-keys**: List of public SSH keys to authorize for this user
- **coreos-ssh-import-github**: Authorize SSH keys from a GitHub user
- **coreos-ssh-import-github-users**: Authorize SSH keys from a list of GitHub users (see the sketch after this list)
- **coreos-ssh-import-url**: Authorize SSH keys imported from a URL endpoint.
- **system**: Create the user as a system user. No home directory will be created.
- **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
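
To make the new list form concrete, here is a small, hedged sketch of how a users section maps onto the configuration struct. It is illustrative only: the trimmed `user` type stands in for `config.User`, the user names are made up, and `gopkg.in/yaml.v2` is used so the snippet runs standalone (it is not necessarily the YAML package the project itself vendors). The underscored keys match the struct's yaml tags shown in the `config.User` diff further down.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// user is a trimmed stand-in for config.User, keeping only the SSH-import fields.
type user struct {
	Name                 string   `yaml:"name"`
	SSHImportGithubUser  string   `yaml:"coreos_ssh_import_github"`
	SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users"`
}

// A users section using the new list field (user names are hypothetical).
const doc = `
users:
  - name: core
    coreos_ssh_import_github_users:
      - alice
      - bob
`

func main() {
	var cfg struct {
		Users []user `yaml:"users"`
	}
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		log.Fatal(err)
	}
	// Each listed GitHub user's public keys get authorized for the CoreOS user.
	fmt.Printf("%+v\n", cfg.Users[0].SSHImportGithubUsers) // [alice bob]
}
```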

config/decode.go (new file, 56 lines)

@@ -0,0 +1,56 @@
package config

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
)

func DecodeBase64Content(content string) ([]byte, error) {
	output, err := base64.StdEncoding.DecodeString(content)
	if err != nil {
		return nil, fmt.Errorf("Unable to decode base64: %q", err)
	}
	return output, nil
}

func DecodeGzipContent(content string) ([]byte, error) {
	gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
	if err != nil {
		return nil, fmt.Errorf("Unable to decode gzip: %q", err)
	}
	defer gzr.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(gzr)
	return buf.Bytes(), nil
}

func DecodeContent(content string, encoding string) ([]byte, error) {
	switch encoding {
	case "":
		return []byte(content), nil
	case "b64", "base64":
		return DecodeBase64Content(content)
	case "gz", "gzip":
		return DecodeGzipContent(content)
	case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
		gz, err := DecodeBase64Content(content)
		if err != nil {
			return nil, err
		}
		return DecodeGzipContent(string(gz))
	}

	return nil, fmt.Errorf("Unsupported encoding %q", encoding)
}
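
A brief usage sketch for the helper above (added here for illustration, not part of the original change): it builds a `gzip+base64` payload the way a cloud-config author might, then round-trips it through `config.DecodeContent`.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/coreos/coreos-cloudinit/config"
)

func main() {
	// Produce a gzip+base64 payload (write errors ignored for brevity).
	var buf bytes.Buffer
	gzw := gzip.NewWriter(&buf)
	gzw.Write([]byte("#!/bin/sh\necho hello\n"))
	gzw.Close()
	encoded := base64.StdEncoding.EncodeToString(buf.Bytes())

	// DecodeContent dispatches on the encoding string ("", "base64", "gzip",
	// "gzip+base64", ...) and returns the raw file contents.
	decoded, err := config.DecodeContent(encoded, "gzip+base64")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", decoded)
}
```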


@@ -1,9 +1,12 @@
package config
type Flannel struct {
EtcdEndpoint string `yaml:"etcd_endpoint" env:"FLANNELD_ETCD_ENDPOINT"`
EtcdPrefix string `yaml:"etcd_prefix" env:"FLANNELD_ETCD_PREFIX"`
IPMasq string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
Iface string `yaml:"interface" env:"FLANNELD_IFACE"`
EtcdEndpoints string `yaml:"etcd_endpoints" env:"FLANNELD_ETCD_ENDPOINTS"`
EtcdCAFile string `yaml:"etcd_cafile" env:"FLANNELD_ETCD_CAFILE"`
EtcdCertFile string `yaml:"etcd_certfile" env:"FLANNELD_ETCD_CERTFILE"`
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLANNELD_ETCD_KEYFILE"`
EtcdPrefix string `yaml:"etcd_prefix" env:"FLANNELD_ETCD_PREFIX"`
IPMasq string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
Iface string `yaml:"interface" env:"FLANNELD_IFACE"`
}
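
For orientation, here is a minimal sketch (not the project's actual implementation) of how the `env:` tags on the expanded struct above can be turned into the `FLANNELD_*` lines that land in flannel's options.env drop-in; the flannel test expectations further down show the same two lines.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/coreos/coreos-cloudinit/config"
)

// flannelEnvLines walks the struct fields and emits NAME=value pairs for the
// fields that are set, taking the variable names from the env tags.
func flannelEnvLines(f config.Flannel) []string {
	var lines []string
	v := reflect.ValueOf(f)
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		if val := v.Field(i).String(); val != "" {
			lines = append(lines, fmt.Sprintf("%s=%s", t.Field(i).Tag.Get("env"), val))
		}
	}
	return lines
}

func main() {
	f := config.Flannel{
		EtcdEndpoints: "http://12.34.56.78:4001",
		EtcdPrefix:    "/coreos.com/network/tenant1",
	}
	for _, line := range flannelEnvLines(f) {
		fmt.Println(line)
	}
	// FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
	// FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1
}
```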


@@ -17,17 +17,18 @@
package config
type User struct {
Name string `yaml:"name"`
PasswordHash string `yaml:"passwd"`
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
SSHImportGithubUser string `yaml:"coreos_ssh_import_github"`
SSHImportURL string `yaml:"coreos_ssh_import_url"`
GECOS string `yaml:"gecos"`
Homedir string `yaml:"homedir"`
NoCreateHome bool `yaml:"no_create_home"`
PrimaryGroup string `yaml:"primary_group"`
Groups []string `yaml:"groups"`
NoUserGroup bool `yaml:"no_user_group"`
System bool `yaml:"system"`
NoLogInit bool `yaml:"no_log_init"`
Name string `yaml:"name"`
PasswordHash string `yaml:"passwd"`
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
SSHImportGithubUser string `yaml:"coreos_ssh_import_github"`
SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users"`
SSHImportURL string `yaml:"coreos_ssh_import_url"`
GECOS string `yaml:"gecos"`
Homedir string `yaml:"homedir"`
NoCreateHome bool `yaml:"no_create_home"`
PrimaryGroup string `yaml:"primary_group"`
Groups []string `yaml:"groups"`
NoUserGroup bool `yaml:"no_user_group"`
System bool `yaml:"system"`
NoLogInit bool `yaml:"no_log_init"`
}


@@ -18,7 +18,10 @@ package validate
import (
"fmt"
"net/url"
"path"
"reflect"
"strings"
"github.com/coreos/coreos-cloudinit/config"
)
@@ -27,8 +30,40 @@ type rule func(config node, report *Report)
// Rules contains all of the validation rules.
var Rules []rule = []rule{
checkDiscoveryUrl,
checkEncoding,
checkStructure,
checkValidity,
checkWriteFiles,
checkWriteFilesUnderCoreos,
}
// checkDiscoveryUrl verifies that the string is a valid url.
func checkDiscoveryUrl(cfg node, report *Report) {
c := cfg.Child("coreos").Child("etcd").Child("discovery")
if !c.IsValid() {
return
}
if _, err := url.ParseRequestURI(c.String()); err != nil {
report.Warning(c.line, "discovery URL is not valid")
}
}
// checkEncoding validates that, for each file under 'write_files', the
// content can be decoded given the specified encoding.
func checkEncoding(cfg node, report *Report) {
for _, f := range cfg.Child("write_files").children {
e := f.Child("encoding")
if !e.IsValid() {
continue
}
c := f.Child("contents")
if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
report.Error(c.line, fmt.Sprintf("contents cannot be decoded as %q", e.String()))
}
}
}
// checkStructure compares the provided config to the empty config.CloudConfig
@@ -67,6 +102,24 @@ func checkNodeStructure(n, g node, r *Report) {
}
}
// isCompatible determines if the type of kind n can be converted to the type
// of kind g in the context of YAML. This is not an exhaustive list, but it's
// enough for the purposes of cloud-config validation.
func isCompatible(n, g reflect.Kind) bool {
switch g {
case reflect.String:
return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
case reflect.Struct:
return n == reflect.Struct || n == reflect.Map
case reflect.Float64:
return n == reflect.Float64 || n == reflect.Int
case reflect.Bool, reflect.Slice, reflect.Int:
return n == g
default:
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
}
}
// checkValidity checks the value of every node in the provided config by
// running config.AssertValid() on it.
func checkValidity(cfg node, report *Report) {
@@ -98,18 +151,29 @@ func checkNodeValidity(n, g node, r *Report) {
}
}
// isCompatible determines if the type of kind n can be converted to the type
// of kind g in the context of YAML. This is not an exhaustive list, but it's
// enough for the purposes of cloud-config validation.
func isCompatible(n, g reflect.Kind) bool {
switch g {
case reflect.String:
return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
case reflect.Struct:
return n == reflect.Struct || n == reflect.Map
case reflect.Bool, reflect.Slice, reflect.Int, reflect.Float64:
return n == g
default:
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
// checkWriteFiles checks to make sure that the target file can actually be
// written. Note that this check is approximate (it only checks to see if the file
// is under /usr).
func checkWriteFiles(cfg node, report *Report) {
for _, f := range cfg.Child("write_files").children {
c := f.Child("path")
if !c.IsValid() {
continue
}
d := path.Dir(c.String())
switch {
case strings.HasPrefix(d, "/usr"):
report.Error(c.line, "file cannot be written to a read-only filesystem")
}
}
}
// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
// child of 'coreos' (it shouldn't be).
func checkWriteFilesUnderCoreos(cfg node, report *Report) {
c := cfg.Child("coreos").Child("write_files")
if c.IsValid() {
report.Info(c.line, "write_files doesn't belong under coreos")
}
}
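
The `Float64` case in `isCompatible` above exists because YAML parsers hand back an `int` for a whole-number scalar even when the target config field is a `float64`. A small standalone illustration (the key name is made up, and `gopkg.in/yaml.v2` is assumed purely for demonstration; unmarshal errors are ignored for brevity):

```go
package main

import (
	"fmt"
	"reflect"

	"gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}

	// A whole number parses as int, even if the config field it feeds is a float64...
	yaml.Unmarshal([]byte("request_timeout: 2"), &v)
	fmt.Println(reflect.ValueOf(v["request_timeout"]).Kind()) // int

	// ...while a fractional value parses as float64.
	yaml.Unmarshal([]byte("request_timeout: 2.5"), &v)
	fmt.Println(reflect.ValueOf(v["request_timeout"]).Kind()) // float64
}
```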


@@ -21,6 +21,85 @@ import (
"testing"
)
func TestCheckDiscoveryUrl(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "coreos:\n etcd:\n discovery: https://discovery.etcd.io/00000000000000000000000000000000",
},
{
config: "coreos:\n etcd:\n discovery: http://custom.domain/mytoken",
},
{
config: "coreos:\n etcd:\n discovery: disco",
entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkDiscoveryUrl(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckEncoding(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - encoding: base64\n contents: aGVsbG8K",
},
{
config: "write_files:\n - contents: !!binary aGVsbG8K",
},
{
config: "write_files:\n - encoding: base64\n contents: !!binary aGVsbG8K",
entries: []Entry{{entryError, `contents cannot be decoded as "base64"`, 3}},
},
{
config: "write_files:\n - encoding: base64\n contents: !!binary YUdWc2JHOEsK",
},
{
config: "write_files:\n - encoding: gzip\n contents: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
},
{
config: "write_files:\n - encoding: gzip+base64\n contents: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
},
{
config: "write_files:\n - encoding: custom\n contents: hello",
entries: []Entry{{entryError, `contents cannot be decoded as "custom"`, 3}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkEncoding(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckStructure(t *testing.T) {
tests := []struct {
config string
@@ -249,3 +328,74 @@ func TestCheckValidity(t *testing.T) {
}
}
}
func TestCheckWriteFiles(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - path: /valid",
},
{
config: "write_files:\n - path: /tmp/usr/valid",
},
{
config: "write_files:\n - path: /usr/invalid",
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
},
{
config: "write-files:\n - path: /tmp/../usr/invalid",
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkWriteFiles(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}
func TestCheckWriteFilesUnderCoreos(t *testing.T) {
tests := []struct {
config string
entries []Entry
}{
{},
{
config: "write_files:\n - path: /hi",
},
{
config: "coreos:\n write_files:\n - path: /hi",
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
},
{
config: "coreos:\n write-files:\n - path: /hyphen",
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
},
}
for i, tt := range tests {
r := Report{}
n, err := parseCloudConfig([]byte(tt.config), &r)
if err != nil {
panic(err)
}
checkWriteFilesUnderCoreos(n, &r)
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
}
}
}


@@ -40,7 +40,7 @@ import (
)
const (
version = "1.1.0"
version = "1.2.1"
datasourceInterval = 100 * time.Millisecond
datasourceMaxInterval = 30 * time.Second
datasourceTimeout = 5 * time.Minute


@@ -69,7 +69,7 @@ func (ms metadataService) FetchMetadata() ([]byte, error) {
}
if hostname, err := ms.fetchAttribute(fmt.Sprintf("%s/hostname", ms.MetadataUrl())); err == nil {
attrs["hostname"] = hostname
attrs["hostname"] = strings.Split(hostname, " ")[0]
} else if _, ok := err.(pkg.ErrNotFound); !ok {
return nil, err
}
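
A tiny self-contained illustration of the one-line change above: when EC2 returns several space-separated names (as it can with a multi-domain DHCPOptionSet), only the first one is used as the hostname. The sample value mirrors the new test case below.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// As returned by .../meta-data/hostname with multiple search domains.
	hostname := "host domain another_domain"
	fmt.Println(strings.Split(hostname, " ")[0]) // host
}
```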


@@ -173,6 +173,20 @@ func TestFetchMetadata(t *testing.T) {
},
expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
},
{
root: "/",
metadataPath: "2009-04-04/meta-data",
resources: map[string]string{
"/2009-04-04/meta-data/hostname": "host domain another_domain",
"/2009-04-04/meta-data/local-ipv4": "1.2.3.4",
"/2009-04-04/meta-data/public-ipv4": "5.6.7.8",
"/2009-04-04/meta-data/public-keys": "0=test1\n",
"/2009-04-04/meta-data/public-keys/0": "openssh-key",
"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
"/2009-04-04/meta-data/network_config/content_path": "path",
},
expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
},
{
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},


@@ -87,6 +87,12 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
return err
}
}
for _, u := range user.SSHImportGithubUsers {
log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name)
if err := SSHImportGithubUser(user.Name, u); err != nil {
return err
}
}
if user.SSHImportURL != "" {
log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL)
if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil {


@@ -17,9 +17,6 @@
package system
import (
"bytes"
"compress/gzip"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
@@ -50,61 +47,12 @@ func (f *File) Permissions() (os.FileMode, error) {
return os.FileMode(perm), nil
}
func DecodeBase64Content(content string) ([]byte, error) {
output, err := base64.StdEncoding.DecodeString(content)
if err != nil {
return nil, fmt.Errorf("Unable to decode base64: %v", err)
}
return output, nil
}
func DecodeGzipContent(content string) ([]byte, error) {
gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
if err != nil {
return nil, fmt.Errorf("Unable to decode gzip: %v", err)
}
defer gzr.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(gzr)
return buf.Bytes(), nil
}
func DecodeContent(content string, encoding string) ([]byte, error) {
switch encoding {
case "":
return []byte(content), nil
case "b64", "base64":
return DecodeBase64Content(content)
case "gz", "gzip":
return DecodeGzipContent(content)
case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
gz, err := DecodeBase64Content(content)
if err != nil {
return nil, err
}
return DecodeGzipContent(string(gz))
}
return nil, fmt.Errorf("Unsupported encoding %s", encoding)
}
func WriteFile(f *File, root string) (string, error) {
fullpath := path.Join(root, f.Path)
dir := path.Dir(fullpath)
log.Printf("Writing file to %q", fullpath)
content, err := DecodeContent(f.Content, f.Encoding)
content, err := config.DecodeContent(f.Content, f.Encoding)
if err != nil {
return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err)


@@ -18,10 +18,10 @@ func TestFlannelEnvVars(t *testing.T) {
},
{
config.Flannel{
EtcdEndpoint: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
EtcdEndpoints: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
},
`FLANNELD_ETCD_ENDPOINT=http://12.34.56.78:4001
`FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
},
} {
@@ -43,13 +43,13 @@ func TestFlannelFile(t *testing.T) {
},
{
config.Flannel{
EtcdEndpoint: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
EtcdEndpoints: "http://12.34.56.78:4001",
EtcdPrefix: "/coreos.com/network/tenant1",
},
&File{config.File{
Path: "run/flannel/options.env",
RawFilePermissions: "0644",
Content: `FLANNELD_ETCD_ENDPOINT=http://12.34.56.78:4001
Content: `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
}},
},