Compare commits

87 Commits

| Author | SHA1 | Date |
|---|---|---|
| | ae3676096c | |
| | a548b557ed | |
| | a9c132a706 | |
| | c3c4b86a3b | |
| | 44142ff8af | |
| | e9529ede44 | |
| | 4b5b801171 | |
| | 551cbb1e5d | |
| | 3c93938f8a | |
| | f61c08c246 | |
| | 571903cec6 | |
| | bdbd1930ed | |
| | cc75a943ba | |
| | fc77ba6355 | |
| | 7cfa0df7c4 | |
| | 58f0dadaf9 | |
| | 1ab530f157 | |
| | 13e4b77130 | |
| | 54c62cbb70 | |
| | c8e864fef5 | |
| | 60a3377e7c | |
| | 5527f09778 | |
| | 54a64454b9 | |
| | 0e70d4f01f | |
| | af8e590575 | |
| | 40d943fb7a | |
| | 248536a5cd | |
| | 4ed1d03c97 | |
| | 057ab37364 | |
| | 182241c8d3 | |
| | edced59fa6 | |
| | 9be836df31 | |
| | 4e54447b8e | |
| | 999c38b09b | |
| | 06d13de5c3 | |
| | 5b0903d162 | |
| | 10669be7c0 | |
| | 2edae741e1 | |
| | ea90e553d1 | |
| | b0cfd86902 | |
| | 565a9540c9 | |
| | fd10e27b99 | |
| | 39763d772c | |
| | ee69b77bfb | |
| | 353444e56d | |
| | 112ba1e31f | |
| | 9c3cd9e69c | |
| | 685d8317bc | |
| | f42d102b26 | |
| | c944e9ef94 | |
| | f10d6e8bef | |
| | f3f3af79fd | |
| | 0e63aa0f6b | |
| | b254e17e89 | |
| | 5c059b66f0 | |
| | c628bef666 | |
| | 2270db3f7a | |
| | d0d467813d | |
| | 123f111efe | |
| | 521ecfdab5 | |
| | 6d0fdf1a47 | |
| | ffc54b028c | |
| | 420f7cf202 | |
| | 624df676d0 | |
| | 75ed8dacf9 | |
| | dcaabe4d4a | |
| | 92c57423ba | |
| | 7447e133c9 | |
| | 4e466c12da | |
| | 333468dba3 | |
| | 55c3a793ad | |
| | eca51031c8 | |
| | 19522bcb82 | |
| | 62248ea33d | |
| | d2a19cc86d | |
| | 08131ffab1 | |
| | 4a0019c669 | |
| | 3275ead1ec | |
| | 32b6a55724 | |
| | 6c43644369 | |
| | e6593d49e6 | |
| | ab752b239f | |
| | 44fdf95d99 | |
| | 0a62614eec | |
| | ea95920f31 | |
| | b6062f0644 | |
| | c5fada6e69 | |
.travis.yml (16 lines changed)

@@ -1,11 +1,17 @@
 language: go
-go:
-  - 1.3
-  - 1.2
+sudo: false
+matrix:
+  include:
+    - go: 1.4
+      env: TOOLS_CMD=golang.org/x/tools/cmd
+    - go: 1.3
+      env: TOOLS_CMD=code.google.com/p/go.tools/cmd
+    - go: 1.2
+      env: TOOLS_CMD=code.google.com/p/go.tools/cmd
 
 install:
-  - go get code.google.com/p/go.tools/cmd/cover
-  - go get code.google.com/p/go.tools/cmd/vet
+  - go get ${TOOLS_CMD}/cover
+  - go get ${TOOLS_CMD}/vet
 
 script:
   - ./test
@@ -1,6 +1,8 @@
 # Using Cloud-Config
 
-CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime. Your cloud-config is processed during each boot.
+CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.
+
+Your cloud-config is processed during each boot. Invalid cloud-config won't be processed but will be logged in the journal. You can validate your cloud-config with the [CoreOS validator]({{site.url}}/validate) or by running `coreos-cloudinit -validate`.
 
 ## Configuration File
 
@@ -16,7 +18,7 @@ We've designed our implementation to allow the same cloud-config file to work ac
 
 The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.
 
-A cloud-config file should contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:
+A cloud-config file must contain `#cloud-config`, followed by an associative array which has zero or more of the following keys:
 
 - `coreos`
 - `ssh_authorized_keys`
@@ -66,7 +68,6 @@ Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
 ```
 
 For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
-Note that hyphens in the coreos.etcd.* keys are mapped to underscores.
 
 _Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
 
@@ -99,27 +100,61 @@ For more information on fleet configuration, see the [fleet documentation][fleet
 
 #### flannel
 
-The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd.*` and `coreos.fleet.*`. They can be used to set enviornment variables for flanneld. Given the following cloud-config...
+The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd.*`
+and `coreos.fleet.*`. They can be used to set environment variables for
+flanneld. For example, the following cloud-config...
 
 ```yaml
 #cloud-config
 
 coreos:
   flannel:
-    etcd-prefix: /coreos.com/network2
+    etcd_prefix: /coreos.com/network2
 ```
 
-...will generate systemd unit drop-in like so:
+...will generate a systemd unit drop-in like so:
 
 ```
 [Service]
 Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
 ```
 
-For complete list of flannel configuraion parameters, see the [flannel documentation][flannel-readme].
+List of flannel configuration parameters:
+- **etcd_endpoints**: Comma separated list of etcd endpoints
+- **etcd_cafile**: Path to CA file used for TLS communication with etcd
+- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
+- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
+- **etcd_prefix**: Etcd prefix path to be used for flannel keys
+- **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
+- **subnet_file**: Path to flannel subnet file to write out
+- **interface**: Interface (name or IP) that should be used for inter-host communication
 
 [flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
 
+#### locksmith
+
+The `coreos.locksmith.*` parameters can be used to set environment variables
+for locksmith. For example, the following cloud-config...
+
+```yaml
+#cloud-config
+
+coreos:
+  locksmith:
+    endpoint: example.com:4001
+```
+
+...will generate a systemd unit drop-in like so:
+
+```
+[Service]
+Environment="LOCKSMITHD_ENDPOINT=example.com:4001"
+```
+
+For the complete list of locksmith configuraion parameters, see the [locksmith documentation][locksmith-readme].
+
+[locksmith-readme]: https://github.com/coreos/locksmith/blob/master/README.md
+
 #### update
 
 The `coreos.update.*` parameters manipulate settings related to how CoreOS instances are updated.
@@ -158,6 +193,10 @@ Each item is an object with the following fields:
 - **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
 - **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
 - **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
+- **drop-ins**: A list of unit drop-ins with the following fields:
+  - **name**: String representing unit's name. Required.
+  - **content**: Plaintext string representing entire file. Required.
+
 
 **NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
 
@@ -184,6 +223,21 @@ coreos:
         ExecStop=/usr/bin/docker stop -t 2 redis_server
 ```
 
+Add the DOCKER_OPTS environment variable to docker.service.
+
+```yaml
+#cloud-config
+
+coreos:
+  units:
+    - name: docker.service
+      drop-ins:
+        - name: 50-insecure-registry.conf
+          content: |
+            [Service]
+            Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
+```
+
 Start the built-in `etcd` and `fleet` services:
 
 ```yaml
@@ -237,6 +291,7 @@ All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the use
 - **no-user-group**: Boolean. Skip default group creation.
 - **ssh-authorized-keys**: List of public SSH keys to authorize for this user
 - **coreos-ssh-import-github**: Authorize SSH keys from Github user
+- **coreos-ssh-import-github-users**: Authorize SSH keys from a list of Github users
 - **coreos-ssh-import-url**: Authorize SSH keys imported from a url endpoint.
 - **system**: Create the user as a system user. No home directory will be created.
 - **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
@@ -328,9 +383,11 @@ Each item in the list may have the following keys:
 - **content**: Data to write at the provided `path`
 - **permissions**: Integer representing file permissions, typically in octal notation (i.e. 0644)
 - **owner**: User and group that should own the file written to disk. This is equivalent to the `<user>:<group>` argument to `chown <user>:<group> <path>`.
+- **encoding**: Optional. The encoding of the data in content. If not specified this defaults to the yaml document encoding (usually utf-8). Supported encoding types are:
+  - **b64, base64**: Base64 encoded content
+  - **gz, gzip**: gzip encoded content, for use with the !!binary tag
+  - **gz+b64, gz+base64, gzip+b64, gzip+base64**: Base64 encoded gzip content
 
-Explicitly not implemented is the **encoding** attribute.
-The **content** field must represent exactly what should be written to disk.
 
 ```yaml
 #cloud-config
@@ -345,6 +402,24 @@ write_files:
     owner: root
     content: |
       Good news, everyone!
+  - path: /tmp/like_this
+    permissions: 0644
+    owner: root
+    encoding: gzip
+    content: !!binary |
+      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
+  - path: /tmp/or_like_this
+    permissions: 0644
+    owner: root
+    encoding: gzip+base64
+    content: |
+      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
+  - path: /tmp/todolist
+    permissions: 0644
+    owner: root
+    encoding: base64
+    content: |
+      UGFjayBteSBib3ggd2l0aCBmaXZlIGRvemVuIGxpcXVvciBqdWdz
 ```
 
 ### manage_etc_hosts
Godeps/Godeps.json (generated; 10 lines changed)

@@ -1,6 +1,6 @@
 {
     "ImportPath": "github.com/coreos/coreos-cloudinit",
-    "GoVersion": "go1.3.1",
+    "GoVersion": "go1.3.3",
     "Packages": [
         "./..."
     ],
@@ -13,6 +13,10 @@
         "ImportPath": "github.com/coreos/go-systemd/dbus",
         "Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
     },
+    {
+        "ImportPath": "github.com/coreos/yaml",
+        "Rev": "6b16a5714269b2f70720a45406b1babd947a17ef"
+    },
     {
         "ImportPath": "github.com/dotcloud/docker/pkg/netlink",
         "Comment": "v0.11.1-359-g55d41c3e21e1",
@@ -25,10 +29,6 @@
     {
         "ImportPath": "github.com/tarm/goserial",
         "Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
-    },
-    {
-        "ImportPath": "gopkg.in/yaml.v1",
-        "Rev": "feb4ca79644e8e7e39c06095246ee54b1282c118"
     }
     ]
 }
@@ -1,3 +1,6 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
 This software is licensed under the LGPLv3, included below.
 
 As a special exception to the GNU Lesser General Public License version 3
@@ -1,3 +1,6 @@
+Note: This is a fork of https://github.com/go-yaml/yaml. The following README
+doesn't necessarily apply to this fork.
+
 # YAML support for the Go language
 
 Introduction
@@ -12,10 +15,10 @@ C library to parse and generate YAML data quickly and reliably.
 Compatibility
 -------------
 
-The yaml package is almost compatible with YAML 1.1, including support for
-anchors, tags, etc. There are still a few missing bits, such as document
-merging, base-60 floats (huh?), and multi-document unmarshalling. These
-features are not hard to add, and will be introduced as necessary.
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
 
 Installation and usage
 ----------------------
@@ -1,6 +1,8 @@
 package yaml
 
 import (
+    "encoding/base64"
+    "fmt"
     "reflect"
     "strconv"
     "time"
@@ -31,10 +33,12 @@ type parser struct {
     parser yaml_parser_t
     event  yaml_event_t
     doc    *node
+    transform transformString
 }
 
-func newParser(b []byte) *parser {
-    p := parser{}
+func newParser(b []byte, t transformString) *parser {
+    p := parser{transform: t}
+
     if !yaml_parser_initialize(&p.parser) {
         panic("Failed to initialize YAML emitter")
     }
@@ -63,7 +67,7 @@ func (p *parser) destroy() {
 func (p *parser) skip() {
     if p.event.typ != yaml_NO_EVENT {
         if p.event.typ == yaml_STREAM_END_EVENT {
-            panic("Attempted to go past the end of stream. Corrupted value?")
+            fail("Attempted to go past the end of stream. Corrupted value?")
         }
         yaml_event_delete(&p.event)
     }
@@ -89,7 +93,7 @@ func (p *parser) fail() {
     } else {
         msg = "Unknown problem parsing YAML content"
     }
-    panic(where + msg)
+    fail(where + msg)
 }
 
 func (p *parser) anchor(n *node, anchor []byte) {
@@ -114,10 +118,9 @@ func (p *parser) parse() *node {
         // Happens when attempting to decode an empty buffer.
         return nil
     default:
-        panic("Attempted to parse unknown event: " +
-            strconv.Itoa(int(p.event.typ)))
+        panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
     }
-    panic("Unreachable")
+    panic("unreachable")
 }
 
 func (p *parser) node(kind int) *node {
@@ -135,8 +138,7 @@ func (p *parser) document() *node {
     p.skip()
     n.children = append(n.children, p.parse())
     if p.event.typ != yaml_DOCUMENT_END_EVENT {
-        panic("Expected end of document event but got " +
-            strconv.Itoa(int(p.event.typ)))
+        panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
     }
     p.skip()
     return n
@@ -175,7 +177,10 @@ func (p *parser) mapping() *node {
     p.anchor(n, p.event.anchor)
     p.skip()
     for p.event.typ != yaml_MAPPING_END_EVENT {
-        n.children = append(n.children, p.parse(), p.parse())
+        key := p.parse()
+        key.value = p.transform(key.value)
+        value := p.parse()
+        n.children = append(n.children, key, value)
     }
     p.skip()
     return n
@@ -218,7 +223,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
             var arg interface{}
             *out = reflect.ValueOf(&arg).Elem()
             return func() {
-                *good = setter.SetYAML(tag, arg)
+                *good = setter.SetYAML(shortTag(tag), arg)
             }
         }
     }
@@ -226,7 +231,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
     for again {
         again = false
         setter, _ := (*out).Interface().(Setter)
-        if tag != "!!null" || setter != nil {
+        if tag != yaml_NULL_TAG || setter != nil {
            if pv := (*out); pv.Kind() == reflect.Ptr {
                 if pv.IsNil() {
                     *out = reflect.New(pv.Type().Elem()).Elem()
@@ -242,7 +247,7 @@ func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()
             var arg interface{}
             *out = reflect.ValueOf(&arg).Elem()
             return func() {
-                *good = setter.SetYAML(tag, arg)
+                *good = setter.SetYAML(shortTag(tag), arg)
             }
         }
     }
@@ -279,10 +284,10 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
 func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
     an, ok := d.doc.anchors[n.value]
     if !ok {
-        panic("Unknown anchor '" + n.value + "' referenced")
+        fail("Unknown anchor '" + n.value + "' referenced")
     }
     if d.aliases[n.value] {
-        panic("Anchor '" + n.value + "' value contains itself")
+        fail("Anchor '" + n.value + "' value contains itself")
     }
     d.aliases[n.value] = true
     good = d.unmarshal(an, out)
@@ -290,23 +295,50 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
     return good
 }
 
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+    for _, k := range out.MapKeys() {
+        out.SetMapIndex(k, zeroValue)
+    }
+}
+
 var durationType = reflect.TypeOf(time.Duration(0))
 
 func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
     var tag string
     var resolved interface{}
     if n.tag == "" && !n.implicit {
-        tag = "!!str"
+        tag = yaml_STR_TAG
         resolved = n.value
     } else {
         tag, resolved = resolve(n.tag, n.value)
+        if tag == yaml_BINARY_TAG {
+            data, err := base64.StdEncoding.DecodeString(resolved.(string))
+            if err != nil {
+                fail("!!binary value contains invalid base64 data")
+            }
+            resolved = string(data)
+        }
     }
     if set := d.setter(tag, &out, &good); set != nil {
         defer set()
     }
+    if resolved == nil {
+        if out.Kind() == reflect.Map && !out.CanAddr() {
+            resetMap(out)
+        } else {
+            out.Set(reflect.Zero(out.Type()))
+        }
+        good = true
+        return
+    }
     switch out.Kind() {
     case reflect.String:
-        if resolved != nil {
+        if tag == yaml_BINARY_TAG {
+            out.SetString(resolved.(string))
+            good = true
+        } else if resolved != nil {
             out.SetString(n.value)
             good = true
         }
@@ -380,11 +412,6 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
             good = true
         }
     case reflect.Ptr:
-        switch resolved.(type) {
-        case nil:
-            out.Set(reflect.Zero(out.Type()))
-            good = true
-        default:
         if out.Type().Elem() == reflect.TypeOf(resolved) {
             elem := reflect.New(out.Type().Elem())
             elem.Elem().Set(reflect.ValueOf(resolved))
@@ -392,7 +419,6 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
             good = true
         }
     }
-    }
     return good
 }
 
@@ -404,7 +430,7 @@ func settableValueOf(i interface{}) reflect.Value {
 }
 
 func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
-    if set := d.setter("!!seq", &out, &good); set != nil {
+    if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil {
         defer set()
     }
     var iface reflect.Value
@@ -433,7 +459,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 }
 
 func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
-    if set := d.setter("!!map", &out, &good); set != nil {
+    if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil {
         defer set()
     }
     if out.Kind() == reflect.Struct {
@@ -465,6 +491,13 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
         }
         k := reflect.New(kt).Elem()
         if d.unmarshal(n.children[i], k) {
+            kkind := k.Kind()
+            if kkind == reflect.Interface {
+                kkind = k.Elem().Kind()
+            }
+            if kkind == reflect.Map || kkind == reflect.Slice {
+                fail(fmt.Sprintf("invalid map key: %#v", k.Interface()))
+            }
             e := reflect.New(et).Elem()
             if d.unmarshal(n.children[i+1], e) {
                 out.SetMapIndex(k, e)
@@ -511,28 +544,28 @@ func (d *decoder) merge(n *node, out reflect.Value) {
     case aliasNode:
         an, ok := d.doc.anchors[n.value]
         if ok && an.kind != mappingNode {
-            panic(wantMap)
+            fail(wantMap)
         }
         d.unmarshal(n, out)
     case sequenceNode:
         // Step backwards as earlier nodes take precedence.
-        for i := len(n.children)-1; i >= 0; i-- {
+        for i := len(n.children) - 1; i >= 0; i-- {
             ni := n.children[i]
             if ni.kind == aliasNode {
                 an, ok := d.doc.anchors[ni.value]
                 if ok && an.kind != mappingNode {
-                    panic(wantMap)
+                    fail(wantMap)
                 }
             } else if ni.kind != mappingNode {
-                panic(wantMap)
+                fail(wantMap)
             }
             d.unmarshal(ni, out)
         }
     default:
-        panic(wantMap)
+        fail(wantMap)
     }
 }
 
 func isMerge(n *node) bool {
-    return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
+    return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
 }
@@ -1,10 +1,11 @@
 package yaml_test
 
 import (
+    "github.com/coreos/yaml"
     . "gopkg.in/check.v1"
-    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
     "math"
     "reflect"
+    "strings"
     "time"
 )
 
@@ -316,7 +317,10 @@ var unmarshalTests = []struct {
         map[string]*string{"foo": new(string)},
     }, {
         "foo: null",
-        map[string]string{},
+        map[string]string{"foo": ""},
+    }, {
+        "foo: null",
+        map[string]interface{}{"foo": nil},
     },
 
     // Ignored field
@@ -377,6 +381,24 @@ var unmarshalTests = []struct {
         "a: <foo>",
         map[string]string{"a": "<foo>"},
     },
+
+    // Base 60 floats are obsolete and unsupported.
+    {
+        "a: 1:1\n",
+        map[string]string{"a": "1:1"},
+    },
+
+    // Binary data.
+    {
+        "a: !!binary gIGC\n",
+        map[string]string{"a": "\x80\x81\x82"},
+    }, {
+        "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+        map[string]string{"a": strings.Repeat("\x90", 54)},
+    }, {
+        "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
+        map[string]string{"a": strings.Repeat("\x00", 52)},
+    },
 }
 
 type inlineB struct {
@@ -424,12 +446,15 @@ func (s *S) TestUnmarshalNaN(c *C) {
 var unmarshalErrorTests = []struct {
     data, error string
 }{
-    {"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
+    {"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"},
     {"v: [A,", "YAML error: line 1: did not find expected node content"},
     {"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
     {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
     {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
     {"value: -", "YAML error: block sequence entries are not allowed in this context"},
+    {"a: !!binary ==", "YAML error: !!binary value contains invalid base64 data"},
+    {"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`},
+    {"{{.}}", `YAML error: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
 }
 
 func (s *S) TestUnmarshalErrors(c *C) {
@@ -532,6 +557,23 @@ func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
     c.Assert(m["ghi"].value, Equals, 3)
 }
 
+func (s *S) TestUnmarshalWithTransform(c *C) {
+    data := `{a_b: 1, c-d: 2, e-f_g: 3, h_i-j: 4}`
+    expect := map[string]int{
+        "a_b":   1,
+        "c_d":   2,
+        "e_f_g": 3,
+        "h_i_j": 4,
+    }
+    m := map[string]int{}
+    yaml.UnmarshalMappingKeyTransform = func(i string) string {
+        return strings.Replace(i, "-", "_", -1)
+    }
+    err := yaml.Unmarshal([]byte(data), m)
+    c.Assert(err, IsNil)
+    c.Assert(m, DeepEquals, expect)
+}
+
 // From http://yaml.org/type/merge.html
 var mergeTests = `
 anchors:
@@ -624,6 +666,30 @@ func (s *S) TestMergeStruct(c *C) {
     }
 }
 
+var unmarshalNullTests = []func() interface{}{
+    func() interface{} { var v interface{}; v = "v"; return &v },
+    func() interface{} { var s = "s"; return &s },
+    func() interface{} { var s = "s"; sptr := &s; return &sptr },
+    func() interface{} { var i = 1; return &i },
+    func() interface{} { var i = 1; iptr := &i; return &iptr },
+    func() interface{} { m := map[string]int{"s": 1}; return &m },
+    func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+    for _, test := range unmarshalNullTests {
+        item := test()
+        zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+        err := yaml.Unmarshal([]byte("null"), item)
+        c.Assert(err, IsNil)
+        if reflect.TypeOf(item).Kind() == reflect.Map {
+            c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+        } else {
+            c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+        }
+    }
+}
+
 //var data []byte
 //func init() {
 //    var err error
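The mapping-key hook exercised by `TestUnmarshalWithTransform` above is easiest to see end to end in a small standalone program. The following is a minimal sketch and not part of this changeset: it assumes the forked package is importable as `github.com/coreos/yaml` (the same path the tests above import) and uses only `UnmarshalMappingKeyTransform` and `Unmarshal` as shown in this diff.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/coreos/yaml"
)

func main() {
	// Normalize hyphenated mapping keys to underscores before decoding,
	// mirroring TestUnmarshalWithTransform above.
	yaml.UnmarshalMappingKeyTransform = func(k string) string {
		return strings.Replace(k, "-", "_", -1)
	}

	out := map[string]string{}
	if err := yaml.Unmarshal([]byte("etcd-prefix: /coreos.com/network2\n"), out); err != nil {
		panic(err)
	}
	// The hyphenated key ends up stored under the underscored name.
	fmt.Println(out["etcd_prefix"])
}
```

Because the hook is a package-level variable, it applies to every subsequent `Unmarshal` call in the process, which is why the fork keeps the identity transform as the default.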
@@ -973,9 +973,9 @@ func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
         if bytes.HasPrefix(tag, tag_directive.prefix) {
             emitter.tag_data.handle = tag_directive.handle
             emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
-        }
             return true
         }
+    }
     emitter.tag_data.suffix = tag
     return true
 }
@@ -1279,6 +1279,9 @@ func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_
     for k := 0; k < w; k++ {
         octet := value[i]
         i++
+        if !put(emitter, '%') {
+            return false
+        }
 
         c := octet >> 4
         if c < 10 {
@@ -2,8 +2,10 @@ package yaml
 
 import (
     "reflect"
+    "regexp"
     "sort"
     "strconv"
+    "strings"
     "time"
 )
 
@@ -50,14 +52,19 @@ func (e *encoder) must(ok bool) {
         if msg == "" {
             msg = "Unknown problem generating YAML content"
         }
-        panic(msg)
+        fail(msg)
     }
 }
 
 func (e *encoder) marshal(tag string, in reflect.Value) {
+    if !in.IsValid() {
+        e.nilv()
+        return
+    }
     var value interface{}
     if getter, ok := in.Interface().(Getter); ok {
         tag, value = getter.GetYAML()
+        tag = longTag(tag)
         if value == nil {
             e.nilv()
             return
@@ -98,7 +105,7 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
     case reflect.Bool:
         e.boolv(tag, in)
     default:
-        panic("Can't marshal type yet: " + in.Type().String())
+        panic("Can't marshal type: " + in.Type().String())
     }
 }
 
@@ -167,11 +174,46 @@ func (e *encoder) slicev(tag string, in reflect.Value) {
     e.emit()
 }
 
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+    // Fast path.
+    if s == "" {
+        return false
+    }
+    c := s[0]
+    if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+        return false
+    }
+    // Do the full match.
+    return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
 func (e *encoder) stringv(tag string, in reflect.Value) {
     var style yaml_scalar_style_t
     s := in.String()
-    if rtag, _ := resolve("", s); rtag != "!!str" {
+    rtag, rs := resolve("", s)
+    if rtag == yaml_BINARY_TAG {
+        if tag == "" || tag == yaml_STR_TAG {
+            tag = rtag
+            s = rs.(string)
+        } else if tag == yaml_BINARY_TAG {
+            fail("explicitly tagged !!binary data must be base64-encoded")
+        } else {
+            fail("cannot marshal invalid UTF-8 data as " + shortTag(tag))
+        }
+    }
+    if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
         style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+    } else if strings.Contains(s, "\n") {
+        style = yaml_LITERAL_SCALAR_STYLE
     } else {
         style = yaml_PLAIN_SCALAR_STYLE
     }
@@ -218,9 +260,6 @@ func (e *encoder) nilv() {
 
 func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
     implicit := tag == ""
-    if !implicit {
-        style = yaml_PLAIN_SCALAR_STYLE
-    }
     e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
     e.emit()
 }
@@ -2,12 +2,13 @@ package yaml_test
 
 import (
     "fmt"
-    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
-    . "gopkg.in/check.v1"
     "math"
     "strconv"
     "strings"
     "time"
+
+    "github.com/coreos/yaml"
+    . "gopkg.in/check.v1"
 )
 
 var marshalIntTest = 123
@@ -17,6 +18,9 @@ var marshalTests = []struct {
     data string
 }{
     {
+        nil,
+        "null\n",
+    }, {
         &struct{}{},
         "{}\n",
     }, {
@@ -87,7 +91,7 @@ var marshalTests = []struct {
         "v:\n- A\n- B\n",
     }, {
         map[string][]string{"v": []string{"A", "B\nC"}},
-        "v:\n- A\n- 'B\n\n C'\n",
+        "v:\n- A\n- |-\n B\n C\n",
     }, {
         map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
         "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
@@ -220,11 +224,39 @@ var marshalTests = []struct {
         "a: 3s\n",
     },
 
-    // Issue #24.
+    // Issue #24: bug in map merging logic.
     {
         map[string]string{"a": "<foo>"},
         "a: <foo>\n",
     },
+
+    // Issue #34: marshal unsupported base 60 floats quoted for compatibility
+    // with old YAML 1.1 parsers.
+    {
+        map[string]string{"a": "1:1"},
+        "a: \"1:1\"\n",
+    },
+
+    // Binary data.
+    {
+        map[string]string{"a": "\x00"},
+        "a: \"\\0\"\n",
+    }, {
+        map[string]string{"a": "\x80\x81\x82"},
+        "a: !!binary gIGC\n",
+    }, {
+        map[string]string{"a": strings.Repeat("\x90", 54)},
+        "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+    }, {
+        map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}},
+        "a: !!binary gIGC\n",
+    },
+
+    // Escaping of tags.
+    {
+        map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}},
+        "a: !<foo%21bar> 1\n",
+    },
 }
 
 func (s *S) TestMarshal(c *C) {
@@ -238,21 +270,30 @@ func (s *S) TestMarshal(c *C) {
 var marshalErrorTests = []struct {
     value interface{}
     error string
-}{
-    {
-        &struct {
+    panic string
+}{{
+    value: &struct {
         B       int
         inlineB ",inline"
     }{1, inlineB{2, inlineC{3}}},
-        `Duplicated key 'b' in struct struct \{ B int; .*`,
-    },
-}
+    panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
+}, {
+    value: typeWithGetter{"!!binary", "\x80"},
+    error: "YAML error: explicitly tagged !!binary data must be base64-encoded",
+}, {
+    value: typeWithGetter{"!!float", "\x80"},
+    error: `YAML error: cannot marshal invalid UTF-8 data as !!float`,
+}}
 
 func (s *S) TestMarshalErrors(c *C) {
     for _, item := range marshalErrorTests {
+        if item.panic != "" {
+            c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
+        } else {
             _, err := yaml.Marshal(item.value)
             c.Assert(err, ErrorMatches, item.error)
         }
+    }
 }
 
 var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"}
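The emitter-side behaviour asserted by the marshal tests above can be summarised in a short sketch. This is illustrative only and assumes the fork is available under the import path `github.com/coreos/yaml`; the expected outputs in the comments are taken directly from the test table.

```go
package main

import (
	"fmt"

	"github.com/coreos/yaml"
)

func main() {
	// Strings that are not valid UTF-8 are emitted as !!binary
	// (see the resolver and encoder changes in this diff).
	out, err := yaml.Marshal(map[string]string{"a": "\x80\x81\x82"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // a: !!binary gIGC

	// Strings that look like YAML 1.1 base 60 floats are quoted so that
	// old parsers do not reinterpret them (isBase60Float in encode.go).
	out, err = yaml.Marshal(map[string]string{"a": "1:1"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // a: "1:1"
}
```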
Godeps/_workspace/src/github.com/coreos/yaml/resolve.go (generated, vendored; new file, 190 lines)

@@ -0,0 +1,190 @@
+package yaml
+
+import (
+    "encoding/base64"
+    "fmt"
+    "math"
+    "strconv"
+    "strings"
+    "unicode/utf8"
+)
+
+// TODO: merge, timestamps, base 60 floats, omap.
+
+type resolveMapItem struct {
+    value interface{}
+    tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+    t := resolveTable
+    t[int('+')] = 'S' // Sign
+    t[int('-')] = 'S'
+    for _, c := range "0123456789" {
+        t[int(c)] = 'D' // Digit
+    }
+    for _, c := range "yYnNtTfFoO~" {
+        t[int(c)] = 'M' // In map
+    }
+    t[int('.')] = '.' // Float (potentially in map)
+
+    var resolveMapList = []struct {
+        v   interface{}
+        tag string
+        l   []string
+    }{
+        {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+        {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+        {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+        {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+        {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+        {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+        {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+        {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+        {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+        {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+        {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+        {"<<", yaml_MERGE_TAG, []string{"<<"}},
+    }
+
+    m := resolveMap
+    for _, item := range resolveMapList {
+        for _, s := range item.l {
+            m[s] = resolveMapItem{item.v, item.tag}
+        }
+    }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+    // TODO This can easily be made faster and produce less garbage.
+    if strings.HasPrefix(tag, longTagPrefix) {
+        return "!!" + tag[len(longTagPrefix):]
+    }
+    return tag
+}
+
+func longTag(tag string) string {
+    if strings.HasPrefix(tag, "!!") {
+        return longTagPrefix + tag[2:]
+    }
+    return tag
+}
+
+func resolvableTag(tag string) bool {
+    switch tag {
+    case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+        return true
+    }
+    return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+    if !resolvableTag(tag) {
+        return tag, in
+    }
+
+    defer func() {
+        switch tag {
+        case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+            return
+        }
+        fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)))
+    }()
+
+    // Any data is accepted as a !!str or !!binary.
+    // Otherwise, the prefix is enough of a hint about what it might be.
+    hint := byte('N')
+    if in != "" {
+        hint = resolveTable[in[0]]
+    }
+    if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+        // Handle things we can lookup in a map.
+        if item, ok := resolveMap[in]; ok {
+            return item.tag, item.value
+        }
+
+        // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+        // are purposefully unsupported here. They're still quoted on
+        // the way out for compatibility with other parser, though.
+
+        switch hint {
+        case 'M':
+            // We've already checked the map above.
+
+        case '.':
+            // Not in the map, so maybe a normal float.
+            floatv, err := strconv.ParseFloat(in, 64)
+            if err == nil {
+                return yaml_FLOAT_TAG, floatv
+            }
+
+        case 'D', 'S':
+            // Int, float, or timestamp.
+            plain := strings.Replace(in, "_", "", -1)
+            intv, err := strconv.ParseInt(plain, 0, 64)
+            if err == nil {
+                if intv == int64(int(intv)) {
+                    return yaml_INT_TAG, int(intv)
+                } else {
+                    return yaml_INT_TAG, intv
+                }
+            }
+            floatv, err := strconv.ParseFloat(plain, 64)
+            if err == nil {
+                return yaml_FLOAT_TAG, floatv
+            }
+            if strings.HasPrefix(plain, "0b") {
+                intv, err := strconv.ParseInt(plain[2:], 2, 64)
+                if err == nil {
+                    return yaml_INT_TAG, int(intv)
+                }
+            } else if strings.HasPrefix(plain, "-0b") {
+                intv, err := strconv.ParseInt(plain[3:], 2, 64)
+                if err == nil {
+                    return yaml_INT_TAG, -int(intv)
+                }
+            }
+            // XXX Handle timestamps here.
+
+        default:
+            panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+        }
+    }
+    if tag == yaml_BINARY_TAG {
+        return yaml_BINARY_TAG, in
+    }
+    if utf8.ValidString(in) {
+        return yaml_STR_TAG, in
+    }
+    return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+    const lineLen = 70
+    encLen := base64.StdEncoding.EncodedLen(len(s))
+    lines := encLen/lineLen + 1
+    buf := make([]byte, encLen*2+lines)
+    in := buf[0:encLen]
+    out := buf[encLen:]
+    base64.StdEncoding.Encode(in, []byte(s))
+    k := 0
+    for i := 0; i < len(in); i += lineLen {
+        j := i + lineLen
+        if j > len(in) {
+            j = len(in)
+        }
+        k += copy(out[k:], in[i:j])
+        if lines > 1 {
+            out[k] = '\n'
+            k++
+        }
+    }
+    return string(out[:k])
+}
@@ -10,23 +10,20 @@ import (
     "errors"
     "fmt"
     "reflect"
-    "runtime"
     "strings"
     "sync"
 )
 
+type yamlError string
+
+func fail(msg string) {
+    panic(yamlError(msg))
+}
+
 func handleErr(err *error) {
     if r := recover(); r != nil {
-        if _, ok := r.(runtime.Error); ok {
-            panic(r)
-        } else if _, ok := r.(*reflect.ValueError); ok {
-            panic(r)
-        } else if _, ok := r.(externalPanic); ok {
-            panic(r)
-        } else if s, ok := r.(string); ok {
-            *err = errors.New("YAML error: " + s)
-        } else if e, ok := r.(error); ok {
-            *err = e
+        if e, ok := r.(yamlError); ok {
+            *err = errors.New("YAML error: " + string(e))
         } else {
             panic(r)
         }
@@ -78,7 +75,7 @@ type Getter interface {
 //         F int `yaml:"a,omitempty"`
 //         B int
 //     }
-//     var T t
+//     var t T
 //     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
 //
 // See the documentation of Marshal for the format of tags and a list of
@@ -87,11 +84,15 @@ type Getter interface {
 func Unmarshal(in []byte, out interface{}) (err error) {
     defer handleErr(&err)
     d := newDecoder()
-    p := newParser(in)
+    p := newParser(in, UnmarshalMappingKeyTransform)
     defer p.destroy()
     node := p.parse()
     if node != nil {
-        d.unmarshal(node, reflect.ValueOf(out))
+        v := reflect.ValueOf(out)
+        if v.Kind() == reflect.Ptr && !v.IsNil() {
+            v = v.Elem()
+        }
+        d.unmarshal(node, v)
    }
     return nil
 }
@@ -145,6 +146,17 @@ func Marshal(in interface{}) (out []byte, err error) {
     return
 }
 
+// UnmarshalMappingKeyTransform is a string transformation that is applied to
+// each mapping key in a YAML document before it is unmarshalled. By default,
+// UnmarshalMappingKeyTransform is an identity transform (no modification).
+var UnmarshalMappingKeyTransform transformString = identityTransform
+
+type transformString func(in string) (out string)
+
+func identityTransform(in string) (out string) {
+    return in
+}
+
 
 // --------------------------------------------------------------------------
 // Maintain a mapping of keys to structure field indexes
@@ -174,12 +186,6 @@ type fieldInfo struct {
 var structMap = make(map[reflect.Type]*structInfo)
 var fieldMapMutex sync.RWMutex
 
-type externalPanic string
-
-func (e externalPanic) String() string {
-    return string(e)
-}
-
 func getStructInfo(st reflect.Type) (*structInfo, error) {
     fieldMapMutex.RLock()
     sinfo, found := structMap[st]
@@ -220,8 +226,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
             case "inline":
                 inline = true
             default:
-                msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
-                panic(externalPanic(msg))
+                return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
             }
         }
         tag = fields[0]
@@ -229,6 +234,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 
         if inline {
             switch field.Type.Kind() {
+            // TODO: Implement support for inline maps.
             //case reflect.Map:
             //    if inlineMap >= 0 {
             //        return nil, errors.New("Multiple ,inline maps in struct " + st.String())
@@ -256,8 +262,8 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
                 fieldsList = append(fieldsList, finfo)
             }
         default:
-            //panic("Option ,inline needs a struct value or map field")
-            panic("Option ,inline needs a struct value field")
+            //return nil, errors.New("Option ,inline needs a struct value or map field")
+            return nil, errors.New("Option ,inline needs a struct value field")
         }
         continue
     }
@@ -294,6 +294,10 @@ const (
     yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
     yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
 
+    // Not in original libyaml.
+    yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+    yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
     yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
     yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
     yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.

147 Godeps/_workspace/src/gopkg.in/yaml.v1/resolve.go (generated, vendored, deleted)
@@ -1,147 +0,0 @@
|
|||||||
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: merge, timestamps, base 60 floats, omap.
|
|
||||||
|
|
||||||
type resolveMapItem struct {
|
|
||||||
value interface{}
|
|
||||||
tag string
|
|
||||||
}
|
|
||||||
|
|
||||||
var resolveTable = make([]byte, 256)
|
|
||||||
var resolveMap = make(map[string]resolveMapItem)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
t := resolveTable
|
|
||||||
t[int('+')] = 'S' // Sign
|
|
||||||
t[int('-')] = 'S'
|
|
||||||
for _, c := range "0123456789" {
|
|
||||||
t[int(c)] = 'D' // Digit
|
|
||||||
}
|
|
||||||
for _, c := range "yYnNtTfFoO~" {
|
|
||||||
t[int(c)] = 'M' // In map
|
|
||||||
}
|
|
||||||
t[int('.')] = '.' // Float (potentially in map)
|
|
||||||
|
|
||||||
var resolveMapList = []struct {
|
|
||||||
v interface{}
|
|
||||||
tag string
|
|
||||||
l []string
|
|
||||||
}{
|
|
||||||
{true, "!!bool", []string{"y", "Y", "yes", "Yes", "YES"}},
|
|
||||||
{true, "!!bool", []string{"true", "True", "TRUE"}},
|
|
||||||
{true, "!!bool", []string{"on", "On", "ON"}},
|
|
||||||
{false, "!!bool", []string{"n", "N", "no", "No", "NO"}},
|
|
||||||
{false, "!!bool", []string{"false", "False", "FALSE"}},
|
|
||||||
{false, "!!bool", []string{"off", "Off", "OFF"}},
|
|
||||||
{nil, "!!null", []string{"~", "null", "Null", "NULL"}},
|
|
||||||
{math.NaN(), "!!float", []string{".nan", ".NaN", ".NAN"}},
|
|
||||||
{math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
|
|
||||||
{math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
|
|
||||||
{math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
|
|
||||||
{"<<", "!!merge", []string{"<<"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := resolveMap
|
|
||||||
for _, item := range resolveMapList {
|
|
||||||
for _, s := range item.l {
|
|
||||||
m[s] = resolveMapItem{item.v, item.tag}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const longTagPrefix = "tag:yaml.org,2002:"
|
|
||||||
|
|
||||||
func shortTag(tag string) string {
|
|
||||||
if strings.HasPrefix(tag, longTagPrefix) {
|
|
||||||
return "!!" + tag[len(longTagPrefix):]
|
|
||||||
}
|
|
||||||
return tag
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolvableTag(tag string) bool {
|
|
||||||
switch tag {
|
|
||||||
case "", "!!str", "!!bool", "!!int", "!!float", "!!null":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
|
||||||
tag = shortTag(tag)
|
|
||||||
if !resolvableTag(tag) {
|
|
||||||
return tag, in
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if tag != "" && tag != rtag {
|
|
||||||
panic("Can't decode " + rtag + " '" + in + "' as a " + tag)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if in == "" {
|
|
||||||
return "!!null", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
c := resolveTable[in[0]]
|
|
||||||
if c == 0 {
|
|
||||||
// It's a string for sure. Nothing to do.
|
|
||||||
return "!!str", in
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle things we can lookup in a map.
|
|
||||||
if item, ok := resolveMap[in]; ok {
|
|
||||||
return item.tag, item.value
|
|
||||||
}
|
|
||||||
|
|
||||||
switch c {
|
|
||||||
case 'M':
|
|
||||||
// We've already checked the map above.
|
|
||||||
|
|
||||||
case '.':
|
|
||||||
// Not in the map, so maybe a normal float.
|
|
||||||
floatv, err := strconv.ParseFloat(in, 64)
|
|
||||||
if err == nil {
|
|
||||||
return "!!float", floatv
|
|
||||||
}
|
|
||||||
// XXX Handle base 60 floats here (WTF!)
|
|
||||||
|
|
||||||
case 'D', 'S':
|
|
||||||
// Int, float, or timestamp.
|
|
||||||
plain := strings.Replace(in, "_", "", -1)
|
|
||||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
|
||||||
if err == nil {
|
|
||||||
if intv == int64(int(intv)) {
|
|
||||||
return "!!int", int(intv)
|
|
||||||
} else {
|
|
||||||
return "!!int", intv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
floatv, err := strconv.ParseFloat(plain, 64)
|
|
||||||
if err == nil {
|
|
||||||
return "!!float", floatv
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(plain, "0b") {
|
|
||||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
|
||||||
if err == nil {
|
|
||||||
return "!!int", int(intv)
|
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(plain, "-0b") {
|
|
||||||
intv, err := strconv.ParseInt(plain[3:], 2, 64)
|
|
||||||
if err == nil {
|
|
||||||
return "!!int", -int(intv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// XXX Handle timestamps here.
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic("resolveTable item not yet handled: " +
|
|
||||||
string([]byte{c}) + " (with " + in + ")")
|
|
||||||
}
|
|
||||||
return "!!str", in
|
|
||||||
}
|
|
@@ -19,9 +19,10 @@ package config
 import (
     "fmt"
     "reflect"
+    "regexp"
     "strings"
 
-    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
+    "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
 )
 
 // CloudConfig encapsulates the entire cloud-config configuration file and maps
@@ -29,14 +30,7 @@ import (
 // used for internal use) have the YAML tag '-' so that they aren't marshalled.
 type CloudConfig struct {
     SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
-    Coreos struct {
-        Etcd    Etcd    `yaml:"etcd"`
-        Flannel Flannel `yaml:"flannel"`
-        Fleet   Fleet   `yaml:"fleet"`
-        OEM     OEM     `yaml:"oem"`
-        Update  Update  `yaml:"update"`
-        Units   []Unit  `yaml:"units"`
-    } `yaml:"coreos"`
+    CoreOS CoreOS `yaml:"coreos"`
     WriteFiles []File `yaml:"write_files"`
     Hostname   string `yaml:"hostname"`
     Users      []User `yaml:"users"`
@@ -45,6 +39,16 @@ type CloudConfig struct {
     NetworkConfig string `yaml:"-"`
 }
 
+type CoreOS struct {
+    Etcd      Etcd      `yaml:"etcd"`
+    Flannel   Flannel   `yaml:"flannel"`
+    Fleet     Fleet     `yaml:"fleet"`
+    Locksmith Locksmith `yaml:"locksmith"`
+    OEM       OEM       `yaml:"oem"`
+    Update    Update    `yaml:"update"`
+    Units     []Unit    `yaml:"units"`
+}
+
 func IsCloudConfig(userdata string) bool {
     header := strings.SplitN(userdata, "\n", 2)[0]
 
@@ -60,15 +64,12 @@ func IsCloudConfig(userdata string) bool {
 // string of YAML), returning any error encountered. It will ignore unknown
 // fields but log encountering them.
 func NewCloudConfig(contents string) (*CloudConfig, error) {
+    yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
+        return strings.Replace(nameIn, "-", "_", -1)
+    }
     var cfg CloudConfig
-    ncontents, err := normalizeConfig(contents)
-    if err != nil {
-        return &cfg, err
-    }
-    if err = yaml.Unmarshal(ncontents, &cfg); err != nil {
-        return &cfg, err
-    }
-    return &cfg, nil
+    err := yaml.Unmarshal([]byte(contents), &cfg)
+    return &cfg, err
 }
 
 func (cc CloudConfig) String() string {
@@ -91,7 +92,7 @@ func IsZero(c interface{}) bool {
 
 type ErrorValid struct {
     Value string
-    Valid []string
+    Valid string
     Field string
 }
 
@@ -125,16 +126,15 @@ func AssertValid(value reflect.Value, valid string) *ErrorValid {
     if valid == "" || isZero(value) {
         return nil
     }
 
     vs := fmt.Sprintf("%v", value.Interface())
-    valids := strings.Split(valid, ",")
-    for _, valid := range valids {
-        if vs == valid {
-            return nil
-        }
+    if m, _ := regexp.MatchString(valid, vs); m {
+        return nil
     }
     return &ErrorValid{
         Value: vs,
-        Valid: valids,
+        Valid: valid,
     }
 }
 
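With the key transform set inside `NewCloudConfig`, hyphenated and underscored spellings of a key should parse to the same struct without the old whole-document normalization pass. A rough caller sketch under that assumption (error handling trimmed for brevity; the YAML strings are illustrative):

```go
package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/config"
)

func main() {
    // Both spellings should populate CloudConfig.CoreOS.Update.RebootStrategy.
    cfgA, _ := config.NewCloudConfig("#cloud-config\ncoreos:\n  update:\n    reboot-strategy: etcd-lock")
    cfgB, _ := config.NewCloudConfig("#cloud-config\ncoreos:\n  update:\n    reboot_strategy: etcd-lock")
    fmt.Println(cfgA.CoreOS.Update.RebootStrategy == cfgB.CoreOS.Update.RebootStrategy) // true
}
```

Note that `AssertValid` now treats the `valid` tag as a regular expression rather than a comma-separated list, which is why the struct tags later in this diff switch to `^(...)$` patterns.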
@@ -156,31 +156,3 @@ func isZero(v reflect.Value) bool {
 func isFieldExported(f reflect.StructField) bool {
     return f.PkgPath == ""
 }
-
-func normalizeConfig(config string) ([]byte, error) {
-    var cfg map[interface{}]interface{}
-    if err := yaml.Unmarshal([]byte(config), &cfg); err != nil {
-        return nil, err
-    }
-    return yaml.Marshal(normalizeKeys(cfg))
-}
-
-func normalizeKeys(m map[interface{}]interface{}) map[interface{}]interface{} {
-    for k, v := range m {
-        if m, ok := m[k].(map[interface{}]interface{}); ok {
-            normalizeKeys(m)
-        }
-
-        if s, ok := m[k].([]interface{}); ok {
-            for _, e := range s {
-                if m, ok := e.(map[interface{}]interface{}); ok {
-                    normalizeKeys(m)
-                }
-            }
-        }
-
-        delete(m, k)
-        m[strings.Replace(fmt.Sprint(k), "-", "_", -1)] = v
-    }
-    return m
-}
@@ -18,13 +18,67 @@ package config
 
 import (
     "reflect"
+    "regexp"
     "strings"
     "testing"
 )
 
+func TestNewCloudConfig(t *testing.T) {
+    tests := []struct {
+        contents string
+
+        config CloudConfig
+    }{
+        {},
+        {
+            contents: "#cloud-config\nwrite_files:\n - path: underscore",
+            config:   CloudConfig{WriteFiles: []File{File{Path: "underscore"}}},
+        },
+        {
+            contents: "#cloud-config\nwrite-files:\n - path: hyphen",
+            config:   CloudConfig{WriteFiles: []File{File{Path: "hyphen"}}},
+        },
+        {
+            contents: "#cloud-config\ncoreos:\n update:\n  reboot-strategy: off",
+            config:   CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "off"}}},
+        },
+        {
+            contents: "#cloud-config\ncoreos:\n update:\n  reboot-strategy: false",
+            config:   CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "false"}}},
+        },
+        {
+            contents: "#cloud-config\nwrite_files:\n - permissions: 0744",
+            config:   CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
+        },
+        {
+            contents: "#cloud-config\nwrite_files:\n - permissions: 744",
+            config:   CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
+        },
+        {
+            contents: "#cloud-config\nwrite_files:\n - permissions: '0744'",
+            config:   CloudConfig{WriteFiles: []File{File{RawFilePermissions: "0744"}}},
+        },
+        {
+            contents: "#cloud-config\nwrite_files:\n - permissions: '744'",
+            config:   CloudConfig{WriteFiles: []File{File{RawFilePermissions: "744"}}},
+        },
+    }
+
+    for i, tt := range tests {
+        config, err := NewCloudConfig(tt.contents)
+        if err != nil {
+            t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
+        }
+        if !reflect.DeepEqual(&tt.config, config) {
+            t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config)
+        }
+    }
+}
+
 func TestIsZero(t *testing.T) {
-    for _, tt := range []struct {
+    tests := []struct {
         c interface{}
 
         empty bool
     }{
         {struct{}{}, true},
@@ -34,7 +88,9 @@ func TestIsZero(t *testing.T) {
         {struct{ A string }{A: "hello"}, false},
         {struct{ A int }{}, true},
         {struct{ A int }{A: 1}, false},
-    } {
+    }
+
+    for _, tt := range tests {
         if empty := IsZero(tt.c); tt.empty != empty {
             t.Errorf("bad result (%q): want %t, got %t", tt.c, tt.empty, empty)
         }
@@ -42,66 +98,68 @@ func TestIsZero(t *testing.T) {
 }
 
 func TestAssertStructValid(t *testing.T) {
-    for _, tt := range []struct {
+    tests := []struct {
         c interface{}
 
         err error
     }{
         {struct{}{}, nil},
         {struct {
-            A, b string `valid:"1,2"`
+            A, b string `valid:"^1|2$"`
         }{}, nil},
         {struct {
-            A, b string `valid:"1,2"`
+            A, b string `valid:"^1|2$"`
         }{A: "1", b: "2"}, nil},
         {struct {
-            A, b string `valid:"1,2"`
+            A, b string `valid:"^1|2$"`
         }{A: "1", b: "hello"}, nil},
         {struct {
-            A, b string `valid:"1,2"`
-        }{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: []string{"1", "2"}}},
+            A, b string `valid:"^1|2$"`
+        }{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: "^1|2$"}},
         {struct {
-            A, b int `valid:"1,2"`
+            A, b int `valid:"^1|2$"`
         }{}, nil},
         {struct {
-            A, b int `valid:"1,2"`
+            A, b int `valid:"^1|2$"`
         }{A: 1, b: 2}, nil},
         {struct {
-            A, b int `valid:"1,2"`
+            A, b int `valid:"^1|2$"`
         }{A: 1, b: 9}, nil},
         {struct {
-            A, b int `valid:"1,2"`
-        }{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: []string{"1", "2"}}},
-    } {
+            A, b int `valid:"^1|2$"`
+        }{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: "^1|2$"}},
+    }
+
+    for _, tt := range tests {
         if err := AssertStructValid(tt.c); !reflect.DeepEqual(tt.err, err) {
             t.Errorf("bad result (%q): want %q, got %q", tt.c, tt.err, err)
         }
     }
 }
 
-func TestCloudConfigInvalidKeys(t *testing.T) {
-    defer func() {
-        if r := recover(); r != nil {
-            t.Fatalf("panic while instantiating CloudConfig with nil keys: %v", r)
-        }
-    }()
-
-    for _, tt := range []struct {
-        contents string
-    }{
-        {"coreos:"},
-        {"ssh_authorized_keys:"},
-        {"ssh_authorized_keys:\n -"},
-        {"ssh_authorized_keys:\n - 0:"},
-        {"write_files:"},
-        {"write_files:\n -"},
-        {"write_files:\n - 0:"},
-        {"users:"},
-        {"users:\n -"},
-        {"users:\n - 0:"},
-    } {
-        _, err := NewCloudConfig(tt.contents)
-        if err != nil {
-            t.Fatalf("error instantiating CloudConfig with invalid keys: %v", err)
-        }
+func TestConfigCompile(t *testing.T) {
+    tests := []interface{}{
+        Etcd{},
+        File{},
+        Flannel{},
+        Fleet{},
+        Locksmith{},
+        OEM{},
+        Unit{},
+        Update{},
+    }
+
+    for _, tt := range tests {
+        ttt := reflect.TypeOf(tt)
+        for i := 0; i < ttt.NumField(); i++ {
+            ft := ttt.Field(i)
+            if !isFieldExported(ft) {
+                continue
+            }
+
+            if _, err := regexp.Compile(ft.Tag.Get("valid")); err != nil {
+                t.Errorf("bad regexp(%s.%s): want %v, got %s", ttt.Name(), ft.Name, nil, err)
+            }
+        }
     }
 }
@@ -136,7 +194,7 @@ hostname:
     if cfg.Hostname != "foo" {
         t.Fatalf("hostname not correctly set when invalid keys are present")
     }
-    if cfg.Coreos.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
+    if cfg.CoreOS.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
         t.Fatalf("etcd section not correctly set when invalid keys are present")
     }
     if len(cfg.WriteFiles) < 1 || cfg.WriteFiles[0].Content != "fun" || cfg.WriteFiles[0].Path != "/var/party" {
@@ -242,10 +300,10 @@ hostname: trontastic
         }
     }
 
-    if len(cfg.Coreos.Units) != 1 {
+    if len(cfg.CoreOS.Units) != 1 {
         t.Error("Failed to parse correct number of units")
     } else {
-        u := cfg.Coreos.Units[0]
+        u := cfg.CoreOS.Units[0]
         expect := `[Match]
 Name=eth47
 
@@ -261,55 +319,18 @@ Address=10.209.171.177/19
         if u.Name != "50-eth0.network" {
             t.Errorf("Unit has incorrect name %s", u.Name)
         }
-        if u.Type() != "network" {
-            t.Errorf("Unit has incorrect type '%s'", u.Type())
-        }
     }
 
-    if cfg.Coreos.OEM.ID != "rackspace" {
-        t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.Coreos.OEM.ID)
+    if cfg.CoreOS.OEM.ID != "rackspace" {
+        t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.CoreOS.OEM.ID)
     }
 
     if cfg.Hostname != "trontastic" {
         t.Errorf("Failed to parse hostname")
     }
-    if cfg.Coreos.Update.RebootStrategy != "reboot" {
+    if cfg.CoreOS.Update.RebootStrategy != "reboot" {
         t.Errorf("Failed to parse locksmith strategy")
     }
-
-    contents = `
-coreos:
-write_files:
- - path: /home/me/notes
-   permissions: 0744
-`
-    cfg, err = NewCloudConfig(contents)
-    if err != nil {
-        t.Fatalf("Encountered unexpected error :%v", err)
-    }
-
-    if len(cfg.WriteFiles) != 1 {
-        t.Error("Failed to parse correct number of write_files")
-    } else {
-        wf := cfg.WriteFiles[0]
-        if wf.Content != "" {
-            t.Errorf("WriteFile has incorrect contents '%s'", wf.Content)
-        }
-        if wf.Encoding != "" {
-            t.Errorf("WriteFile has incorrect encoding %s", wf.Encoding)
-        }
-        // Verify that the normalization of the config converted 0744 to its decimal
-        // representation, 484.
-        if wf.RawFilePermissions != "484" {
-            t.Errorf("WriteFile has incorrect permissions %s", wf.RawFilePermissions)
-        }
-        if wf.Path != "/home/me/notes" {
-            t.Errorf("WriteFile has incorrect path %s", wf.Path)
-        }
-        if wf.Owner != "" {
-            t.Errorf("WriteFile has incorrect owner %s", wf.Owner)
-        }
-    }
 }
 
 // Assert that our interface conversion doesn't panic
@@ -338,26 +359,6 @@ func TestCloudConfigSerializationHeader(t *testing.T) {
     }
 }
 
-// TestDropInIgnored asserts that users are unable to set DropIn=True on units
-func TestDropInIgnored(t *testing.T) {
-    contents := `
-coreos:
-  units:
-  - name: test
-    dropin: true
-`
-    cfg, err := NewCloudConfig(contents)
-    if err != nil || len(cfg.Coreos.Units) != 1 {
-        t.Fatalf("Encountered unexpected error: %v", err)
-    }
-    if len(cfg.Coreos.Units) != 1 || cfg.Coreos.Units[0].Name != "test" {
-        t.Fatalf("Expected 1 unit, but got %d: %v", len(cfg.Coreos.Units), cfg.Coreos.Units)
-    }
-    if cfg.Coreos.Units[0].DropIn {
-        t.Errorf("dropin option on unit in cloud-config was not ignored!")
-    }
-}
-
 func TestCloudConfigUsers(t *testing.T) {
     contents := `
 users:
||||||
func TestNormalizeKeys(t *testing.T) {
|
|
||||||
for _, tt := range []struct {
|
|
||||||
in string
|
|
||||||
out string
|
|
||||||
}{
|
|
||||||
{"my_key_name: the-value\n", "my_key_name: the-value\n"},
|
|
||||||
{"my-key_name: the-value\n", "my_key_name: the-value\n"},
|
|
||||||
{"my-key-name: the-value\n", "my_key_name: the-value\n"},
|
|
||||||
|
|
||||||
{"a:\n- key_name: the-value\n", "a:\n- key_name: the-value\n"},
|
|
||||||
{"a:\n- key-name: the-value\n", "a:\n- key_name: the-value\n"},
|
|
||||||
|
|
||||||
{"a:\n b:\n - key_name: the-value\n", "a:\n b:\n - key_name: the-value\n"},
|
|
||||||
{"a:\n b:\n - key-name: the-value\n", "a:\n b:\n - key_name: the-value\n"},
|
|
||||||
|
|
||||||
{"coreos:\n update:\n reboot-strategy: off\n", "coreos:\n update:\n reboot_strategy: false\n"},
|
|
||||||
} {
|
|
||||||
out, err := normalizeConfig(tt.in)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("bad error (%q): want nil, got %s", tt.in, err)
|
|
||||||
}
|
|
||||||
if string(out) != tt.out {
|
|
||||||
t.Fatalf("bad normalization (%q): want %q, got %q", tt.in, tt.out, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
56
config/decode.go
Normal file
56
config/decode.go
Normal file
@@ -0,0 +1,56 @@
+package config
+
+import (
+    "bytes"
+    "compress/gzip"
+    "encoding/base64"
+    "fmt"
+)
+
+func DecodeBase64Content(content string) ([]byte, error) {
+    output, err := base64.StdEncoding.DecodeString(content)
+
+    if err != nil {
+        return nil, fmt.Errorf("Unable to decode base64: %q", err)
+    }
+
+    return output, nil
+}
+
+func DecodeGzipContent(content string) ([]byte, error) {
+    gzr, err := gzip.NewReader(bytes.NewReader([]byte(content)))
+
+    if err != nil {
+        return nil, fmt.Errorf("Unable to decode gzip: %q", err)
+    }
+    defer gzr.Close()
+
+    buf := new(bytes.Buffer)
+    buf.ReadFrom(gzr)
+
+    return buf.Bytes(), nil
+}
+
+func DecodeContent(content string, encoding string) ([]byte, error) {
+    switch encoding {
+    case "":
+        return []byte(content), nil
+
+    case "b64", "base64":
+        return DecodeBase64Content(content)
+
+    case "gz", "gzip":
+        return DecodeGzipContent(content)
+
+    case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
+        gz, err := DecodeBase64Content(content)
+
+        if err != nil {
+            return nil, err
+        }
+
+        return DecodeGzipContent(string(gz))
+    }
+
+    return nil, fmt.Errorf("Unsupported encoding %q", encoding)
+}
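A quick sketch of how `DecodeContent` composes the two decoders for the combined encodings. The payload below is the one used by the validator tests later in this diff; it is base64 text wrapping a gzip stream of a short plain-text string, so the exact printed bytes are only an assumption here:

```go
package main

import (
    "fmt"

    "github.com/coreos/coreos-cloudinit/config"
)

func main() {
    // base64(gzip(<short text>)), as used in the write_files encoding tests below.
    payload := "H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA="
    out, err := config.DecodeContent(payload, "gzip+base64")
    if err != nil {
        fmt.Println("decode error:", err)
        return
    }
    fmt.Printf("%q\n", out) // prints the decoded plain text
}
```

Passing an unknown encoding string falls through the switch and returns the "Unsupported encoding" error, which is what the `Encoding` regexp on `File` (next hunk) guards against earlier in validation.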
@@ -21,28 +21,33 @@ type Etcd struct {
     BindAddr string `yaml:"bind_addr" env:"ETCD_BIND_ADDR"`
     CAFile   string `yaml:"ca_file"   env:"ETCD_CA_FILE"`
     CertFile string `yaml:"cert_file" env:"ETCD_CERT_FILE"`
-    ClusterActiveSize   string `yaml:"cluster_active_size"   env:"ETCD_CLUSTER_ACTIVE_SIZE"`
-    ClusterRemoveDelay  string `yaml:"cluster_remove_delay"  env:"ETCD_CLUSTER_REMOVE_DELAY"`
-    ClusterSyncInterval string `yaml:"cluster_sync_interval" env:"ETCD_CLUSTER_SYNC_INTERVAL"`
-    Cors                string `yaml:"cors"                  env:"ETCD_CORS"`
-    CPUProfileFile      string `yaml:"cpu_profile_file"      env:"ETCD_CPU_PROFILE_FILE"`
+    ClusterActiveSize   int     `yaml:"cluster_active_size"   env:"ETCD_CLUSTER_ACTIVE_SIZE"`
+    ClusterRemoveDelay  float64 `yaml:"cluster_remove_delay"  env:"ETCD_CLUSTER_REMOVE_DELAY"`
+    ClusterSyncInterval float64 `yaml:"cluster_sync_interval" env:"ETCD_CLUSTER_SYNC_INTERVAL"`
+    CorsOrigins         string  `yaml:"cors"                  env:"ETCD_CORS"`
     DataDir   string `yaml:"data_dir"  env:"ETCD_DATA_DIR"`
     Discovery string `yaml:"discovery" env:"ETCD_DISCOVERY"`
-    HTTPReadTimeout  string `yaml:"http_read_timeout"  env:"ETCD_HTTP_READ_TIMEOUT"`
-    HTTPWriteTimeout string `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
+    GraphiteHost     string  `yaml:"graphite_host"      env:"ETCD_GRAPHITE_HOST"`
+    HTTPReadTimeout  float64 `yaml:"http_read_timeout"  env:"ETCD_HTTP_READ_TIMEOUT"`
+    HTTPWriteTimeout float64 `yaml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
     KeyFile string `yaml:"key_file" env:"ETCD_KEY_FILE"`
-    MaxClusterSize   string `yaml:"max_cluster_size"   env:"ETCD_MAX_CLUSTER_SIZE"`
-    MaxResultBuffer  string `yaml:"max_result_buffer"  env:"ETCD_MAX_RESULT_BUFFER"`
-    MaxRetryAttempts string `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
+    MaxResultBuffer  int `yaml:"max_result_buffer"  env:"ETCD_MAX_RESULT_BUFFER"`
+    MaxRetryAttempts int `yaml:"max_retry_attempts" env:"ETCD_MAX_RETRY_ATTEMPTS"`
     Name         string `yaml:"name"           env:"ETCD_NAME"`
     PeerAddr     string `yaml:"peer_addr"      env:"ETCD_PEER_ADDR"`
     PeerBindAddr string `yaml:"peer_bind_addr" env:"ETCD_PEER_BIND_ADDR"`
     PeerCAFile   string `yaml:"peer_ca_file"   env:"ETCD_PEER_CA_FILE"`
     PeerCertFile string `yaml:"peer_cert_file" env:"ETCD_PEER_CERT_FILE"`
+    PeerElectionTimeout   int `yaml:"peer_election_timeout"   env:"ETCD_PEER_ELECTION_TIMEOUT"`
+    PeerHeartbeatInterval int `yaml:"peer_heartbeat_interval" env:"ETCD_PEER_HEARTBEAT_INTERVAL"`
     PeerKeyFile string `yaml:"peer_key_file" env:"ETCD_PEER_KEY_FILE"`
     Peers       string `yaml:"peers"         env:"ETCD_PEERS"`
     PeersFile   string `yaml:"peers_file"    env:"ETCD_PEERS_FILE"`
-    Snapshot    string `yaml:"snapshot"     env:"ETCD_SNAPSHOT"`
-    Verbose     string `yaml:"verbose"      env:"ETCD_VERBOSE"`
-    VeryVerbose string `yaml:"very_verbose" env:"ETCD_VERY_VERBOSE"`
+    RetryInterval   float64 `yaml:"retry_interval"    env:"ETCD_RETRY_INTERVAL"`
+    Snapshot        bool    `yaml:"snapshot"          env:"ETCD_SNAPSHOT"`
+    SnapshotCount   int     `yaml:"snapshot_count"    env:"ETCD_SNAPSHOTCOUNT"`
+    StrTrace        string  `yaml:"trace"             env:"ETCD_TRACE"`
+    Verbose         bool    `yaml:"verbose"           env:"ETCD_VERBOSE"`
+    VeryVerbose     bool    `yaml:"very_verbose"      env:"ETCD_VERY_VERBOSE"`
+    VeryVeryVerbose bool    `yaml:"very_very_verbose" env:"ETCD_VERY_VERY_VERBOSE"`
 }
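The paired `yaml`/`env` tags above are the contract between the cloud-config section and the environment variables handed to the etcd unit. The project's actual generator isn't shown in this diff, so here is a purely hypothetical reflection sketch of the idea (the helper name, the trimmed struct, and the skip-zero rule are all illustrative assumptions):

```go
package main

import (
    "fmt"
    "reflect"
)

// envLines walks a tagged struct and emits NAME=value pairs for the fields
// that are set, the way an etcd environment drop-in could be populated.
func envLines(s interface{}) []string {
    var lines []string
    v := reflect.ValueOf(s)
    t := v.Type()
    for i := 0; i < t.NumField(); i++ {
        name := t.Field(i).Tag.Get("env")
        val := v.Field(i)
        // Skip fields without an env tag and fields left at their zero value.
        if name == "" || val.Interface() == reflect.Zero(val.Type()).Interface() {
            continue
        }
        lines = append(lines, fmt.Sprintf("%s=%v", name, val.Interface()))
    }
    return lines
}

// A trimmed-down stand-in for the Etcd struct above.
type etcd struct {
    Name      string `env:"ETCD_NAME"`
    Discovery string `env:"ETCD_DISCOVERY"`
    Snapshot  bool   `env:"ETCD_SNAPSHOT"`
}

func main() {
    fmt.Println(envLines(etcd{Name: "node1", Snapshot: true}))
    // [ETCD_NAME=node1 ETCD_SNAPSHOT=true]
}
```

The switch from all-string fields to typed ints, floats, and bools is what forces the matching `reflect.Float64`/`reflect.Bool` cases added to the validator later in this diff.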
@@ -17,9 +17,9 @@
 package config
 
 type File struct {
-    Encoding           string `yaml:"-"`
+    Encoding           string `yaml:"encoding" valid:"^(base64|b64|gz|gzip|gz\\+base64|gzip\\+base64|gz\\+b64|gzip\\+b64)$"`
     Content            string `yaml:"content"`
     Owner              string `yaml:"owner"`
     Path               string `yaml:"path"`
-    RawFilePermissions string `yaml:"permissions"`
+    RawFilePermissions string `yaml:"permissions" valid:"^0?[0-7]{3,4}$"`
 }

71 config/file_test.go (new file)
@@ -0,0 +1,71 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+import (
+    "testing"
+)
+
+func TestEncodingValid(t *testing.T) {
+    tests := []struct {
+        value string
+
+        isValid bool
+    }{
+        {value: "base64", isValid: true},
+        {value: "b64", isValid: true},
+        {value: "gz", isValid: true},
+        {value: "gzip", isValid: true},
+        {value: "gz+base64", isValid: true},
+        {value: "gzip+base64", isValid: true},
+        {value: "gz+b64", isValid: true},
+        {value: "gzip+b64", isValid: true},
+        {value: "gzzzzbase64", isValid: false},
+        {value: "gzipppbase64", isValid: false},
+        {value: "unknown", isValid: false},
+    }
+
+    for _, tt := range tests {
+        isValid := (nil == AssertStructValid(File{Encoding: tt.value}))
+        if tt.isValid != isValid {
+            t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
+        }
+    }
+}
+
+func TestRawFilePermissionsValid(t *testing.T) {
+    tests := []struct {
+        value string
+
+        isValid bool
+    }{
+        {value: "744", isValid: true},
+        {value: "0744", isValid: true},
+        {value: "1744", isValid: true},
+        {value: "01744", isValid: true},
+        {value: "11744", isValid: false},
+        {value: "rwxr--r--", isValid: false},
+        {value: "800", isValid: false},
+    }
+
+    for _, tt := range tests {
+        isValid := (nil == AssertStructValid(File{RawFilePermissions: tt.value}))
+        if tt.isValid != isValid {
+            t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
+        }
+    }
+}
@@ -1,9 +1,12 @@
 package config
 
 type Flannel struct {
-    EtcdEndpoint string `yaml:"etcd-endpoint" env:"FLANNELD_ETCD_ENDPOINT"`
-    EtcdPrefix   string `yaml:"etcd-prefix"   env:"FLANNELD_ETCD_PREFIX"`
-    IPMasq       string `yaml:"ip-masq"       env:"FLANNELD_IP_MASQ"`
-    SubnetFile   string `yaml:"subnet-file"   env:"FLANNELD_SUBNET_FILE"`
+    EtcdEndpoints string `yaml:"etcd_endpoints" env:"FLANNELD_ETCD_ENDPOINTS"`
+    EtcdCAFile    string `yaml:"etcd_cafile"    env:"FLANNELD_ETCD_CAFILE"`
+    EtcdCertFile  string `yaml:"etcd_certfile"  env:"FLANNELD_ETCD_CERTFILE"`
+    EtcdKeyFile   string `yaml:"etcd_keyfile"   env:"FLANNELD_ETCD_KEYFILE"`
+    EtcdPrefix    string `yaml:"etcd_prefix"    env:"FLANNELD_ETCD_PREFIX"`
+    IPMasq        string `yaml:"ip_masq"        env:"FLANNELD_IP_MASQ"`
+    SubnetFile    string `yaml:"subnet_file"    env:"FLANNELD_SUBNET_FILE"`
     Iface         string `yaml:"interface"      env:"FLANNELD_IFACE"`
 }
@@ -18,13 +18,14 @@ package config
 
 type Fleet struct {
     AgentTTL string `yaml:"agent_ttl" env:"FLEET_AGENT_TTL"`
-    EngineReconcileInterval string `yaml:"engine_reconcile_interval" env:"FLEET_ENGINE_RECONCILE_INTERVAL"`
+    EngineReconcileInterval float64 `yaml:"engine_reconcile_interval" env:"FLEET_ENGINE_RECONCILE_INTERVAL"`
     EtcdCAFile   string `yaml:"etcd_cafile"   env:"FLEET_ETCD_CAFILE"`
     EtcdCertFile string `yaml:"etcd_certfile" env:"FLEET_ETCD_CERTFILE"`
     EtcdKeyFile  string `yaml:"etcd_keyfile"  env:"FLEET_ETCD_KEYFILE"`
-    EtcdRequestTimeout string `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
+    EtcdKeyPrefix      string  `yaml:"etcd_key_prefix"      env:"FLEET_ETCD_KEY_PREFIX"`
+    EtcdRequestTimeout float64 `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
     EtcdServers string `yaml:"etcd_servers" env:"FLEET_ETCD_SERVERS"`
     Metadata    string `yaml:"metadata"     env:"FLEET_METADATA"`
     PublicIP    string `yaml:"public_ip"    env:"FLEET_PUBLIC_IP"`
-    Verbosity string `yaml:"verbosity" env:"FLEET_VERBOSITY"`
+    Verbosity int    `yaml:"verbosity" env:"FLEET_VERBOSITY"`
 }

8 config/locksmith.go (new file)
@@ -0,0 +1,8 @@
+package config
+
+type Locksmith struct {
+    Endpoint     string `yaml:"endpoint"      env:"LOCKSMITHD_ENDPOINT"`
+    EtcdCAFile   string `yaml:"etcd_cafile"   env:"LOCKSMITHD_ETCD_CAFILE"`
+    EtcdCertFile string `yaml:"etcd_certfile" env:"LOCKSMITHD_ETCD_CERTFILE"`
+    EtcdKeyFile  string `yaml:"etcd_keyfile"  env:"LOCKSMITHD_ETCD_KEYFILE"`
+}
@@ -16,35 +16,17 @@
 
 package config
 
-import (
-    "path/filepath"
-    "strings"
-)
-
 type Unit struct {
     Name    string `yaml:"name"`
     Mask    bool   `yaml:"mask"`
     Enable  bool   `yaml:"enable"`
     Runtime bool   `yaml:"runtime"`
     Content string `yaml:"content"`
-    Command string `yaml:"command" valid:"start,stop,restart,reload,try-restart,reload-or-restart,reload-or-try-restart"`
-
-    // For drop-in units, a cloudinit.conf is generated.
-    // This is currently unbound in YAML (and hence unsettable in cloud-config files)
-    // until the correct behaviour for multiple drop-in units is determined.
-    DropIn bool `yaml:"-"`
+    Command string `yaml:"command" valid:"^(start|stop|restart|reload|try-restart|reload-or-restart|reload-or-try-restart)$"`
+    DropIns []UnitDropIn `yaml:"drop_ins"`
 }
 
-func (u *Unit) Type() string {
-    ext := filepath.Ext(u.Name)
-    return strings.TrimLeft(ext, ".")
-}
-
-func (u *Unit) Group() string {
-    switch u.Type() {
-    case "network", "netdev", "link":
-        return "network"
-    default:
-        return "system"
-    }
+type UnitDropIn struct {
+    Name    string `yaml:"name"`
+    Content string `yaml:"content"`
 }

46 config/unit_test.go (new file)
@@ -0,0 +1,46 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+import (
+    "testing"
+)
+
+func TestCommandValid(t *testing.T) {
+    tests := []struct {
+        value string
+
+        isValid bool
+    }{
+        {value: "start", isValid: true},
+        {value: "stop", isValid: true},
+        {value: "restart", isValid: true},
+        {value: "reload", isValid: true},
+        {value: "try-restart", isValid: true},
+        {value: "reload-or-restart", isValid: true},
+        {value: "reload-or-try-restart", isValid: true},
+        {value: "tryrestart", isValid: false},
+        {value: "unknown", isValid: false},
+    }
+
+    for _, tt := range tests {
+        isValid := (nil == AssertStructValid(Unit{Command: tt.value}))
+        if tt.isValid != isValid {
+            t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
+        }
+    }
+}
@@ -17,7 +17,7 @@
 package config
 
 type Update struct {
-    RebootStrategy string `yaml:"reboot_strategy" env:"REBOOT_STRATEGY" valid:"best-effort,etcd-lock,reboot,false"`
+    RebootStrategy string `yaml:"reboot_strategy" env:"REBOOT_STRATEGY" valid:"^(best-effort|etcd-lock|reboot|off)$"`
     Group          string `yaml:"group"  env:"GROUP"`
     Server         string `yaml:"server" env:"SERVER"`
 }

43 config/update_test.go (new file)
@@ -0,0 +1,43 @@
+/*
+   Copyright 2014 CoreOS, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+import (
+    "testing"
+)
+
+func TestRebootStrategyValid(t *testing.T) {
+    tests := []struct {
+        value string
+
+        isValid bool
+    }{
+        {value: "best-effort", isValid: true},
+        {value: "etcd-lock", isValid: true},
+        {value: "reboot", isValid: true},
+        {value: "off", isValid: true},
+        {value: "besteffort", isValid: false},
+        {value: "unknown", isValid: false},
+    }
+
+    for _, tt := range tests {
+        isValid := (nil == AssertStructValid(Update{RebootStrategy: tt.value}))
+        if tt.isValid != isValid {
+            t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
+        }
+    }
+}
@@ -21,6 +21,7 @@ type User struct {
     PasswordHash        string   `yaml:"passwd"`
     SSHAuthorizedKeys   []string `yaml:"ssh_authorized_keys"`
     SSHImportGithubUser string   `yaml:"coreos_ssh_import_github"`
+    SSHImportGithubUsers []string `yaml:"coreos_ssh_import_github_users"`
     SSHImportURL        string   `yaml:"coreos_ssh_import_url"`
     GECOS               string   `yaml:"gecos"`
     Homedir             string   `yaml:"homedir"`
|
@@ -125,7 +125,7 @@ func toNode(v interface{}, c context, n *node) {
|
|||||||
n.children = append(n.children, cn)
|
n.children = append(n.children, cn)
|
||||||
c.Increment()
|
c.Increment()
|
||||||
}
|
}
|
||||||
case reflect.String, reflect.Int, reflect.Bool:
|
case reflect.String, reflect.Int, reflect.Bool, reflect.Float64:
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("toNode(): unhandled kind %s", vv.Kind()))
|
panic(fmt.Sprintf("toNode(): unhandled kind %s", vv.Kind()))
|
||||||
}
|
}
|
||||||
|
@@ -18,7 +18,10 @@ package validate
 
 import (
     "fmt"
+    "net/url"
+    "path"
     "reflect"
+    "strings"
 
     "github.com/coreos/coreos-cloudinit/config"
 )
@@ -27,8 +30,40 @@ type rule func(config node, report *Report)
 
 // Rules contains all of the validation rules.
 var Rules []rule = []rule{
+    checkDiscoveryUrl,
+    checkEncoding,
     checkStructure,
     checkValidity,
+    checkWriteFiles,
+    checkWriteFilesUnderCoreos,
+}
+
+// checkDiscoveryUrl verifies that the string is a valid url.
+func checkDiscoveryUrl(cfg node, report *Report) {
+    c := cfg.Child("coreos").Child("etcd").Child("discovery")
+    if !c.IsValid() {
+        return
+    }
+
+    if _, err := url.ParseRequestURI(c.String()); err != nil {
+        report.Warning(c.line, "discovery URL is not valid")
+    }
+}
+
+// checkEncoding validates that, for each file under 'write_files', the
+// content can be decoded given the specified encoding.
+func checkEncoding(cfg node, report *Report) {
+    for _, f := range cfg.Child("write_files").children {
+        e := f.Child("encoding")
+        if !e.IsValid() {
+            continue
+        }
+
+        c := f.Child("contents")
+        if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
+            report.Error(c.line, fmt.Sprintf("contents cannot be decoded as %q", e.String()))
+        }
+    }
 }
 
 // checkStructure compares the provided config to the empty config.CloudConfig
@@ -61,12 +96,30 @@ func checkNodeStructure(n, g node, r *Report) {
             toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
             checkNodeStructure(cn, cg, r)
         }
-    case reflect.String, reflect.Int, reflect.Bool:
+    case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
     default:
         panic(fmt.Sprintf("checkNodeStructure(): unhandled kind %s", g.Kind()))
     }
 }
 
+// isCompatible determines if the type of kind n can be converted to the type
+// of kind g in the context of YAML. This is not an exhaustive list, but its
+// enough for the purposes of cloud-config validation.
+func isCompatible(n, g reflect.Kind) bool {
+    switch g {
+    case reflect.String:
+        return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
+    case reflect.Struct:
+        return n == reflect.Struct || n == reflect.Map
+    case reflect.Float64:
+        return n == reflect.Float64 || n == reflect.Int
+    case reflect.Bool, reflect.Slice, reflect.Int:
+        return n == g
+    default:
+        panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
+    }
+}
+
 // checkValidity checks the value of every node in the provided config by
 // running config.AssertValid() on it.
 func checkValidity(cfg node, report *Report) {
@@ -76,7 +129,7 @@ func checkValidity(cfg node, report *Report) {
 
 func checkNodeValidity(n, g node, r *Report) {
     if err := config.AssertValid(n.Value, g.field.Tag.Get("valid")); err != nil {
-        r.Warning(n.line, fmt.Sprintf("invalid value %v", n.Value))
+        r.Error(n.line, fmt.Sprintf("invalid value %v", n.Value.Interface()))
     }
     switch g.Kind() {
     case reflect.Struct:
@@ -92,24 +145,35 @@ func checkNodeValidity(n, g node, r *Report) {
             toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
             checkNodeValidity(cn, cg, r)
         }
-    case reflect.String, reflect.Int, reflect.Bool:
+    case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
     default:
         panic(fmt.Sprintf("checkNodeValidity(): unhandled kind %s", g.Kind()))
     }
 }
 
-// isCompatible determines if the type of kind n can be converted to the type
-// of kind g in the context of YAML. This is not an exhaustive list, but its
-// enough for the purposes of cloud-config validation.
-func isCompatible(n, g reflect.Kind) bool {
-    switch g {
-    case reflect.String:
-        return n == reflect.String || n == reflect.Int || n == reflect.Bool
-    case reflect.Struct:
-        return n == reflect.Struct || n == reflect.Map
-    case reflect.Bool, reflect.Slice:
-        return n == g
-    default:
-        panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
+// checkWriteFiles checks to make sure that the target file can actually be
+// written. Note that this check is approximate (it only checks to see if the file
+// is under /usr).
+func checkWriteFiles(cfg node, report *Report) {
+    for _, f := range cfg.Child("write_files").children {
+        c := f.Child("path")
+        if !c.IsValid() {
+            continue
+        }
+
+        d := path.Dir(c.String())
+        switch {
+        case strings.HasPrefix(d, "/usr"):
+            report.Error(c.line, "file cannot be written to a read-only filesystem")
+        }
+    }
+}
+
+// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
+// child of 'coreos' (it shouldn't be).
+func checkWriteFilesUnderCoreos(cfg node, report *Report) {
+    c := cfg.Child("coreos").Child("write_files")
+    if c.IsValid() {
+        report.Info(c.line, "write_files doesn't belong under coreos")
     }
 }
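checkDiscoveryUrl leans entirely on `url.ParseRequestURI`, which accepts absolute URLs and rejects bare tokens. A standalone sketch of that behaviour, using inputs that mirror the test cases which follow:

```go
package main

import (
    "fmt"
    "net/url"
)

func main() {
    for _, s := range []string{
        "https://discovery.etcd.io/00000000000000000000000000000000",
        "http://custom.domain/mytoken",
        "disco",
    } {
        _, err := url.ParseRequestURI(s)
        fmt.Printf("%q valid=%v\n", s, err == nil)
    }
    // The first two parse; "disco" does not, which is what produces the
    // "discovery URL is not valid" warning in checkDiscoveryUrl.
}
```

The write_files checks follow the same pattern: each rule walks the node tree independently and appends warnings, errors, or info entries to the shared report rather than aborting validation.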
@@ -21,6 +21,85 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestCheckDiscoveryUrl(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
config string
|
||||||
|
|
||||||
|
entries []Entry
|
||||||
|
}{
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
config: "coreos:\n etcd:\n discovery: https://discovery.etcd.io/00000000000000000000000000000000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "coreos:\n etcd:\n discovery: http://custom.domain/mytoken",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "coreos:\n etcd:\n discovery: disco",
|
||||||
|
entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tt := range tests {
|
||||||
|
r := Report{}
|
||||||
|
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
checkDiscoveryUrl(n, &r)
|
||||||
|
|
||||||
|
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||||
|
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckEncoding(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
config string
|
||||||
|
|
||||||
|
entries []Entry
|
||||||
|
}{
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: base64\n contents: aGVsbG8K",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - contents: !!binary aGVsbG8K",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: base64\n contents: !!binary aGVsbG8K",
|
||||||
|
entries: []Entry{{entryError, `contents cannot be decoded as "base64"`, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: base64\n contents: !!binary YUdWc2JHOEsK",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: gzip\n contents: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: gzip+base64\n contents: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: "write_files:\n - encoding: custom\n contents: hello",
|
||||||
|
entries: []Entry{{entryError, `contents cannot be decoded as "custom"`, 3}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tt := range tests {
|
||||||
|
r := Report{}
|
||||||
|
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
checkEncoding(n, &r)
|
||||||
|
|
||||||
|
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||||
|
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestCheckStructure(t *testing.T) {
|
func TestCheckStructure(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
config string
|
config string
|
||||||
@@ -218,7 +297,7 @@ func TestCheckValidity(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
config: "coreos:\n units:\n - command: lol",
|
config: "coreos:\n units:\n - command: lol",
|
||||||
entries: []Entry{{entryWarning, "invalid value lol", 3}},
|
entries: []Entry{{entryError, "invalid value lol", 3}},
|
||||||
},
|
},
|
||||||
|
|
||||||
// struct
|
// struct
|
||||||
@@ -227,7 +306,7 @@ func TestCheckValidity(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
config: "coreos:\n update:\n reboot_strategy: always",
|
config: "coreos:\n update:\n reboot_strategy: always",
|
||||||
entries: []Entry{{entryWarning, "invalid value always", 3}},
|
entries: []Entry{{entryError, "invalid value always", 3}},
|
||||||
},
|
},
|
||||||
|
|
||||||
// unknown
|
// unknown
|
||||||
@@ -249,3 +328,74 @@ func TestCheckValidity(t *testing.T) {
    }
  }
}
+
+func TestCheckWriteFiles(t *testing.T) {
+  tests := []struct {
+    config string
+
+    entries []Entry
+  }{
+    {},
+    {
+      config: "write_files:\n - path: /valid",
+    },
+    {
+      config: "write_files:\n - path: /tmp/usr/valid",
+    },
+    {
+      config: "write_files:\n - path: /usr/invalid",
+      entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
+    },
+    {
+      config: "write-files:\n - path: /tmp/../usr/invalid",
+      entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
+    },
+  }
+
+  for i, tt := range tests {
+    r := Report{}
+    n, err := parseCloudConfig([]byte(tt.config), &r)
+    if err != nil {
+      panic(err)
+    }
+    checkWriteFiles(n, &r)
+
+    if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
+      t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
+    }
+  }
+}
+
+func TestCheckWriteFilesUnderCoreos(t *testing.T) {
+  tests := []struct {
+    config string
+
+    entries []Entry
+  }{
+    {},
+    {
+      config: "write_files:\n - path: /hi",
+    },
+    {
+      config: "coreos:\n write_files:\n - path: /hi",
+      entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
+    },
+    {
+      config: "coreos:\n write-files:\n - path: /hyphen",
+      entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
+    },
+  }
+
+  for i, tt := range tests {
+    r := Report{}
+    n, err := parseCloudConfig([]byte(tt.config), &r)
+    if err != nil {
+      panic(err)
+    }
+    checkWriteFilesUnderCoreos(n, &r)
+
+    if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
+      t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
+    }
+  }
+}
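The new write_files checks above reject paths that end up inside the read-only /usr tree, including paths that only reach it after cleaning ("/tmp/../usr/invalid"). Below is a minimal sketch of that kind of check; it is illustrative only and stands in for the validator's actual checkWriteFiles rule:

package main

import (
	"fmt"
	"path"
	"strings"
)

// readOnly cleans the path first (so "/tmp/../usr/invalid" resolves to
// "/usr/invalid") and then rejects anything under /usr.
func readOnly(p string) bool {
	return strings.HasPrefix(path.Clean(p), "/usr")
}

func main() {
	for _, p := range []string{"/valid", "/tmp/usr/valid", "/usr/invalid", "/tmp/../usr/invalid"} {
		fmt.Printf("%-20s read-only: %v\n", p, readOnly(p))
	}
}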
@@ -25,7 +25,7 @@ import (
  "github.com/coreos/coreos-cloudinit/config"

-  "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/gopkg.in/yaml.v1"
+  "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
)

var (

@@ -38,6 +38,8 @@ var (
// can be validated.
func Validate(userdataBytes []byte) (Report, error) {
  switch {
+  case len(userdataBytes) == 0:
+    return Report{}, nil
  case config.IsScript(string(userdataBytes)):
    return Report{}, nil
  case config.IsCloudConfig(string(userdataBytes)):

@@ -63,7 +65,6 @@ func validateCloudConfig(config []byte, rules []rule) (report Report, err error)
    return report, err
  }

-  c = normalizeNodeNames(c, &report)
  for _, r := range rules {
    r(c, &report)
  }

@@ -73,30 +74,79 @@ func validateCloudConfig(config []byte, rules []rule) (report Report, err error)
// parseCloudConfig parses the provided config into a node structure and logs
// any parsing issues into the provided report. Unrecoverable errors are
// returned as an error.
-func parseCloudConfig(config []byte, report *Report) (n node, err error) {
-  var raw map[interface{}]interface{}
-  if err := yaml.Unmarshal(config, &raw); err != nil {
+func parseCloudConfig(cfg []byte, report *Report) (node, error) {
+  yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
+    return nameIn
+  }
+  // unmarshal the config into an implicitly-typed form. The yaml library
+  // will implicitly convert types into their normalized form
+  // (e.g. 0744 -> 484, off -> false).
+  var weak map[interface{}]interface{}
+  if err := yaml.Unmarshal(cfg, &weak); err != nil {
    matches := yamlLineError.FindStringSubmatch(err.Error())
    if len(matches) == 3 {
      line, err := strconv.Atoi(matches[1])
      if err != nil {
-        return n, err
+        return node{}, err
      }
      msg := matches[2]
      report.Error(line, msg)
-      return n, nil
+      return node{}, nil
    }

    matches = yamlError.FindStringSubmatch(err.Error())
    if len(matches) == 2 {
      report.Error(1, matches[1])
-      return n, nil
+      return node{}, nil
    }

-    return n, errors.New("couldn't parse yaml error")
+    return node{}, errors.New("couldn't parse yaml error")
+  }
+  w := NewNode(weak, NewContext(cfg))
+  w = normalizeNodeNames(w, report)
+
+  // unmarshal the config into the explicitly-typed form.
+  yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
+    return strings.Replace(nameIn, "-", "_", -1)
+  }
+  var strong config.CloudConfig
+  if err := yaml.Unmarshal([]byte(cfg), &strong); err != nil {
+    return node{}, err
+  }
+  s := NewNode(strong, NewContext(cfg))
+
+  // coerceNodes weak nodes and strong nodes. strong nodes replace weak nodes
+  // if they are compatible types (this happens when the yaml library
+  // converts the input).
+  // (e.g. weak 484 is replaced by strong 0744, weak 4 is not replaced by
+  // strong false)
+  return coerceNodes(w, s), nil
+}
+
+// coerceNodes recursively evaluates two nodes, returning a new node containing
+// either the weak or strong node's value and its recursively processed
+// children. The strong node's value is used if the two nodes are leafs, are
+// both valid, and are compatible types (defined by isCompatible()). The weak
+// node is returned in all other cases. coerceNodes is used to counteract the
+// effects of yaml's automatic type conversion. The weak node is the one
+// resulting from unmarshalling into an empty interface{} (the type is
+// inferred). The strong node is the one resulting from unmarshalling into a
+// struct. If the two nodes are of compatible types, the yaml library correctly
+// parsed the value into the strongly typed unmarshalling. In this case, we
+// prefer the strong node because its actually the type we are expecting.
+func coerceNodes(w, s node) node {
+  n := w
+  n.children = nil
+  if len(w.children) == 0 && len(s.children) == 0 &&
+    w.IsValid() && s.IsValid() &&
+    isCompatible(w.Kind(), s.Kind()) {
+    n.Value = s.Value
  }

-  return NewNode(raw, NewContext(config)), nil
+  for _, cw := range w.children {
+    n.children = append(n.children, coerceNodes(cw, s.Child(cw.name)))
+  }
+  return n
}

// normalizeNodeNames replaces all occurences of '-' with '_' within key names
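The weak/strong coercion above exists because YAML's implicit typing mangles values such as octal file modes before the validator sees them. The toy sketch below only illustrates the preference rule described in the comment (it stands in for coerceNodes/isCompatible, which operate on the validator's node type), under the behavior the comment itself documents:

package main

import "fmt"

// preferStrong keeps the strongly-typed value when the weakly-typed parse was
// a lossy conversion of it, and otherwise falls back to the weak value.
// Illustrative only; not the package's implementation.
func preferStrong(weak, strong interface{}) interface{} {
	switch weak.(type) {
	case int:
		// yaml turned an octal literal like 0744 into the int 484; if the
		// struct field wanted the string form, prefer the string.
		if _, ok := strong.(string); ok {
			return strong
		}
	}
	return weak
}

func main() {
	fmt.Println(preferStrong(484, "0744")) // 0744 -- compatible, strong wins
	fmt.Println(preferStrong(4, false))    // 4 -- incompatible, weak wins
}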
@@ -65,6 +65,31 @@ func TestValidateCloudConfig(t *testing.T) {
      rules: []rule{func(_ node, _ *Report) { panic("something happened") }},
      err:   errors.New("something happened"),
    },
+    {
+      config: "write_files:\n - permissions: 0744",
+      rules:  Rules,
+    },
+    {
+      config: "write_files:\n - permissions: '0744'",
+      rules:  Rules,
+    },
+    {
+      config: "write_files:\n - permissions: 744",
+      rules:  Rules,
+    },
+    {
+      config: "write_files:\n - permissions: '744'",
+      rules:  Rules,
+    },
+    {
+      config: "coreos:\n update:\n reboot-strategy: off",
+      rules:  Rules,
+    },
+    {
+      config: "coreos:\n update:\n reboot-strategy: false",
+      rules:  Rules,
+      report: Report{entries: []Entry{{entryError, "invalid value false", 3}}},
+    },
  }

  for _, tt := range tests {

@@ -78,6 +103,29 @@ func TestValidateCloudConfig(t *testing.T) {
    }
  }
}
+
+func TestValidate(t *testing.T) {
+  tests := []struct {
+    config string
+
+    report Report
+  }{
+    {},
+    {
+      config: "#!/bin/bash\necho hey",
+    },
+  }
+
+  for i, tt := range tests {
+    r, err := Validate([]byte(tt.config))
+    if err != nil {
+      t.Errorf("bad error (case #%d): want %v, got %v", i, nil, err)
+    }
+    if !reflect.DeepEqual(tt.report, r) {
+      t.Errorf("bad report (case #%d): want %+v, got %+v", i, tt.report, r)
+    }
+  }
+}

func BenchmarkValidate(b *testing.B) {
  config := `#cloud-config
hostname: test
@@ -40,7 +40,7 @@ import (
)

const (
-  version = "0.11.2"
+  version = "1.2.1"
  datasourceInterval    = 100 * time.Millisecond
  datasourceMaxInterval = 30 * time.Second
  datasourceTimeout     = 5 * time.Minute
@@ -17,8 +17,12 @@
package cloudsigma

import (
+  "bytes"
  "encoding/base64"
  "encoding/json"
+  "errors"
+  "io/ioutil"
+  "net"
  "os"
  "strings"

@@ -51,7 +55,8 @@ func (_ *serverContextService) IsAvailable() bool {
  }
  productName := make([]byte, 10)
  _, err = productNameFile.Read(productName)
-  return err == nil && string(productName) == "CloudSigma"
+
+  return err == nil && string(productName) == "CloudSigma" && hasDHCPLeases()
}

func (_ *serverContextService) AvailabilityChanges() bool {

@@ -73,12 +78,16 @@ func (scs *serverContextService) FetchMetadata() ([]byte, error) {
    UUID string            `json:"uuid"`
    Meta map[string]string `json:"meta"`
    Nics []struct {
-      Runtime struct {
-        InterfaceType string `json:"interface_type"`
-        IPv4 struct {
-          IP string `json:"uuid"`
-        } `json:"ip_v4"`
-      } `json:"runtime"`
+      Mac string `json:"mac"`
+      IPv4Conf struct {
+        InterfaceType string `json:"interface_type"`
+        IP struct {
+          UUID string `json:"uuid"`
+        } `json:"ip"`
+      } `json:"ip_v4_conf"`
+      VLAN struct {
+        UUID string `json:"uuid"`
+      } `json:"vlan"`
    } `json:"nics"`
  }
  outputMetadata struct {

@@ -112,11 +121,12 @@ func (scs *serverContextService) FetchMetadata() ([]byte, error) {
  }

  for _, nic := range inputMetadata.Nics {
-    if nic.Runtime.IPv4.IP != "" {
-      if nic.Runtime.InterfaceType == "public" {
-        outputMetadata.PublicIPv4 = nic.Runtime.IPv4.IP
-      } else {
-        outputMetadata.LocalIPv4 = nic.Runtime.IPv4.IP
+    if nic.IPv4Conf.IP.UUID != "" {
+      outputMetadata.PublicIPv4 = nic.IPv4Conf.IP.UUID
+    }
+    if nic.VLAN.UUID != "" {
+      if localIP, err := scs.findLocalIP(nic.Mac); err == nil {
+        outputMetadata.LocalIPv4 = localIP
      }
    }
  }

@@ -146,6 +156,36 @@ func (scs *serverContextService) FetchNetworkConfig(a string) ([]byte, error) {
  return nil, nil
}

+func (scs *serverContextService) findLocalIP(mac string) (string, error) {
+  ifaces, err := net.Interfaces()
+  if err != nil {
+    return "", err
+  }
+  ifaceMac, err := net.ParseMAC(mac)
+  if err != nil {
+    return "", err
+  }
+  for _, iface := range ifaces {
+    if !bytes.Equal(iface.HardwareAddr, ifaceMac) {
+      continue
+    }
+    addrs, err := iface.Addrs()
+    if err != nil {
+      continue
+    }
+
+    for _, addr := range addrs {
+      switch ip := addr.(type) {
+      case *net.IPNet:
+        if ip.IP.To4() != nil {
+          return ip.IP.To4().String(), nil
+        }
+      }
+    }
+  }
+  return "", errors.New("Local IP not found")
+}
+
func isBase64Encoded(field string, userdata map[string]string) bool {
  base64Fields, ok := userdata["base64_fields"]
  if !ok {

@@ -159,3 +199,8 @@ func isBase64Encoded(field string, userdata map[string]string) bool {
  }
  return false
}
+
+func hasDHCPLeases() bool {
+  files, err := ioutil.ReadDir("/run/systemd/netif/leases/")
+  return err == nil && len(files) > 0
+}
@@ -74,14 +74,41 @@ func TestServerContextFetchMetadata(t *testing.T) {
  "name": "coreos",
  "nics": [
    {
-      "runtime": {
-        "interface_type": "public",
-        "ip_v4": {
+      "boot_order": null,
+      "ip_v4_conf": {
+        "conf": "dhcp",
+        "ip": {
+          "gateway": "31.171.244.1",
+          "meta": {},
+          "nameservers": [
+            "178.22.66.167",
+            "178.22.71.56",
+            "8.8.8.8"
+          ],
+          "netmask": 22,
+          "tags": [],
          "uuid": "31.171.251.74"
+        }
      },
-      "ip_v6": null
-      },
+      "ip_v6_conf": null,
+      "mac": "22:3d:09:6b:90:f3",
+      "model": "virtio",
      "vlan": null
+    },
+    {
+      "boot_order": null,
+      "ip_v4_conf": null,
+      "ip_v6_conf": null,
+      "mac": "22:ae:4a:fb:8f:31",
+      "model": "virtio",
+      "vlan": {
+        "meta": {
+          "description": "",
+          "name": "CoreOS"
+        },
+        "tags": [],
+        "uuid": "5dec030e-25b8-4621-a5a4-a3302c9d9619"
+      }
    }
  ],
  "smp": 2,

@@ -106,12 +133,8 @@ func TestServerContextFetchMetadata(t *testing.T) {
    t.Error("Public SSH Keys are not being read properly")
  }

-  if metadata.LocalIPv4 != "" {
-    t.Errorf("Local IP is not empty but %s instead", metadata.LocalIPv4)
-  }
-
  if metadata.PublicIPv4 != "31.171.251.74" {
-    t.Errorf("Local IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
+    t.Errorf("Public IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
  }
}

@@ -69,7 +69,7 @@ func (ms metadataService) FetchMetadata() ([]byte, error) {
  }

  if hostname, err := ms.fetchAttribute(fmt.Sprintf("%s/hostname", ms.MetadataUrl())); err == nil {
-    attrs["hostname"] = hostname
+    attrs["hostname"] = strings.Split(hostname, " ")[0]
  } else if _, ok := err.(pkg.ErrNotFound); !ok {
    return nil, err
  }
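The hostname change keeps only the first whitespace-separated token, since EC2-style metadata can return the hostname followed by search domains (as in the new test fixture "host domain another_domain"). A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Only the first token is the hostname itself.
	hostname := "host domain another_domain"
	fmt.Println(strings.Split(hostname, " ")[0]) // host
}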
|
@@ -173,6 +173,20 @@ func TestFetchMetadata(t *testing.T) {
      },
      expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
    },
+    {
+      root:         "/",
+      metadataPath: "2009-04-04/meta-data",
+      resources: map[string]string{
+        "/2009-04-04/meta-data/hostname":                    "host domain another_domain",
+        "/2009-04-04/meta-data/local-ipv4":                  "1.2.3.4",
+        "/2009-04-04/meta-data/public-ipv4":                 "5.6.7.8",
+        "/2009-04-04/meta-data/public-keys":                 "0=test1\n",
+        "/2009-04-04/meta-data/public-keys/0":               "openssh-key",
+        "/2009-04-04/meta-data/public-keys/0/openssh-key":   "key",
+        "/2009-04-04/meta-data/network_config/content_path": "path",
+      },
+      expect: []byte(`{"hostname":"host","local-ipv4":"1.2.3.4","network_config":{"content_path":"path"},"public-ipv4":"5.6.7.8","public_keys":{"test1":"key"}}`),
+    },
    {
      clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
      expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
@@ -87,6 +87,12 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
for _, u := range user.SSHImportGithubUsers {
|
||||||
|
log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name)
|
||||||
|
if err := SSHImportGithubUser(user.Name, u); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
if user.SSHImportURL != "" {
|
if user.SSHImportURL != "" {
|
||||||
log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL)
|
log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL)
|
||||||
if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil {
|
if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil {
|
||||||
@@ -110,9 +116,10 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, ccf := range []CloudConfigFile{
|
for _, ccf := range []CloudConfigFile{
|
||||||
system.OEM{OEM: cfg.Coreos.OEM},
|
system.OEM{OEM: cfg.CoreOS.OEM},
|
||||||
system.Update{Update: cfg.Coreos.Update, ReadConfig: system.DefaultReadConfig},
|
system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
|
||||||
system.EtcHosts{EtcHosts: cfg.ManageEtcHosts},
|
system.EtcHosts{EtcHosts: cfg.ManageEtcHosts},
|
||||||
|
system.Flannel{Flannel: cfg.CoreOS.Flannel},
|
||||||
} {
|
} {
|
||||||
f, err := ccf.File()
|
f, err := ccf.File()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -124,15 +131,15 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var units []system.Unit
|
var units []system.Unit
|
||||||
for _, u := range cfg.Coreos.Units {
|
for _, u := range cfg.CoreOS.Units {
|
||||||
units = append(units, system.Unit{Unit: u})
|
units = append(units, system.Unit{Unit: u})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ccu := range []CloudConfigUnit{
|
for _, ccu := range []CloudConfigUnit{
|
||||||
system.Etcd{Etcd: cfg.Coreos.Etcd},
|
system.Etcd{Etcd: cfg.CoreOS.Etcd},
|
||||||
system.Fleet{Fleet: cfg.Coreos.Fleet},
|
system.Fleet{Fleet: cfg.CoreOS.Fleet},
|
||||||
system.Flannel{Flannel: cfg.Coreos.Flannel},
|
system.Locksmith{Locksmith: cfg.CoreOS.Locksmith},
|
||||||
system.Update{Update: cfg.Coreos.Update, ReadConfig: system.DefaultReadConfig},
|
system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
|
||||||
} {
|
} {
|
||||||
units = append(units, ccu.Units()...)
|
units = append(units, ccu.Units()...)
|
||||||
}
|
}
|
||||||
@@ -169,16 +176,13 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
|
|||||||
case "digitalocean":
|
case "digitalocean":
|
||||||
interfaces, err = network.ProcessDigitalOceanNetconf(cfg.NetworkConfig)
|
interfaces, err = network.ProcessDigitalOceanNetconf(cfg.NetworkConfig)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("Unsupported network config format %q", env.NetconfType())
|
err = fmt.Errorf("Unsupported network config format %q", env.NetconfType())
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := system.WriteNetworkdConfigs(interfaces); err != nil {
|
units = append(units, createNetworkingUnits(interfaces)...)
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := system.RestartNetwork(interfaces); err != nil {
|
if err := system.RestartNetwork(interfaces); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -186,7 +190,25 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
|
|||||||
|
|
||||||
um := system.NewUnitManager(env.Root())
|
um := system.NewUnitManager(env.Root())
|
||||||
return processUnits(units, env.Root(), um)
|
return processUnits(units, env.Root(), um)
|
||||||
|
}
|
||||||
|
|
||||||
|
func createNetworkingUnits(interfaces []network.InterfaceGenerator) (units []system.Unit) {
|
||||||
|
appendNewUnit := func(units []system.Unit, name, content string) []system.Unit {
|
||||||
|
if content == "" {
|
||||||
|
return units
|
||||||
|
}
|
||||||
|
return append(units, system.Unit{Unit: config.Unit{
|
||||||
|
Name: name,
|
||||||
|
Runtime: true,
|
||||||
|
Content: content,
|
||||||
|
}})
|
||||||
|
}
|
||||||
|
for _, i := range interfaces {
|
||||||
|
units = appendNewUnit(units, fmt.Sprintf("%s.netdev", i.Filename()), i.Netdev())
|
||||||
|
units = appendNewUnit(units, fmt.Sprintf("%s.link", i.Filename()), i.Link())
|
||||||
|
units = appendNewUnit(units, fmt.Sprintf("%s.network", i.Filename()), i.Network())
|
||||||
|
}
|
||||||
|
return units
|
||||||
}
|
}
|
||||||
|
|
||||||
// processUnits takes a set of Units and applies them to the given root using
|
// processUnits takes a set of Units and applies them to the given root using
|
||||||
@@ -195,66 +217,92 @@ func Apply(cfg config.CloudConfig, env *Environment) error {
// commands against units. It returns any error encountered.
func processUnits(units []system.Unit, root string, um system.UnitManager) error {
  type action struct {
-    unit    string
+    unit    system.Unit
    command string
  }
  actions := make([]action, 0, len(units))
  reload := false
+  restartNetworkd := false
  for _, unit := range units {
-    dst := unit.Destination(root)
+    if unit.Name == "" {
+      log.Printf("Skipping unit without name")
+      continue
+    }
+
    if unit.Content != "" {
-      log.Printf("Writing unit %s to filesystem at path %s", unit.Name, dst)
-      if err := um.PlaceUnit(&unit, dst); err != nil {
+      log.Printf("Writing unit %q to filesystem", unit.Name)
+      if err := um.PlaceUnit(unit); err != nil {
        return err
      }
-      log.Printf("Placed unit %s at %s", unit.Name, dst)
+      log.Printf("Wrote unit %q", unit.Name)
      reload = true
    }

+    for _, dropin := range unit.DropIns {
+      if dropin.Name != "" && dropin.Content != "" {
+        log.Printf("Writing drop-in unit %q to filesystem", dropin.Name)
+        if err := um.PlaceUnitDropIn(unit, dropin); err != nil {
+          return err
+        }
+        log.Printf("Wrote drop-in unit %q", dropin.Name)
+        reload = true
+      }
+    }
+
    if unit.Mask {
-      log.Printf("Masking unit file %s", unit.Name)
-      if err := um.MaskUnit(&unit); err != nil {
+      log.Printf("Masking unit file %q", unit.Name)
+      if err := um.MaskUnit(unit); err != nil {
        return err
      }
    } else if unit.Runtime {
-      log.Printf("Ensuring runtime unit file %s is unmasked", unit.Name)
-      if err := um.UnmaskUnit(&unit); err != nil {
+      log.Printf("Ensuring runtime unit file %q is unmasked", unit.Name)
+      if err := um.UnmaskUnit(unit); err != nil {
        return err
      }
    }

    if unit.Enable {
      if unit.Group() != "network" {
-        log.Printf("Enabling unit file %s", unit.Name)
-        if err := um.EnableUnitFile(unit.Name, unit.Runtime); err != nil {
+        log.Printf("Enabling unit file %q", unit.Name)
+        if err := um.EnableUnitFile(unit); err != nil {
          return err
        }
-        log.Printf("Enabled unit %s", unit.Name)
+        log.Printf("Enabled unit %q", unit.Name)
      } else {
-        log.Printf("Skipping enable for network-like unit %s", unit.Name)
+        log.Printf("Skipping enable for network-like unit %q", unit.Name)
      }
    }

    if unit.Group() == "network" {
-      actions = append(actions, action{"systemd-networkd.service", "restart"})
+      restartNetworkd = true
    } else if unit.Command != "" {
-      actions = append(actions, action{unit.Name, unit.Command})
+      actions = append(actions, action{unit, unit.Command})
    }
  }

  if reload {
    if err := um.DaemonReload(); err != nil {
-      return errors.New(fmt.Sprintf("failed systemd daemon-reload: %v", err))
+      return errors.New(fmt.Sprintf("failed systemd daemon-reload: %s", err))
    }
  }

-  for _, action := range actions {
-    log.Printf("Calling unit command '%s %s'", action.command, action.unit)
-    res, err := um.RunUnitCommand(action.command, action.unit)
+  if restartNetworkd {
+    log.Printf("Restarting systemd-networkd")
+    networkd := system.Unit{Unit: config.Unit{Name: "systemd-networkd.service"}}
+    res, err := um.RunUnitCommand(networkd, "restart")
    if err != nil {
      return err
    }
-    log.Printf("Result of '%s %s': %s", action.command, action.unit, res)
+    log.Printf("Restarted systemd-networkd (%s)", res)
+  }
+
+  for _, action := range actions {
+    log.Printf("Calling unit command %q on %q'", action.command, action.unit.Name)
+    res, err := um.RunUnitCommand(action.unit, action.command)
+    if err != nil {
+      return err
+    }
+    log.Printf("Result of %q on %q: %s", action.command, action.unit.Name, res)
  }

  return nil
@@ -17,9 +17,11 @@
package initialize

import (
+  "reflect"
  "testing"

  "github.com/coreos/coreos-cloudinit/config"
+  "github.com/coreos/coreos-cloudinit/network"
  "github.com/coreos/coreos-cloudinit/system"
)

@@ -28,103 +30,272 @@ type TestUnitManager struct {
  enabled  []string
  masked   []string
  unmasked []string
-  commands map[string]string
+  commands []UnitAction
  reload   bool
}

-func (tum *TestUnitManager) PlaceUnit(unit *system.Unit, dst string) error {
-  tum.placed = append(tum.placed, unit.Name)
+type UnitAction struct {
+  unit    string
+  command string
+}
+
+func (tum *TestUnitManager) PlaceUnit(u system.Unit) error {
+  tum.placed = append(tum.placed, u.Name)
  return nil
}
-func (tum *TestUnitManager) EnableUnitFile(unit string, runtime bool) error {
-  tum.enabled = append(tum.enabled, unit)
+func (tum *TestUnitManager) PlaceUnitDropIn(u system.Unit, d config.UnitDropIn) error {
+  tum.placed = append(tum.placed, u.Name+".d/"+d.Name)
  return nil
}
-func (tum *TestUnitManager) RunUnitCommand(command, unit string) (string, error) {
-  tum.commands = make(map[string]string)
-  tum.commands[unit] = command
+func (tum *TestUnitManager) EnableUnitFile(u system.Unit) error {
+  tum.enabled = append(tum.enabled, u.Name)
+  return nil
+}
+func (tum *TestUnitManager) RunUnitCommand(u system.Unit, c string) (string, error) {
+  tum.commands = append(tum.commands, UnitAction{u.Name, c})
  return "", nil
}
func (tum *TestUnitManager) DaemonReload() error {
  tum.reload = true
  return nil
}
-func (tum *TestUnitManager) MaskUnit(unit *system.Unit) error {
-  tum.masked = append(tum.masked, unit.Name)
+func (tum *TestUnitManager) MaskUnit(u system.Unit) error {
+  tum.masked = append(tum.masked, u.Name)
  return nil
}
-func (tum *TestUnitManager) UnmaskUnit(unit *system.Unit) error {
-  tum.unmasked = append(tum.unmasked, unit.Name)
+func (tum *TestUnitManager) UnmaskUnit(u system.Unit) error {
+  tum.unmasked = append(tum.unmasked, u.Name)
  return nil
}

+type mockInterface struct {
+  name           string
+  filename       string
+  netdev         string
+  link           string
+  network        string
+  kind           string
+  modprobeParams string
+}
+
+func (i mockInterface) Name() string {
+  return i.name
+}
+
+func (i mockInterface) Filename() string {
+  return i.filename
+}
+
+func (i mockInterface) Netdev() string {
+  return i.netdev
+}
+
+func (i mockInterface) Link() string {
+  return i.link
+}
+
+func (i mockInterface) Network() string {
+  return i.network
+}
+
+func (i mockInterface) Type() string {
+  return i.kind
+}
+
+func (i mockInterface) ModprobeParams() string {
+  return i.modprobeParams
+}
+
+func TestCreateNetworkingUnits(t *testing.T) {
+  for _, tt := range []struct {
+    interfaces []network.InterfaceGenerator
+    expect     []system.Unit
+  }{
+    {nil, nil},
+    {
+      []network.InterfaceGenerator{
+        network.InterfaceGenerator(mockInterface{filename: "test"}),
+      },
+      nil,
+    },
+    {
+      []network.InterfaceGenerator{
+        network.InterfaceGenerator(mockInterface{filename: "test1", netdev: "test netdev"}),
+        network.InterfaceGenerator(mockInterface{filename: "test2", link: "test link"}),
+        network.InterfaceGenerator(mockInterface{filename: "test3", network: "test network"}),
+      },
+      []system.Unit{
+        system.Unit{Unit: config.Unit{Name: "test1.netdev", Runtime: true, Content: "test netdev"}},
+        system.Unit{Unit: config.Unit{Name: "test2.link", Runtime: true, Content: "test link"}},
+        system.Unit{Unit: config.Unit{Name: "test3.network", Runtime: true, Content: "test network"}},
+      },
+    },
+    {
+      []network.InterfaceGenerator{
+        network.InterfaceGenerator(mockInterface{filename: "test", netdev: "test netdev", link: "test link", network: "test network"}),
+      },
+      []system.Unit{
+        system.Unit{Unit: config.Unit{Name: "test.netdev", Runtime: true, Content: "test netdev"}},
+        system.Unit{Unit: config.Unit{Name: "test.link", Runtime: true, Content: "test link"}},
+        system.Unit{Unit: config.Unit{Name: "test.network", Runtime: true, Content: "test network"}},
+      },
+    },
+  } {
+    units := createNetworkingUnits(tt.interfaces)
+    if !reflect.DeepEqual(tt.expect, units) {
+      t.Errorf("bad units (%+v): want %#v, got %#v", tt.interfaces, tt.expect, units)
+    }
+  }
+}
+
func TestProcessUnits(t *testing.T) {
-  tum := &TestUnitManager{}
-  units := []system.Unit{
-    system.Unit{Unit: config.Unit{
-      Name: "foo",
-      Mask: true,
-    }},
-  }
-  if err := processUnits(units, "", tum); err != nil {
-    t.Fatalf("unexpected error calling processUnits: %v", err)
-  }
-  if len(tum.masked) != 1 || tum.masked[0] != "foo" {
-    t.Errorf("expected foo to be masked, but found %v", tum.masked)
-  }
-
-  tum = &TestUnitManager{}
-  units = []system.Unit{
-    system.Unit{Unit: config.Unit{
-      Name: "bar.network",
-    }},
-  }
-  if err := processUnits(units, "", tum); err != nil {
-    t.Fatalf("unexpected error calling processUnits: %v", err)
-  }
-  if _, ok := tum.commands["systemd-networkd.service"]; !ok {
-    t.Errorf("expected systemd-networkd.service to be reloaded!")
-  }
-
-  tum = &TestUnitManager{}
-  units = []system.Unit{
-    system.Unit{Unit: config.Unit{
-      Name:    "baz.service",
-      Content: "[Service]\nExecStart=/bin/true",
-    }},
-  }
-  if err := processUnits(units, "", tum); err != nil {
-    t.Fatalf("unexpected error calling processUnits: %v", err)
-  }
-  if len(tum.placed) != 1 || tum.placed[0] != "baz.service" {
-    t.Fatalf("expected baz.service to be written, but got %v", tum.placed)
-  }
-
-  tum = &TestUnitManager{}
-  units = []system.Unit{
-    system.Unit{Unit: config.Unit{
-      Name:    "locksmithd.service",
-      Runtime: true,
-    }},
-  }
-  if err := processUnits(units, "", tum); err != nil {
-    t.Fatalf("unexpected error calling processUnits: %v", err)
-  }
-  if len(tum.unmasked) != 1 || tum.unmasked[0] != "locksmithd.service" {
-    t.Fatalf("expected locksmithd.service to be unmasked, but got %v", tum.unmasked)
-  }
-
-  tum = &TestUnitManager{}
-  units = []system.Unit{
-    system.Unit{Unit: config.Unit{
-      Name:   "woof",
-      Enable: true,
-    }},
-  }
-  if err := processUnits(units, "", tum); err != nil {
-    t.Fatalf("unexpected error calling processUnits: %v", err)
-  }
-  if len(tum.enabled) != 1 || tum.enabled[0] != "woof" {
-    t.Fatalf("expected woof to be enabled, but got %v", tum.enabled)
-  }
+  tests := []struct {
+    units []system.Unit
+
+    result TestUnitManager
+  }{
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name: "foo",
+          Mask: true,
+        }},
+      },
+      result: TestUnitManager{
+        masked: []string{"foo"},
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name:    "baz.service",
+          Content: "[Service]\nExecStart=/bin/baz",
+          Command: "start",
+        }},
+        system.Unit{Unit: config.Unit{
+          Name:    "foo.network",
+          Content: "[Network]\nFoo=true",
+        }},
+        system.Unit{Unit: config.Unit{
+          Name:    "bar.network",
+          Content: "[Network]\nBar=true",
+        }},
+      },
+      result: TestUnitManager{
+        placed: []string{"baz.service", "foo.network", "bar.network"},
+        commands: []UnitAction{
+          UnitAction{"systemd-networkd.service", "restart"},
+          UnitAction{"baz.service", "start"},
+        },
+        reload: true,
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name:    "baz.service",
+          Content: "[Service]\nExecStart=/bin/true",
+        }},
+      },
+      result: TestUnitManager{
+        placed: []string{"baz.service"},
+        reload: true,
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name:    "locksmithd.service",
+          Runtime: true,
+        }},
+      },
+      result: TestUnitManager{
+        unmasked: []string{"locksmithd.service"},
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name:   "woof",
+          Enable: true,
+        }},
+      },
+      result: TestUnitManager{
+        enabled: []string{"woof"},
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name:    "hi.service",
+          Runtime: true,
+          Content: "[Service]\nExecStart=/bin/echo hi",
+          DropIns: []config.UnitDropIn{
+            {
+              Name:    "lo.conf",
+              Content: "[Service]\nExecStart=/bin/echo lo",
+            },
+            {
+              Name:    "bye.conf",
+              Content: "[Service]\nExecStart=/bin/echo bye",
+            },
+          },
+        }},
+      },
+      result: TestUnitManager{
+        placed:   []string{"hi.service", "hi.service.d/lo.conf", "hi.service.d/bye.conf"},
+        unmasked: []string{"hi.service"},
+        reload:   true,
+      },
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          DropIns: []config.UnitDropIn{
+            {
+              Name:    "lo.conf",
+              Content: "[Service]\nExecStart=/bin/echo lo",
+            },
+          },
+        }},
+      },
+      result: TestUnitManager{},
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name: "hi.service",
+          DropIns: []config.UnitDropIn{
+            {
+              Content: "[Service]\nExecStart=/bin/echo lo",
+            },
+          },
+        }},
+      },
+      result: TestUnitManager{},
+    },
+    {
+      units: []system.Unit{
+        system.Unit{Unit: config.Unit{
+          Name: "hi.service",
+          DropIns: []config.UnitDropIn{
+            {
+              Name: "lo.conf",
+            },
+          },
+        }},
+      },
+      result: TestUnitManager{},
+    },
+  }
+
+  for _, tt := range tests {
+    tum := &TestUnitManager{}
+    if err := processUnits(tt.units, "", tum); err != nil {
+      t.Errorf("bad error (%+v): want nil, got %s", tt.units, err)
+    }
+    if !reflect.DeepEqual(tt.result, *tum) {
+      t.Errorf("bad result (%+v): want %+v, got %+v", tt.units, tt.result, tum)
+    }
  }
}
@@ -23,35 +23,32 @@ import (
  "github.com/coreos/coreos-cloudinit/config"
)

-// dropinContents generates the contents for a drop-in unit given the config.
+// serviceContents generates the contents for a drop-in unit given the config.
// The argument must be a struct from the 'config' package.
-func dropinContents(e interface{}) string {
+func serviceContents(e interface{}) string {
+  vars := getEnvVars(e)
+  if len(vars) == 0 {
+    return ""
+  }
+
+  out := "[Service]\n"
+  for _, v := range vars {
+    out += fmt.Sprintf("Environment=\"%s\"\n", v)
+  }
+  return out
+}
+
+func getEnvVars(e interface{}) []string {
  et := reflect.TypeOf(e)
  ev := reflect.ValueOf(e)

-  var out string
+  vars := []string{}
  for i := 0; i < et.NumField(); i++ {
-    if val := ev.Field(i).String(); val != "" {
+    if val := ev.Field(i).Interface(); !config.IsZero(val) {
      key := et.Field(i).Tag.Get("env")
-      out += fmt.Sprintf("Environment=\"%s=%s\"\n", key, val)
+      vars = append(vars, fmt.Sprintf("%s=%v", key, val))
    }
  }

-  if out == "" {
-    return ""
-  }
-  return "[Service]\n" + out
-}
-
-func dropinFromConfig(cfg interface{}, name string) []Unit {
-  content := dropinContents(cfg)
-  if content == "" {
-    return nil
-  }
-  return []Unit{{config.Unit{
-    Name:    name,
-    Runtime: true,
-    DropIn:  true,
-    Content: content,
-  }}}
+  return vars
}
system/env_test.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package system
+
+import (
+  "testing"
+)
+
+func TestServiceContents(t *testing.T) {
+  tests := []struct {
+    Config   interface{}
+    Contents string
+  }{
+    {
+      struct{}{},
+      "",
+    },
+    {
+      struct {
+        A string  `env:"A"`
+        B int     `env:"B"`
+        C bool    `env:"C"`
+        D float64 `env:"D"`
+      }{
+        "hi", 1, true, 0.12345,
+      },
+      `[Service]
+Environment="A=hi"
+Environment="B=1"
+Environment="C=true"
+Environment="D=0.12345"
+`,
+    },
+    {
+      struct {
+        A float64 `env:"A"`
+        B float64 `env:"B"`
+        C float64 `env:"C"`
+        D float64 `env:"D"`
+      }{
+        0.000001, 1, 0.9999999, 0.1,
+      },
+      `[Service]
+Environment="A=1e-06"
+Environment="B=1"
+Environment="C=0.9999999"
+Environment="D=0.1"
+`,
+    },
+  }
+
+  for _, tt := range tests {
+    if c := serviceContents(tt.Config); c != tt.Contents {
+      t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Contents, c)
+    }
+  }
+}
@@ -28,5 +28,12 @@ type Etcd struct {

// Units creates a Unit file drop-in for etcd, using any configured options.
func (ee Etcd) Units() []Unit {
-  return dropinFromConfig(ee.Etcd, "etcd.service")
+  return []Unit{{config.Unit{
+    Name:    "etcd.service",
+    Runtime: true,
+    DropIns: []config.UnitDropIn{{
+      Name:    "20-cloudinit.conf",
+      Content: serviceContents(ee.Etcd),
+    }},
+  }}}
}

@@ -30,7 +30,11 @@ func TestEtcdUnits(t *testing.T) {
  }{
    {
      config.Etcd{},
-      nil,
+      []Unit{{config.Unit{
+        Name:    "etcd.service",
+        Runtime: true,
+        DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
+      }}},
    },
    {
      config.Etcd{

@@ -40,11 +44,13 @@ func TestEtcdUnits(t *testing.T) {
      []Unit{{config.Unit{
        Name:    "etcd.service",
        Runtime: true,
-        DropIn:  true,
+        DropIns: []config.UnitDropIn{{
+          Name: "20-cloudinit.conf",
          Content: `[Service]
Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
`,
+        }},
      }}},
    },
    {

@@ -56,18 +62,20 @@ Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
      []Unit{{config.Unit{
        Name:    "etcd.service",
        Runtime: true,
-        DropIn:  true,
+        DropIns: []config.UnitDropIn{{
+          Name: "20-cloudinit.conf",
          Content: `[Service]
Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
Environment="ETCD_NAME=node001"
Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
`,
+        }},
      }}},
    },
  } {
    units := Etcd{tt.config}.Units()
    if !reflect.DeepEqual(tt.units, units) {
-      t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units)
+      t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
    }
  }
}
|
@@ -19,6 +19,7 @@ package system
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
@@ -39,7 +40,7 @@ func (f *File) Permissions() (os.FileMode, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Parse string representation of file mode as integer
|
// Parse string representation of file mode as integer
|
||||||
perm, err := strconv.ParseInt(f.RawFilePermissions, 0, 32)
|
perm, err := strconv.ParseInt(f.RawFilePermissions, 8, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("Unable to parse file permissions %q as integer", f.RawFilePermissions)
|
return 0, fmt.Errorf("Unable to parse file permissions %q as integer", f.RawFilePermissions)
|
||||||
}
|
}
|
||||||
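Switching strconv.ParseInt from base 0 to base 8 makes both "0744" and "744" parse as octal file modes; with base 0 the un-prefixed form is read as decimal 744, which is why the decimal-permissions test below changes. A quick illustration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, s := range []string{"0744", "744"} {
		v0, _ := strconv.ParseInt(s, 0, 32) // base 0: the prefix decides, so "744" is decimal 744
		v8, _ := strconv.ParseInt(s, 8, 32) // base 8: both spellings are octal -> 484 (0744)
		fmt.Printf("%-5s base0=%d base8=%#o\n", s, v0, v8)
	}
}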
@@ -47,12 +48,15 @@ func (f *File) Permissions() (os.FileMode, error) {
}

func WriteFile(f *File, root string) (string, error) {
-  if f.Encoding != "" {
-    return "", fmt.Errorf("Unable to write file with encoding %s", f.Encoding)
-  }
-
  fullpath := path.Join(root, f.Path)
  dir := path.Dir(fullpath)
+  log.Printf("Writing file to %q", fullpath)
+
+  content, err := config.DecodeContent(f.Content, f.Encoding)
+
+  if err != nil {
+    return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err)
+  }
+
  if err := EnsureDirectoryExists(dir); err != nil {
    return "", err

@@ -69,7 +73,7 @@ func WriteFile(f *File, root string) (string, error) {
    return "", err
  }

-  if err := ioutil.WriteFile(tmp.Name(), []byte(f.Content), perm); err != nil {
+  if err := ioutil.WriteFile(tmp.Name(), content, perm); err != nil {
    return "", err
  }

@@ -94,6 +98,7 @@ func WriteFile(f *File, root string) (string, error) {
    return "", err
  }

+  log.Printf("Wrote file to %q", fullpath)
  return fullpath, nil
}
@@ -97,7 +97,7 @@ func TestDecimalFilePermissions(t *testing.T) {

  wf := File{config.File{
    Path: fn,
-    RawFilePermissions: "484", // Decimal representation of 0744
+    RawFilePermissions: "744",
  }}

  path, err := WriteFile(&wf, dir)

@@ -156,10 +156,97 @@ func TestWriteFileEncodedContent(t *testing.T) {
  }
  defer os.RemoveAll(dir)

+  //all of these decode to "bar"
+  content_tests := map[string]string{
+    "base64":      "YmFy",
+    "b64":         "YmFy",
+    "gz":          "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
+    "gzip":        "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
+    "gz+base64":   "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
+    "gzip+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
+    "gz+b64":      "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
+    "gzip+b64":    "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
+  }
+
+  for encoding, content := range content_tests {
+    fullPath := path.Join(dir, encoding)
+
+    wf := File{config.File{
+      Path:               encoding,
+      Encoding:           encoding,
+      Content:            content,
+      RawFilePermissions: "0644",
+    }}
+
+    path, err := WriteFile(&wf, dir)
+    if err != nil {
+      t.Fatalf("Processing of WriteFile failed: %v", err)
+    } else if path != fullPath {
+      t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
+    }
+
+    fi, err := os.Stat(fullPath)
+    if err != nil {
+      t.Fatalf("Unable to stat file: %v", err)
+    }
+
+    if fi.Mode() != os.FileMode(0644) {
+      t.Errorf("File has incorrect mode: %v", fi.Mode())
+    }
+
+    contents, err := ioutil.ReadFile(fullPath)
+    if err != nil {
+      t.Fatalf("Unable to read expected file: %v", err)
+    }
+
+    if string(contents) != "bar" {
+      t.Fatalf("File has incorrect contents: '%s'", contents)
+    }
+  }
+}
+
+func TestWriteFileInvalidEncodedContent(t *testing.T) {
+  dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+  if err != nil {
+    t.Fatalf("Unable to create tempdir: %v", err)
+  }
+  defer os.RemoveAll(dir)
+
+  content_encodings := []string{
+    "base64",
+    "b64",
+    "gz",
+    "gzip",
+    "gz+base64",
+    "gzip+base64",
+    "gz+b64",
+    "gzip+b64",
+  }
+
+  for _, encoding := range content_encodings {
+    wf := File{config.File{
+      Path:     path.Join(dir, "tmp", "foo"),
+      Content:  "@&*#%invalid data*@&^#*&",
+      Encoding: encoding,
+    }}
+
+    if _, err := WriteFile(&wf, dir); err == nil {
+      t.Fatalf("Expected error to be raised when writing file with encoding")
+    }
+  }
+}
+
+func TestWriteFileUnknownEncodedContent(t *testing.T) {
+  dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+  if err != nil {
+    t.Fatalf("Unable to create tempdir: %v", err)
+  }
+  defer os.RemoveAll(dir)
+
  wf := File{config.File{
    Path: path.Join(dir, "tmp", "foo"),
    Content: "",
-    Encoding: "base64",
+    Encoding: "no-such-encoding",
  }}

  if _, err := WriteFile(&wf, dir); err == nil {
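All of the fixture strings above decode to "bar". As a sanity check, the "gz+base64" variants can be verified by base64-decoding first and then gunzipping, e.g.:

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

func main() {
	// "gz+base64" fixture from the test above: base64 decode, then gunzip.
	raw, err := base64.StdEncoding.DecodeString("H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=")
	if err != nil {
		panic(err)
	}
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // bar
}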
@@ -1,6 +1,9 @@
package system

import (
+  "path"
+  "strings"
+
  "github.com/coreos/coreos-cloudinit/config"
)

@@ -10,8 +13,18 @@ type Flannel struct {
  config.Flannel
}

-// Units generates a Unit file drop-in for flannel, if any flannel options were
-// configured in cloud-config
-func (fl Flannel) Units() []Unit {
-  return dropinFromConfig(fl.Flannel, "flannel.service")
+func (fl Flannel) envVars() string {
+  return strings.Join(getEnvVars(fl.Flannel), "\n")
+}
+
+func (fl Flannel) File() (*File, error) {
+  vars := fl.envVars()
+  if vars == "" {
+    return nil, nil
+  }
+  return &File{config.File{
+    Path:               path.Join("run", "flannel", "options.env"),
+    RawFilePermissions: "0644",
+    Content:            vars,
+  }}, nil
}

@@ -7,10 +7,35 @@ import (
  "github.com/coreos/coreos-cloudinit/config"
)

-func TestFlannelUnits(t *testing.T) {
+func TestFlannelEnvVars(t *testing.T) {
  for _, tt := range []struct {
    config config.Flannel
-    units []Unit
+    contents string
+  }{
+    {
+      config.Flannel{},
+      "",
+    },
+    {
+      config.Flannel{
+        EtcdEndpoints: "http://12.34.56.78:4001",
+        EtcdPrefix:    "/coreos.com/network/tenant1",
+      },
+      `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
+FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
+    },
+  } {
+    out := Flannel{tt.config}.envVars()
+    if out != tt.contents {
+      t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.contents, out)
+    }
+  }
+}
+
+func TestFlannelFile(t *testing.T) {
+  for _, tt := range []struct {
+    config config.Flannel
+    file *File
  }{
    {
      config.Flannel{},

@@ -18,23 +43,20 @@ func TestFlannelUnits(t *testing.T) {
    },
    {
      config.Flannel{
-        EtcdEndpoint: "http://12.34.56.78:4001",
+        EtcdEndpoints: "http://12.34.56.78:4001",
        EtcdPrefix: "/coreos.com/network/tenant1",
      },
-      []Unit{{config.Unit{
-        Name: "flannel.service",
-        Content: `[Service]
-Environment="FLANNELD_ETCD_ENDPOINT=http://12.34.56.78:4001"
-Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1"
-`,
-        Runtime: true,
-        DropIn:  true,
-      }}},
+      &File{config.File{
+        Path:               "run/flannel/options.env",
+        RawFilePermissions: "0644",
+        Content: `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
+FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
+      }},
    },
  } {
-    units := Flannel{tt.config}.Units()
-    if !reflect.DeepEqual(units, tt.units) {
-      t.Errorf("bad units (%q): want %v, got %v", tt.config, tt.units, units)
+    file, _ := Flannel{tt.config}.File()
+    if !reflect.DeepEqual(tt.file, file) {
+      t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file)
    }
  }
}
|
@@ -29,5 +29,12 @@ type Fleet struct {
|
|||||||
// Units generates a Unit file drop-in for fleet, if any fleet options were
|
// Units generates a Unit file drop-in for fleet, if any fleet options were
|
||||||
// configured in cloud-config
|
// configured in cloud-config
|
||||||
func (fe Fleet) Units() []Unit {
|
func (fe Fleet) Units() []Unit {
|
||||||
return dropinFromConfig(fe.Fleet, "fleet.service")
|
return []Unit{{config.Unit{
|
||||||
|
Name: "fleet.service",
|
||||||
|
Runtime: true,
|
||||||
|
DropIns: []config.UnitDropIn{{
|
||||||
|
Name: "20-cloudinit.conf",
|
||||||
|
Content: serviceContents(fe.Fleet),
|
||||||
|
}},
|
||||||
|
}}}
|
||||||
}
|
}
|
||||||
|
@@ -30,7 +30,11 @@ func TestFleetUnits(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
config.Fleet{},
|
config.Fleet{},
|
||||||
nil,
|
[]Unit{{config.Unit{
|
||||||
|
Name: "fleet.service",
|
||||||
|
Runtime: true,
|
||||||
|
DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
|
||||||
|
}}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
config.Fleet{
|
config.Fleet{
|
||||||
@@ -38,17 +42,19 @@ func TestFleetUnits(t *testing.T) {
|
|||||||
},
|
},
|
||||||
[]Unit{{config.Unit{
|
[]Unit{{config.Unit{
|
||||||
Name: "fleet.service",
|
Name: "fleet.service",
|
||||||
|
Runtime: true,
|
||||||
|
DropIns: []config.UnitDropIn{{
|
||||||
|
Name: "20-cloudinit.conf",
|
||||||
Content: `[Service]
|
Content: `[Service]
|
||||||
Environment="FLEET_PUBLIC_IP=12.34.56.78"
|
Environment="FLEET_PUBLIC_IP=12.34.56.78"
|
||||||
`,
|
`,
|
||||||
Runtime: true,
|
}},
|
||||||
DropIn: true,
|
|
||||||
}}},
|
}}},
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
units := Fleet{tt.config}.Units()
|
units := Fleet{tt.config}.Units()
|
||||||
if !reflect.DeepEqual(units, tt.units) {
|
if !reflect.DeepEqual(units, tt.units) {
|
||||||
t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units)
|
t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
system/locksmith.go (new file, 39 lines)
@@ -0,0 +1,39 @@
/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
"github.com/coreos/coreos-cloudinit/config"
)

// Locksmith is a top-level structure which embeds its underlying configuration,
// config.Locksmith, and provides the system-specific Unit().
type Locksmith struct {
config.Locksmith
}

// Units creates a Unit file drop-in for etcd, using any configured options.
func (ee Locksmith) Units() []Unit {
return []Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: serviceContents(ee.Locksmith),
}},
}}}
}
system/locksmith_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
"reflect"
"testing"

"github.com/coreos/coreos-cloudinit/config"
)

func TestLocksmithUnits(t *testing.T) {
for _, tt := range []struct {
config config.Locksmith
units []Unit
}{
{
config.Locksmith{},
[]Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
}}},
},
{
config.Locksmith{
Endpoint: "12.34.56.78:4001",
},
[]Unit{{config.Unit{
Name: "locksmithd.service",
Runtime: true,
DropIns: []config.UnitDropIn{{
Name: "20-cloudinit.conf",
Content: `[Service]
Environment="LOCKSMITHD_ENDPOINT=12.34.56.78:4001"
`,
}},
}}},
},
} {
units := Locksmith{tt.config}.Units()
if !reflect.DeepEqual(units, tt.units) {
t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
}
}
}
@@ -29,10 +29,6 @@ import (
 "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/dotcloud/docker/pkg/netlink"
 )
 
-const (
-runtimeNetworkPath = "/run/systemd/network"
-)
-
 func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) {
 defer func() {
 if e := restartNetworkd(); e != nil {
@@ -96,33 +92,7 @@ func maybeProbeBonding(interfaces []network.InterfaceGenerator) error {
 
 func restartNetworkd() error {
 log.Printf("Restarting networkd.service\n")
-_, err := NewUnitManager("").RunUnitCommand("restart", "systemd-networkd.service")
-return err
-}
-
-func WriteNetworkdConfigs(interfaces []network.InterfaceGenerator) error {
-for _, iface := range interfaces {
-filename := fmt.Sprintf("%s.netdev", iface.Filename())
-if err := writeConfig(filename, iface.Netdev()); err != nil {
-return err
-}
-filename = fmt.Sprintf("%s.link", iface.Filename())
-if err := writeConfig(filename, iface.Link()); err != nil {
-return err
-}
-filename = fmt.Sprintf("%s.network", iface.Filename())
-if err := writeConfig(filename, iface.Network()); err != nil {
-return err
-}
-}
-return nil
-}
-
-func writeConfig(filename string, content string) error {
-if content == "" {
-return nil
-}
-log.Printf("Writing networkd unit %q\n", filename)
-_, err := WriteFile(&File{config.File{Content: content, Path: filename}}, runtimeNetworkPath)
+networkd := Unit{config.Unit{Name: "systemd-networkd.service"}}
+_, err := NewUnitManager("").RunUnitCommand(networkd, "restart")
 return err
 }
@@ -23,7 +23,6 @@ import (
 "os"
 "os/exec"
 "path"
-"path/filepath"
 "strings"
 
 "github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus"
@@ -42,49 +41,51 @@ type systemd struct {
 // never be used as a true MachineID
 const fakeMachineID = "42000000000000000000000000000042"
 
-// PlaceUnit writes a unit file at the provided destination, creating
-// parent directories as necessary.
-func (s *systemd) PlaceUnit(u *Unit, dst string) error {
-dir := filepath.Dir(dst)
-if _, err := os.Stat(dir); os.IsNotExist(err) {
-if err := os.MkdirAll(dir, os.FileMode(0755)); err != nil {
-return err
-}
-}
-
+// PlaceUnit writes a unit file at its desired destination, creating parent
+// directories as necessary.
+func (s *systemd) PlaceUnit(u Unit) error {
 file := File{config.File{
-Path: filepath.Base(dst),
+Path: u.Destination(s.root),
 Content: u.Content,
 RawFilePermissions: "0644",
 }}
 
-_, err := WriteFile(&file, dir)
-if err != nil {
+_, err := WriteFile(&file, "/")
 return err
-}
-
-return nil
 }
 
-func (s *systemd) EnableUnitFile(unit string, runtime bool) error {
+// PlaceUnitDropIn writes a unit drop-in file at its desired destination,
+// creating parent directories as necessary.
+func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error {
+file := File{config.File{
+Path: u.DropInDestination(s.root, d),
+Content: d.Content,
+RawFilePermissions: "0644",
+}}
+
+_, err := WriteFile(&file, "/")
+return err
+}
+
+func (s *systemd) EnableUnitFile(u Unit) error {
 conn, err := dbus.New()
 if err != nil {
 return err
 }
 
-units := []string{unit}
-_, _, err = conn.EnableUnitFiles(units, runtime, true)
+units := []string{u.Name}
+_, _, err = conn.EnableUnitFiles(units, u.Runtime, true)
 return err
 }
 
-func (s *systemd) RunUnitCommand(command, unit string) (string, error) {
+func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) {
 conn, err := dbus.New()
 if err != nil {
 return "", err
 }
 
 var fn func(string, string) (string, error)
-switch command {
+switch c {
 case "start":
 fn = conn.StartUnit
 case "stop":
@@ -100,10 +101,10 @@ func (s *systemd) RunUnitCommand(command, unit string) (string, error) {
 case "reload-or-try-restart":
 fn = conn.ReloadOrTryRestartUnit
 default:
-return "", fmt.Errorf("Unsupported systemd command %q", command)
+return "", fmt.Errorf("Unsupported systemd command %q", c)
 }
 
-return fn(unit, "replace")
+return fn(u.Name, "replace")
 }
 
 func (s *systemd) DaemonReload() error {
@@ -119,8 +120,8 @@ func (s *systemd) DaemonReload() error {
 // /dev/null, analogous to `systemctl mask`.
 // N.B.: Unlike `systemctl mask`, this function will *remove any existing unit
 // file at the location*, to ensure that the mask will succeed.
-func (s *systemd) MaskUnit(unit *Unit) error {
-masked := unit.Destination(s.root)
+func (s *systemd) MaskUnit(u Unit) error {
+masked := u.Destination(s.root)
 if _, err := os.Stat(masked); os.IsNotExist(err) {
 if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil {
 return err
@@ -134,8 +135,8 @@ func (s *systemd) MaskUnit(unit *Unit) error {
 // UnmaskUnit is analogous to systemd's unit_file_unmask. If the file
 // associated with the given Unit is empty or appears to be a symlink to
 // /dev/null, it is removed.
-func (s *systemd) UnmaskUnit(unit *Unit) error {
-masked := unit.Destination(s.root)
+func (s *systemd) UnmaskUnit(u Unit) error {
+masked := u.Destination(s.root)
 ne, err := nullOrEmpty(masked)
 if os.IsNotExist(err) {
 return nil
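As a usage illustration only (not code from this change), the sketch below shows how a caller might drive the reworked UnitManager methods above: write each drop-in with PlaceUnitDropIn, reload systemd, then restart the unit by name. It assumes the Unit, config.Unit, and config.UnitDropIn types shown elsewhere in this compare view; the helper name is hypothetical.

```go
package system

import "github.com/coreos/coreos-cloudinit/config"

// exampleDropInPlacement is a hypothetical helper, not part of the diff.
func exampleDropInPlacement(um UnitManager) error {
	u := Unit{config.Unit{
		Name:    "fleet.service",
		Runtime: true,
		DropIns: []config.UnitDropIn{{
			Name:    "20-cloudinit.conf",
			Content: "[Service]\nEnvironment=\"FLEET_PUBLIC_IP=12.34.56.78\"\n",
		}},
	}}

	// Write each drop-in (.../fleet.service.d/20-cloudinit.conf), then let
	// systemd pick up the change and restart the unit.
	for _, d := range u.DropIns {
		if err := um.PlaceUnitDropIn(u, d); err != nil {
			return err
		}
	}
	if err := um.DaemonReload(); err != nil {
		return err
	}
	_, err := um.RunUnitCommand(u, "restart")
	return err
}
```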
@@ -17,6 +17,7 @@
 package system
 
 import (
+"fmt"
 "io/ioutil"
 "os"
 "path"
@@ -25,133 +26,109 @@ import (
 "github.com/coreos/coreos-cloudinit/config"
 )
 
-func TestPlaceNetworkUnit(t *testing.T) {
-u := Unit{config.Unit{
+func TestPlaceUnit(t *testing.T) {
+tests := []config.Unit{
+{
 Name: "50-eth0.network",
 Runtime: true,
-Content: `[Match]
-Name=eth47
-
-[Network]
-Address=10.209.171.177/19
-`,
-}}
-
-dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
-if err != nil {
-t.Fatalf("Unable to create tempdir: %v", err)
-}
-defer os.RemoveAll(dir)
-
-sd := &systemd{dir}
-
-dst := u.Destination(dir)
-expectDst := path.Join(dir, "run", "systemd", "network", "50-eth0.network")
-if dst != expectDst {
-t.Fatalf("unit.Destination returned %s, expected %s", dst, expectDst)
-}
-
-if err := sd.PlaceUnit(&u, dst); err != nil {
-t.Fatalf("PlaceUnit failed: %v", err)
-}
-
-fi, err := os.Stat(dst)
-if err != nil {
-t.Fatalf("Unable to stat file: %v", err)
-}
-
-if fi.Mode() != os.FileMode(0644) {
-t.Errorf("File has incorrect mode: %v", fi.Mode())
-}
-
-contents, err := ioutil.ReadFile(dst)
-if err != nil {
-t.Fatalf("Unable to read expected file: %v", err)
-}
-
-expectContents := `[Match]
-Name=eth47
-
-[Network]
-Address=10.209.171.177/19
-`
-if string(contents) != expectContents {
-t.Fatalf("File has incorrect contents '%s'.\nExpected '%s'", string(contents), expectContents)
-}
-}
-
-func TestUnitDestination(t *testing.T) {
-dir := "/some/dir"
-name := "foobar.service"
-
-u := Unit{config.Unit{
-Name: name,
-DropIn: false,
-}}
-
-dst := u.Destination(dir)
-expectDst := path.Join(dir, "etc", "systemd", "system", "foobar.service")
-if dst != expectDst {
-t.Errorf("unit.Destination returned %s, expected %s", dst, expectDst)
-}
-
-u.DropIn = true
-
-dst = u.Destination(dir)
-expectDst = path.Join(dir, "etc", "systemd", "system", "foobar.service.d", cloudConfigDropIn)
-if dst != expectDst {
-t.Errorf("unit.Destination returned %s, expected %s", dst, expectDst)
-}
-}
-
-func TestPlaceMountUnit(t *testing.T) {
-u := Unit{config.Unit{
+Content: "[Match]\nName=eth47\n\n[Network]\nAddress=10.209.171.177/19\n",
+},
+{
 Name: "media-state.mount",
-Runtime: false,
-Content: `[Mount]
-What=/dev/sdb1
-Where=/media/state
-`,
-}}
-
+Content: "[Mount]\nWhat=/dev/sdb1\nWhere=/media/state\n",
+},
+}
+
+for _, tt := range tests {
 dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
 if err != nil {
-t.Fatalf("Unable to create tempdir: %v", err)
+panic(fmt.Sprintf("Unable to create tempdir: %v", err))
 }
-defer os.RemoveAll(dir)
 
+u := Unit{tt}
 sd := &systemd{dir}
 
-dst := u.Destination(dir)
-expectDst := path.Join(dir, "etc", "systemd", "system", "media-state.mount")
-if dst != expectDst {
-t.Fatalf("unit.Destination returned %s, expected %s", dst, expectDst)
+if err := sd.PlaceUnit(u); err != nil {
+t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
 }
 
-if err := sd.PlaceUnit(&u, dst); err != nil {
-t.Fatalf("PlaceUnit failed: %v", err)
-}
-
-fi, err := os.Stat(dst)
+fi, err := os.Stat(u.Destination(dir))
 if err != nil {
-t.Fatalf("Unable to stat file: %v", err)
+t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
 }
 
-if fi.Mode() != os.FileMode(0644) {
-t.Errorf("File has incorrect mode: %v", fi.Mode())
+if mode := fi.Mode(); mode != os.FileMode(0644) {
+t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
 }
 
-contents, err := ioutil.ReadFile(dst)
+c, err := ioutil.ReadFile(u.Destination(dir))
 if err != nil {
-t.Fatalf("Unable to read expected file: %v", err)
+t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
 }
 
-expectContents := `[Mount]
-What=/dev/sdb1
-Where=/media/state
-`
-if string(contents) != expectContents {
-t.Fatalf("File has incorrect contents '%s'.\nExpected '%s'", string(contents), expectContents)
+if string(c) != tt.Content {
+t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Content, string(c))
 }
+
+os.RemoveAll(dir)
+}
+}
+
+func TestPlaceUnitDropIn(t *testing.T) {
+tests := []config.Unit{
+{
+Name: "false.service",
+Runtime: true,
+DropIns: []config.UnitDropIn{
+{
+Name: "00-true.conf",
+Content: "[Service]\nExecStart=\nExecStart=/usr/bin/true\n",
+},
+},
+},
+{
+Name: "true.service",
+DropIns: []config.UnitDropIn{
+{
+Name: "00-false.conf",
+Content: "[Service]\nExecStart=\nExecStart=/usr/bin/false\n",
+},
+},
+},
+}
+
+for _, tt := range tests {
+dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+if err != nil {
+panic(fmt.Sprintf("Unable to create tempdir: %v", err))
+}
+
+u := Unit{tt}
+sd := &systemd{dir}
+
+if err := sd.PlaceUnitDropIn(u, u.DropIns[0]); err != nil {
+t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
+}
+
+fi, err := os.Stat(u.DropInDestination(dir, u.DropIns[0]))
+if err != nil {
+t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
+}
+
+if mode := fi.Mode(); mode != os.FileMode(0644) {
+t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
+}
+
+c, err := ioutil.ReadFile(u.DropInDestination(dir, u.DropIns[0]))
+if err != nil {
+t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
+}
+
+if string(c) != u.DropIns[0].Content {
+t.Errorf("bad contents (%+v): want %q, got %q", tt, u.DropIns[0].Content, string(c))
+}
+
+os.RemoveAll(dir)
 }
 }
@@ -180,7 +157,7 @@ func TestMaskUnit(t *testing.T) {
 sd := &systemd{dir}
 
 // Ensure mask works with units that do not currently exist
-uf := &Unit{config.Unit{Name: "foo.service"}}
+uf := Unit{config.Unit{Name: "foo.service"}}
 if err := sd.MaskUnit(uf); err != nil {
 t.Fatalf("Unable to mask new unit: %v", err)
 }
@@ -194,7 +171,7 @@ func TestMaskUnit(t *testing.T) {
 }
 
 // Ensure mask works with unit files that already exist
-ub := &Unit{config.Unit{Name: "bar.service"}}
+ub := Unit{config.Unit{Name: "bar.service"}}
 barPath := path.Join(dir, "etc", "systemd", "system", "bar.service")
 if _, err := os.Create(barPath); err != nil {
 t.Fatalf("Error creating new unit file: %v", err)
@@ -220,12 +197,12 @@ func TestUnmaskUnit(t *testing.T) {
 
 sd := &systemd{dir}
 
-nilUnit := &Unit{config.Unit{Name: "null.service"}}
+nilUnit := Unit{config.Unit{Name: "null.service"}}
 if err := sd.UnmaskUnit(nilUnit); err != nil {
 t.Errorf("unexpected error from unmasking nonexistent unit: %v", err)
 }
 
-uf := &Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}}
+uf := Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}}
 dst := uf.Destination(dir)
 if err := os.MkdirAll(path.Dir(dst), os.FileMode(0755)); err != nil {
 t.Fatalf("Unable to create unit directory: %v", err)
@@ -245,7 +222,7 @@ func TestUnmaskUnit(t *testing.T) {
 t.Errorf("unmask of non-empty unit mutated unit contents unexpectedly")
 }
 
-ub := &Unit{config.Unit{Name: "bar.service"}}
+ub := Unit{config.Unit{Name: "bar.service"}}
 dst = ub.Destination(dir)
 if err := os.Symlink("/dev/null", dst); err != nil {
 t.Fatalf("Unable to create masked unit: %v", err)
@@ -19,40 +19,66 @@ package system
 import (
 "fmt"
 "path"
+"path/filepath"
+"strings"
 
 "github.com/coreos/coreos-cloudinit/config"
 )
 
-// Name for drop-in service configuration files created by cloudconfig
-const cloudConfigDropIn = "20-cloudinit.conf"
-
 type UnitManager interface {
-PlaceUnit(unit *Unit, dst string) error
-EnableUnitFile(unit string, runtime bool) error
-RunUnitCommand(command, unit string) (string, error)
+PlaceUnit(unit Unit) error
+PlaceUnitDropIn(unit Unit, dropIn config.UnitDropIn) error
+EnableUnitFile(unit Unit) error
+RunUnitCommand(unit Unit, command string) (string, error)
+MaskUnit(unit Unit) error
+UnmaskUnit(unit Unit) error
 DaemonReload() error
-MaskUnit(unit *Unit) error
-UnmaskUnit(unit *Unit) error
 }
 
 // Unit is a top-level structure which embeds its underlying configuration,
-// config.Unit, and provides the system-specific Destination().
+// config.Unit, and provides the system-specific Destination(), Type(), and
+// Group().
 type Unit struct {
 config.Unit
 }
 
-// Destination builds the appropriate absolute file path for
-// the Unit. The root argument indicates the effective base
-// directory of the system (similar to a chroot).
-func (u *Unit) Destination(root string) string {
+// Type returns the extension of the unit (everything that follows the final
+// period).
+func (u Unit) Type() string {
+ext := filepath.Ext(u.Name)
+return strings.TrimLeft(ext, ".")
+}
+
+// Group returns "network" or "system" depending on whether or not the unit is
+// a network unit or otherwise.
+func (u Unit) Group() string {
+switch u.Type() {
+case "network", "netdev", "link":
+return "network"
+default:
+return "system"
+}
+}
+
+// Destination builds the appropriate absolute file path for the Unit. The root
+// argument indicates the effective base directory of the system (similar to a
+// chroot).
+func (u Unit) Destination(root string) string {
+return path.Join(u.prefix(root), u.Name)
+}
+
+// DropInDestination builds the appropriate absolute file path for the
+// UnitDropIn. The root argument indicates the effective base directory of the
+// system (similar to a chroot) and the dropIn argument is the UnitDropIn for
+// which the destination is being calculated.
+func (u Unit) DropInDestination(root string, dropIn config.UnitDropIn) string {
+return path.Join(u.prefix(root), fmt.Sprintf("%s.d", u.Name), dropIn.Name)
+}
+
+func (u Unit) prefix(root string) string {
 dir := "etc"
 if u.Runtime {
 dir = "run"
 }
-if u.DropIn {
-return path.Join(root, dir, "systemd", u.Group(), fmt.Sprintf("%s.d", u.Name), cloudConfigDropIn)
-} else {
-return path.Join(root, dir, "systemd", u.Group(), u.Name)
-}
+return path.Join(root, dir, "systemd", u.Group())
 }
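The Type(), Group(), and prefix() logic above determines where a unit file ends up. A short standalone sketch (reimplementing the same rules for illustration; it is not code from the diff) shows the combined effect: network-related extensions resolve under systemd/network, everything else under systemd/system, with /run versus /etc chosen by the Runtime flag.

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// destination mirrors Unit.Destination from the diff above.
func destination(root, name string, runtime bool) string {
	dir := "etc"
	if runtime {
		dir = "run"
	}
	group := "system"
	switch strings.TrimLeft(filepath.Ext(name), ".") {
	case "network", "netdev", "link":
		group = "network"
	}
	return path.Join(root, dir, "systemd", group, name)
}

func main() {
	fmt.Println(destination("/", "50-eth0.network", true)) // /run/systemd/network/50-eth0.network
	fmt.Println(destination("/", "fleet.service", false))  // /etc/systemd/system/fleet.service
}
```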
system/unit_test.go (new file, 138 lines)
@@ -0,0 +1,138 @@
/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package system

import (
"testing"

"github.com/coreos/coreos-cloudinit/config"
)

func TestType(t *testing.T) {
tests := []struct {
name string

typ string
}{
{},
{"test.service", "service"},
{"hello", ""},
{"lots.of.dots", "dots"},
}

for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
}}
if typ := u.Type(); tt.typ != typ {
t.Errorf("bad type (%+v): want %q, got %q", tt, tt.typ, typ)
}
}
}

func TestGroup(t *testing.T) {
tests := []struct {
name string

group string
}{
{"test.service", "system"},
{"test.link", "network"},
{"test.network", "network"},
{"test.netdev", "network"},
{"test.conf", "system"},
}

for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
}}
if group := u.Group(); tt.group != group {
t.Errorf("bad group (%+v): want %q, got %q", tt, tt.group, group)
}
}
}

func TestDestination(t *testing.T) {
tests := []struct {
root string
name string
runtime bool

destination string
}{
{
root: "/some/dir",
name: "foobar.service",
destination: "/some/dir/etc/systemd/system/foobar.service",
},
{
root: "/some/dir",
name: "foobar.service",
runtime: true,
destination: "/some/dir/run/systemd/system/foobar.service",
},
}

for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.name,
Runtime: tt.runtime,
}}
if d := u.Destination(tt.root); tt.destination != d {
t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
}
}
}

func TestDropInDestination(t *testing.T) {
tests := []struct {
root string
unitName string
dropInName string
runtime bool

destination string
}{
{
root: "/some/dir",
unitName: "foo.service",
dropInName: "bar.conf",
destination: "/some/dir/etc/systemd/system/foo.service.d/bar.conf",
},
{
root: "/some/dir",
unitName: "foo.service",
dropInName: "bar.conf",
runtime: true,
destination: "/some/dir/run/systemd/system/foo.service.d/bar.conf",
},
}

for _, tt := range tests {
u := Unit{config.Unit{
Name: tt.unitName,
Runtime: tt.runtime,
DropIns: []config.UnitDropIn{{
Name: tt.dropInName,
}},
}}
if d := u.DropInDestination(tt.root, u.DropIns[0]); tt.destination != d {
t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
}
}
}
@@ -126,7 +126,7 @@ func (uc Update) Units() []Unit {
 Runtime: true,
 }}
 
-if uc.Update.RebootStrategy == "false" {
+if uc.Update.RebootStrategy == "off" {
 ls.Command = "stop"
 ls.Mask = true
 }
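The rename of the reboot strategy value from "false" to "off" also shows up in validation: the update_test.go hunks below expect valid values to match the regexp ^(best-effort|etcd-lock|reboot|off)$. A small standalone sketch, for illustration only:

```go
package main

import (
	"fmt"
	"regexp"
)

// validStrategies uses the pattern from the ErrorValid value in the
// update_test.go hunk below.
var validStrategies = regexp.MustCompile(`^(best-effort|etcd-lock|reboot|off)$`)

func main() {
	for _, s := range []string{"off", "false", "wizzlewazzle"} {
		fmt.Printf("%q valid: %v\n", s, validStrategies.MatchString(s))
	}
}
```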
@@ -72,7 +72,7 @@ func TestUpdateUnits(t *testing.T) {
 }}},
 },
 {
-config: config.Update{RebootStrategy: "false"},
+config: config.Update{RebootStrategy: "off"},
 units: []Unit{{config.Unit{
 Name: "locksmithd.service",
 Command: "stop",
@@ -100,7 +100,7 @@ func TestUpdateFile(t *testing.T) {
 },
 {
 config: config.Update{RebootStrategy: "wizzlewazzle"},
-err: &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: []string{"best-effort", "etcd-lock", "reboot", "false"}},
+err: &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: "^(best-effort|etcd-lock|reboot|off)$"},
 },
 {
 config: config.Update{Group: "master", Server: "http://foo.com"},
@@ -135,9 +135,9 @@ func TestUpdateFile(t *testing.T) {
 }},
 },
 {
-config: config.Update{RebootStrategy: "false"},
+config: config.Update{RebootStrategy: "off"},
 file: &File{config.File{
-Content: "REBOOT_STRATEGY=false\n",
+Content: "REBOOT_STRATEGY=off\n",
 Path: "etc/coreos/update.conf",
 RawFilePermissions: "0644",
 }},